1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (c) 2017 Hisilicon Limited.
6 #include <linux/sched/clock.h>
8 #define DRV_NAME "hisi_sas_v3_hw"
10 /* global registers need init */
11 #define DLVRY_QUEUE_ENABLE 0x0
12 #define IOST_BASE_ADDR_LO 0x8
13 #define IOST_BASE_ADDR_HI 0xc
14 #define ITCT_BASE_ADDR_LO 0x10
15 #define ITCT_BASE_ADDR_HI 0x14
16 #define IO_BROKEN_MSG_ADDR_LO 0x18
17 #define IO_BROKEN_MSG_ADDR_HI 0x1c
18 #define PHY_CONTEXT 0x20
19 #define PHY_STATE 0x24
20 #define PHY_PORT_NUM_MA 0x28
21 #define PHY_CONN_RATE 0x30
23 #define ITCT_CLR_EN_OFF 16
24 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
25 #define ITCT_DEV_OFF 0
26 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
27 #define SAS_AXI_USER3 0x50
28 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
29 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
30 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60
31 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64
32 #define CFG_MAX_TAG 0x68
33 #define TRANS_LOCK_ICT_TIME 0X70
34 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
35 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
36 #define HGC_GET_ITV_TIME 0x90
37 #define DEVICE_MSG_WORK_MODE 0x94
38 #define OPENA_WT_CONTI_TIME 0x9c
39 #define I_T_NEXUS_LOSS_TIME 0xa0
40 #define MAX_CON_TIME_LIMIT_TIME 0xa4
41 #define BUS_INACTIVE_LIMIT_TIME 0xa8
42 #define REJECT_TO_OPEN_LIMIT_TIME 0xac
43 #define CQ_INT_CONVERGE_EN 0xb0
44 #define CFG_AGING_TIME 0xbc
45 #define HGC_DFX_CFG2 0xc0
46 #define CFG_ICT_TIMER_STEP_TRSH 0xc8
47 #define CFG_ABT_SET_QUERY_IPTT 0xd4
48 #define CFG_SET_ABORTED_IPTT_OFF 0
49 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF)
50 #define CFG_SET_ABORTED_EN_OFF 12
51 #define CFG_ABT_SET_IPTT_DONE 0xd8
52 #define CFG_ABT_SET_IPTT_DONE_OFF 0
53 #define HGC_IOMB_PROC1_STATUS 0x104
54 #define HGC_LM_DFX_STATUS2 0x128
55 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
56 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
57 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
58 #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
59 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
60 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
61 #define HGC_CQE_ECC_ADDR 0x13c
62 #define HGC_CQE_ECC_1B_ADDR_OFF 0
63 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
64 #define HGC_CQE_ECC_MB_ADDR_OFF 8
65 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
66 #define HGC_IOST_ECC_ADDR 0x140
67 #define HGC_IOST_ECC_1B_ADDR_OFF 0
68 #define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
69 #define HGC_IOST_ECC_MB_ADDR_OFF 16
70 #define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
71 #define HGC_DQE_ECC_ADDR 0x144
72 #define HGC_DQE_ECC_1B_ADDR_OFF 0
73 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
74 #define HGC_DQE_ECC_MB_ADDR_OFF 16
75 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
76 #define CHNL_INT_STATUS 0x148
78 #define HGC_ITCT_ECC_ADDR 0x150
79 #define HGC_ITCT_ECC_1B_ADDR_OFF 0
80 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
81 HGC_ITCT_ECC_1B_ADDR_OFF)
82 #define HGC_ITCT_ECC_MB_ADDR_OFF 16
83 #define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
84 HGC_ITCT_ECC_MB_ADDR_OFF)
85 #define HGC_AXI_FIFO_ERR_INFO 0x154
86 #define AXI_ERR_INFO_OFF 0
87 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
88 #define FIFO_ERR_INFO_OFF 8
89 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
90 #define TAB_RD_TYPE 0x15c
91 #define INT_COAL_EN 0x19c
92 #define OQ_INT_COAL_TIME 0x1a0
93 #define OQ_INT_COAL_CNT 0x1a4
94 #define ENT_INT_COAL_TIME 0x1a8
95 #define ENT_INT_COAL_CNT 0x1ac
96 #define OQ_INT_SRC 0x1b0
97 #define OQ_INT_SRC_MSK 0x1b4
98 #define ENT_INT_SRC1 0x1b8
99 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
100 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
101 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
102 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
103 #define ENT_INT_SRC2 0x1bc
104 #define ENT_INT_SRC3 0x1c0
105 #define ENT_INT_SRC3_WP_DEPTH_OFF 8
106 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
107 #define ENT_INT_SRC3_RP_DEPTH_OFF 10
108 #define ENT_INT_SRC3_AXI_OFF 11
109 #define ENT_INT_SRC3_FIFO_OFF 12
110 #define ENT_INT_SRC3_LM_OFF 14
111 #define ENT_INT_SRC3_ITC_INT_OFF 15
112 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
113 #define ENT_INT_SRC3_ABT_OFF 16
114 #define ENT_INT_SRC3_DQE_POISON_OFF 18
115 #define ENT_INT_SRC3_IOST_POISON_OFF 19
116 #define ENT_INT_SRC3_ITCT_POISON_OFF 20
117 #define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF 21
118 #define ENT_INT_SRC_MSK1 0x1c4
119 #define ENT_INT_SRC_MSK2 0x1c8
120 #define ENT_INT_SRC_MSK3 0x1cc
121 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
122 #define CHNL_PHYUPDOWN_INT_MSK 0x1d0
123 #define CHNL_ENT_INT_MSK 0x1d4
124 #define HGC_COM_INT_MSK 0x1d8
125 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
126 #define SAS_ECC_INTR 0x1e8
127 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
128 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
129 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
130 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
131 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 4
132 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 5
133 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 6
134 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 7
135 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 8
136 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 9
137 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
138 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
139 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 12
140 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 13
141 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 14
142 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 15
143 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 16
144 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 17
145 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 18
146 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 19
147 #define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF 20
148 #define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF 21
149 #define SAS_ECC_INTR_MSK 0x1ec
150 #define HGC_ERR_STAT_EN 0x238
151 #define CQE_SEND_CNT 0x248
152 #define DLVRY_Q_0_BASE_ADDR_LO 0x260
153 #define DLVRY_Q_0_BASE_ADDR_HI 0x264
154 #define DLVRY_Q_0_DEPTH 0x268
155 #define DLVRY_Q_0_WR_PTR 0x26c
156 #define DLVRY_Q_0_RD_PTR 0x270
157 #define HYPER_STREAM_ID_EN_CFG 0xc80
158 #define OQ0_INT_SRC_MSK 0xc90
159 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0
160 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4
161 #define COMPL_Q_0_DEPTH 0x4e8
162 #define COMPL_Q_0_WR_PTR 0x4ec
163 #define COMPL_Q_0_RD_PTR 0x4f0
164 #define HGC_RXM_DFX_STATUS14 0xae8
165 #define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
166 #define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
167 HGC_RXM_DFX_STATUS14_MEM0_OFF)
168 #define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
169 #define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
170 HGC_RXM_DFX_STATUS14_MEM1_OFF)
171 #define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
172 #define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
173 HGC_RXM_DFX_STATUS14_MEM2_OFF)
174 #define HGC_RXM_DFX_STATUS15 0xaec
175 #define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
176 #define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
177 HGC_RXM_DFX_STATUS15_MEM3_OFF)
178 #define AWQOS_AWCACHE_CFG 0xc84
179 #define ARQOS_ARCACHE_CFG 0xc88
180 #define HILINK_ERR_DFX 0xe04
181 #define SAS_GPIO_CFG_0 0x1000
182 #define SAS_GPIO_CFG_1 0x1004
183 #define SAS_GPIO_TX_0_1 0x1040
184 #define SAS_CFG_DRIVE_VLD 0x1070
186 /* phy registers requiring init */
187 #define PORT_BASE (0x2000)
188 #define PHY_CFG (PORT_BASE + 0x0)
189 #define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
190 #define PHY_CFG_ENA_OFF 0
191 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
192 #define PHY_CFG_DC_OPT_OFF 2
193 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
194 #define PHY_CFG_PHY_RST_OFF 3
195 #define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
196 #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
197 #define CFG_PROG_PHY_LINK_RATE_OFF 0
198 #define CFG_PROG_PHY_LINK_RATE_MSK (0xff << CFG_PROG_PHY_LINK_RATE_OFF)
199 #define CFG_PROG_OOB_PHY_LINK_RATE_OFF 8
200 #define CFG_PROG_OOB_PHY_LINK_RATE_MSK (0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
201 #define PHY_CTRL (PORT_BASE + 0x14)
202 #define PHY_CTRL_RESET_OFF 0
203 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
204 #define CMD_HDR_PIR_OFF 8
205 #define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
206 #define SERDES_CFG (PORT_BASE + 0x1c)
207 #define CFG_ALOS_CHK_DISABLE_OFF 9
208 #define CFG_ALOS_CHK_DISABLE_MSK (0x1 << CFG_ALOS_CHK_DISABLE_OFF)
209 #define SAS_PHY_BIST_CTRL (PORT_BASE + 0x2c)
210 #define CFG_BIST_MODE_SEL_OFF 0
211 #define CFG_BIST_MODE_SEL_MSK (0xf << CFG_BIST_MODE_SEL_OFF)
212 #define CFG_LOOP_TEST_MODE_OFF 14
213 #define CFG_LOOP_TEST_MODE_MSK (0x3 << CFG_LOOP_TEST_MODE_OFF)
214 #define CFG_RX_BIST_EN_OFF 16
215 #define CFG_RX_BIST_EN_MSK (0x1 << CFG_RX_BIST_EN_OFF)
216 #define CFG_TX_BIST_EN_OFF 17
217 #define CFG_TX_BIST_EN_MSK (0x1 << CFG_TX_BIST_EN_OFF)
218 #define CFG_BIST_TEST_OFF 18
219 #define CFG_BIST_TEST_MSK (0x1 << CFG_BIST_TEST_OFF)
220 #define SAS_PHY_BIST_CODE (PORT_BASE + 0x30)
221 #define SAS_PHY_BIST_CODE1 (PORT_BASE + 0x34)
222 #define SAS_BIST_ERR_CNT (PORT_BASE + 0x38)
223 #define SL_CFG (PORT_BASE + 0x84)
224 #define AIP_LIMIT (PORT_BASE + 0x90)
225 #define SL_CONTROL (PORT_BASE + 0x94)
226 #define SL_CONTROL_NOTIFY_EN_OFF 0
227 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
228 #define SL_CTA_OFF 17
229 #define SL_CTA_MSK (0x1 << SL_CTA_OFF)
230 #define RX_PRIMS_STATUS (PORT_BASE + 0x98)
231 #define RX_BCAST_CHG_OFF 1
232 #define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
233 #define TX_ID_DWORD0 (PORT_BASE + 0x9c)
234 #define TX_ID_DWORD1 (PORT_BASE + 0xa0)
235 #define TX_ID_DWORD2 (PORT_BASE + 0xa4)
236 #define TX_ID_DWORD3 (PORT_BASE + 0xa8)
237 #define TX_ID_DWORD4 (PORT_BASE + 0xaC)
238 #define TX_ID_DWORD5 (PORT_BASE + 0xb0)
239 #define TX_ID_DWORD6 (PORT_BASE + 0xb4)
240 #define TXID_AUTO (PORT_BASE + 0xb8)
242 #define CT3_MSK (0x1 << CT3_OFF)
243 #define TX_HARDRST_OFF 2
244 #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
245 #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
246 #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
247 #define STP_LINK_TIMER (PORT_BASE + 0x120)
248 #define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
249 #define CON_CFG_DRIVER (PORT_BASE + 0x130)
250 #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
251 #define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
252 #define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
253 #define CHL_INT0 (PORT_BASE + 0x1b4)
254 #define CHL_INT0_HOTPLUG_TOUT_OFF 0
255 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
256 #define CHL_INT0_SL_RX_BCST_ACK_OFF 1
257 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
258 #define CHL_INT0_SL_PHY_ENABLE_OFF 2
259 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
260 #define CHL_INT0_NOT_RDY_OFF 4
261 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
262 #define CHL_INT0_PHY_RDY_OFF 5
263 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
264 #define CHL_INT1 (PORT_BASE + 0x1b8)
265 #define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15
266 #define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16
267 #define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17
268 #define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18
269 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
270 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
271 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
272 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
273 #define CHL_INT1_DMAC_TX_FIFO_ERR_OFF 23
274 #define CHL_INT1_DMAC_RX_FIFO_ERR_OFF 24
275 #define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF 26
276 #define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF 27
277 #define CHL_INT2 (PORT_BASE + 0x1bc)
278 #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
279 #define CHL_INT2_RX_DISP_ERR_OFF 28
280 #define CHL_INT2_RX_CODE_ERR_OFF 29
281 #define CHL_INT2_RX_INVLD_DW_OFF 30
282 #define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
283 #define CHL_INT0_MSK (PORT_BASE + 0x1c0)
284 #define CHL_INT1_MSK (PORT_BASE + 0x1c4)
285 #define CHL_INT2_MSK (PORT_BASE + 0x1c8)
286 #define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc)
287 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
288 #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4)
289 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
290 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
291 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
292 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
293 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
294 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
295 #define DMA_TX_STATUS (PORT_BASE + 0x2d0)
296 #define DMA_TX_STATUS_BUSY_OFF 0
297 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF)
298 #define DMA_RX_STATUS (PORT_BASE + 0x2e8)
299 #define DMA_RX_STATUS_BUSY_OFF 0
300 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
302 #define COARSETUNE_TIME (PORT_BASE + 0x304)
303 #define TXDEEMPH_G1 (PORT_BASE + 0x350)
304 #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
305 #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
306 #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
307 #define ERR_CNT_CODE_ERR (PORT_BASE + 0x394)
308 #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
309 #define DFX_FIFO_CTRL (PORT_BASE + 0x3a0)
310 #define DFX_FIFO_CTRL_TRIGGER_MODE_OFF 0
311 #define DFX_FIFO_CTRL_TRIGGER_MODE_MSK (0x7 << DFX_FIFO_CTRL_TRIGGER_MODE_OFF)
312 #define DFX_FIFO_CTRL_DUMP_MODE_OFF 3
313 #define DFX_FIFO_CTRL_DUMP_MODE_MSK (0x7 << DFX_FIFO_CTRL_DUMP_MODE_OFF)
314 #define DFX_FIFO_CTRL_SIGNAL_SEL_OFF 6
315 #define DFX_FIFO_CTRL_SIGNAL_SEL_MSK (0xF << DFX_FIFO_CTRL_SIGNAL_SEL_OFF)
316 #define DFX_FIFO_CTRL_DUMP_DISABLE_OFF 10
317 #define DFX_FIFO_CTRL_DUMP_DISABLE_MSK (0x1 << DFX_FIFO_CTRL_DUMP_DISABLE_OFF)
318 #define DFX_FIFO_TRIGGER (PORT_BASE + 0x3a4)
319 #define DFX_FIFO_TRIGGER_MSK (PORT_BASE + 0x3a8)
320 #define DFX_FIFO_DUMP_MSK (PORT_BASE + 0x3aC)
321 #define DFX_FIFO_RD_DATA (PORT_BASE + 0x3b0)
323 #define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
324 #if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
325 #error Max ITCT exceeded
328 #define AXI_MASTER_CFG_BASE (0x5000)
329 #define AM_CTRL_GLOBAL (0x0)
330 #define AM_CTRL_SHUTDOWN_REQ_OFF 0
331 #define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
332 #define AM_CURR_TRANS_RETURN (0x150)
334 #define AM_CFG_MAX_TRANS (0x5010)
335 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
336 #define AXI_CFG (0x5100)
337 #define AM_ROB_ECC_ERR_ADDR (0x510c)
338 #define AM_ROB_ECC_ERR_ADDR_OFF 0
339 #define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff
341 /* RAS registers need init */
342 #define RAS_BASE (0x6000)
343 #define SAS_RAS_INTR0 (RAS_BASE)
344 #define SAS_RAS_INTR1 (RAS_BASE + 0x04)
345 #define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
346 #define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
347 #define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c)
348 #define SAS_RAS_INTR2 (RAS_BASE + 0x20)
349 #define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24)
351 /* HW dma structures */
352 /* Delivery queue header */
354 #define CMD_HDR_ABORT_FLAG_OFF 0
355 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF)
356 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2
357 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
358 #define CMD_HDR_RESP_REPORT_OFF 5
359 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
360 #define CMD_HDR_TLR_CTRL_OFF 6
361 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
362 #define CMD_HDR_PORT_OFF 18
363 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
364 #define CMD_HDR_PRIORITY_OFF 27
365 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
366 #define CMD_HDR_CMD_OFF 29
367 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
369 #define CMD_HDR_UNCON_CMD_OFF 3
370 #define CMD_HDR_DIR_OFF 5
371 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
372 #define CMD_HDR_RESET_OFF 7
373 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
374 #define CMD_HDR_VDTL_OFF 10
375 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
376 #define CMD_HDR_FRAME_TYPE_OFF 11
377 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
378 #define CMD_HDR_DEV_ID_OFF 16
379 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
381 #define CMD_HDR_CFL_OFF 0
382 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
383 #define CMD_HDR_NCQ_TAG_OFF 10
384 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
385 #define CMD_HDR_MRFL_OFF 15
386 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
387 #define CMD_HDR_SG_MOD_OFF 24
388 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
390 #define CMD_HDR_IPTT_OFF 0
391 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
393 #define CMD_HDR_DIF_SGL_LEN_OFF 0
394 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
395 #define CMD_HDR_DATA_SGL_LEN_OFF 16
396 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
398 #define CMD_HDR_ADDR_MODE_SEL_OFF 15
399 #define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF)
400 #define CMD_HDR_ABORT_IPTT_OFF 16
401 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF)
403 /* Completion header */
405 #define CMPLT_HDR_CMPLT_OFF 0
406 #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF)
407 #define CMPLT_HDR_ERROR_PHASE_OFF 2
408 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
409 /* bit[9:2] Error Phase */
410 #define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF \
412 #define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK \
413 (0x1 << ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF)
414 #define CMPLT_HDR_RSPNS_XFRD_OFF 10
415 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
416 #define CMPLT_HDR_RSPNS_GOOD_OFF 11
417 #define CMPLT_HDR_RSPNS_GOOD_MSK (0x1 << CMPLT_HDR_RSPNS_GOOD_OFF)
418 #define CMPLT_HDR_ERX_OFF 12
419 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
420 #define CMPLT_HDR_ABORT_STAT_OFF 13
421 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
423 #define STAT_IO_NOT_VALID 0x1
424 #define STAT_IO_NO_DEVICE 0x2
425 #define STAT_IO_COMPLETE 0x3
426 #define STAT_IO_ABORTED 0x4
428 #define CMPLT_HDR_IPTT_OFF 0
429 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
430 #define CMPLT_HDR_DEV_ID_OFF 16
431 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
433 #define SATA_DISK_IN_ERROR_STATUS_OFF 8
434 #define SATA_DISK_IN_ERROR_STATUS_MSK (0x1 << SATA_DISK_IN_ERROR_STATUS_OFF)
435 #define CMPLT_HDR_SATA_DISK_ERR_OFF 16
436 #define CMPLT_HDR_SATA_DISK_ERR_MSK (0x1 << CMPLT_HDR_SATA_DISK_ERR_OFF)
437 #define CMPLT_HDR_IO_IN_TARGET_OFF 17
438 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
439 /* bit[23:18] ERR_FIS_ATA_STATUS */
440 #define FIS_ATA_STATUS_ERR_OFF 18
441 #define FIS_ATA_STATUS_ERR_MSK (0x1 << FIS_ATA_STATUS_ERR_OFF)
442 #define FIS_TYPE_SDB_OFF 31
443 #define FIS_TYPE_SDB_MSK (0x1 << FIS_TYPE_SDB_OFF)
447 #define ITCT_HDR_DEV_TYPE_OFF 0
448 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
449 #define ITCT_HDR_VALID_OFF 2
450 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
451 #define ITCT_HDR_MCR_OFF 5
452 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
453 #define ITCT_HDR_VLN_OFF 9
454 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
455 #define ITCT_HDR_SMP_TIMEOUT_OFF 16
456 #define ITCT_HDR_AWT_CONTINUE_OFF 25
457 #define ITCT_HDR_PORT_ID_OFF 28
458 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
460 #define ITCT_HDR_INLT_OFF 0
461 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
462 #define ITCT_HDR_RTOLT_OFF 48
463 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
465 struct hisi_sas_protect_iu_v3_hw
{
475 struct hisi_sas_complete_v3_hdr
{
482 struct hisi_sas_err_record_v3
{
484 __le32 trans_tx_fail_type
;
487 __le32 trans_rx_fail_type
;
490 __le16 dma_tx_err_type
;
491 __le16 sipc_rx_err_type
;
494 __le32 dma_rx_err_type
;
497 #define RX_DATA_LEN_UNDERFLOW_OFF 6
498 #define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
500 #define RX_FIS_STATUS_ERR_OFF 0
501 #define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF)
503 #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
504 #define HISI_SAS_MSI_COUNT_V3_HW 32
506 #define DIR_NO_DATA 0
508 #define DIR_TO_DEVICE 2
509 #define DIR_RESERVED 3
511 #define FIS_CMD_IS_UNCONSTRAINED(fis) \
512 ((fis.command == ATA_CMD_READ_LOG_EXT) || \
513 (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
514 ((fis.command == ATA_CMD_DEV_RESET) && \
515 ((fis.control & ATA_SRST) != 0)))
517 #define T10_INSRT_EN_OFF 0
518 #define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF)
519 #define T10_RMV_EN_OFF 1
520 #define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF)
521 #define T10_RPLC_EN_OFF 2
522 #define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF)
523 #define T10_CHK_EN_OFF 3
524 #define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF)
525 #define INCR_LBRT_OFF 5
526 #define INCR_LBRT_MSK (1 << INCR_LBRT_OFF)
527 #define USR_DATA_BLOCK_SZ_OFF 20
528 #define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF)
529 #define T10_CHK_MSK_OFF 16
530 #define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF)
531 #define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF)
533 #define BASE_VECTORS_V3_HW 16
534 #define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
536 #define CHNL_INT_STS_MSK 0xeeeeeeee
537 #define CHNL_INT_STS_PHY_MSK 0xe
538 #define CHNL_INT_STS_INT0_MSK BIT(1)
539 #define CHNL_INT_STS_INT1_MSK BIT(2)
540 #define CHNL_INT_STS_INT2_MSK BIT(3)
543 #define BAR_NO_V3_HW 5
546 DSM_FUNC_ERR_HANDLE_MSI
= 0,
549 static bool hisi_sas_intr_conv
;
550 MODULE_PARM_DESC(intr_conv
, "interrupt converge enable (0-1)");
552 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
553 static int prot_mask
;
554 module_param(prot_mask
, int, 0444);
555 MODULE_PARM_DESC(prot_mask
, " host protection capabilities mask, def=0x0 ");
557 /* the index of iopoll queues are bigger than interrupt queues' */
558 static int experimental_iopoll_q_cnt
;
559 module_param(experimental_iopoll_q_cnt
, int, 0444);
560 MODULE_PARM_DESC(experimental_iopoll_q_cnt
, "number of queues to be used as poll mode, def=0");
562 static int debugfs_snapshot_regs_v3_hw(struct hisi_hba
*hisi_hba
);
564 static u32
hisi_sas_read32(struct hisi_hba
*hisi_hba
, u32 off
)
566 void __iomem
*regs
= hisi_hba
->regs
+ off
;
571 static void hisi_sas_write32(struct hisi_hba
*hisi_hba
, u32 off
, u32 val
)
573 void __iomem
*regs
= hisi_hba
->regs
+ off
;
578 static void hisi_sas_phy_write32(struct hisi_hba
*hisi_hba
, int phy_no
,
581 void __iomem
*regs
= hisi_hba
->regs
+ (0x400 * phy_no
) + off
;
586 static u32
hisi_sas_phy_read32(struct hisi_hba
*hisi_hba
,
589 void __iomem
*regs
= hisi_hba
->regs
+ (0x400 * phy_no
) + off
;
594 #define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \
597 void __iomem *regs = hisi_hba->regs + off; \
598 readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \
601 #define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \
604 void __iomem *regs = hisi_hba->regs + off; \
605 readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
608 static void interrupt_enable_v3_hw(struct hisi_hba
*hisi_hba
)
612 for (i
= 0; i
< hisi_hba
->queue_count
; i
++)
613 hisi_sas_write32(hisi_hba
, OQ0_INT_SRC_MSK
+ 0x4 * i
, 0);
615 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK1
, 0xfefefefe);
616 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK2
, 0xfefefefe);
617 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK3
, 0xffc220ff);
618 hisi_sas_write32(hisi_hba
, SAS_ECC_INTR_MSK
, 0x155555);
620 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
621 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT1_MSK
, 0xf2057fff);
622 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT2_MSK
, 0xffffbfe);
623 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_NOT_RDY_MSK
, 0x0);
624 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_PHY_ENA_MSK
, 0x0);
625 hisi_sas_phy_write32(hisi_hba
, i
, SL_RX_BCAST_CHK_MSK
, 0x0);
629 static void init_reg_v3_hw(struct hisi_hba
*hisi_hba
)
631 struct pci_dev
*pdev
= hisi_hba
->pci_dev
;
634 /* Global registers init */
635 hisi_sas_write32(hisi_hba
, DLVRY_QUEUE_ENABLE
,
636 (u32
)((1ULL << hisi_hba
->queue_count
) - 1));
637 hisi_sas_write32(hisi_hba
, CFG_MAX_TAG
, 0xfff0400);
638 /* time / CLK_AHB = 2.5s / 2ns = 0x4A817C80 */
639 hisi_sas_write32(hisi_hba
, TRANS_LOCK_ICT_TIME
, 0x4A817C80);
640 hisi_sas_write32(hisi_hba
, HGC_SAS_TXFAIL_RETRY_CTRL
, 0x108);
641 hisi_sas_write32(hisi_hba
, CFG_AGING_TIME
, 0x1);
642 hisi_sas_write32(hisi_hba
, CFG_ICT_TIMER_STEP_TRSH
, 0xf4240);
643 hisi_sas_write32(hisi_hba
, INT_COAL_EN
, 0x3);
644 /* configure the interrupt coalescing timeout period 10us */
645 hisi_sas_write32(hisi_hba
, OQ_INT_COAL_TIME
, 0xa);
646 /* configure the count of CQ entries 10 */
647 hisi_sas_write32(hisi_hba
, OQ_INT_COAL_CNT
, 0xa);
648 hisi_sas_write32(hisi_hba
, CQ_INT_CONVERGE_EN
,
650 hisi_sas_write32(hisi_hba
, OQ_INT_SRC
, 0xffff);
651 hisi_sas_write32(hisi_hba
, ENT_INT_SRC1
, 0xffffffff);
652 hisi_sas_write32(hisi_hba
, ENT_INT_SRC2
, 0xffffffff);
653 hisi_sas_write32(hisi_hba
, ENT_INT_SRC3
, 0xffffffff);
654 hisi_sas_write32(hisi_hba
, CHNL_PHYUPDOWN_INT_MSK
, 0x0);
655 hisi_sas_write32(hisi_hba
, CHNL_ENT_INT_MSK
, 0x0);
656 hisi_sas_write32(hisi_hba
, HGC_COM_INT_MSK
, 0x0);
657 hisi_sas_write32(hisi_hba
, AWQOS_AWCACHE_CFG
, 0xf0f0);
658 hisi_sas_write32(hisi_hba
, ARQOS_ARCACHE_CFG
, 0xf0f0);
659 hisi_sas_write32(hisi_hba
, HYPER_STREAM_ID_EN_CFG
, 1);
661 if (pdev
->revision
< 0x30)
662 hisi_sas_write32(hisi_hba
, SAS_AXI_USER3
, 0);
664 interrupt_enable_v3_hw(hisi_hba
);
665 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
666 enum sas_linkrate max
;
667 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[i
];
668 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
669 u32 prog_phy_link_rate
= hisi_sas_phy_read32(hisi_hba
, i
,
672 prog_phy_link_rate
&= ~CFG_PROG_PHY_LINK_RATE_MSK
;
673 if (!sas_phy
->phy
|| (sas_phy
->phy
->maximum_linkrate
<
674 SAS_LINK_RATE_1_5_GBPS
))
675 max
= SAS_LINK_RATE_12_0_GBPS
;
677 max
= sas_phy
->phy
->maximum_linkrate
;
678 prog_phy_link_rate
|= hisi_sas_get_prog_phy_linkrate_mask(max
);
679 hisi_sas_phy_write32(hisi_hba
, i
, PROG_PHY_LINK_RATE
,
681 hisi_sas_phy_write32(hisi_hba
, i
, SAS_RX_TRAIN_TIMER
, 0x13e80);
682 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT0
, 0xffffffff);
683 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT1
, 0xffffffff);
684 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT2
, 0xffffffff);
685 hisi_sas_phy_write32(hisi_hba
, i
, RXOP_CHECK_CFG_H
, 0x1000);
686 hisi_sas_phy_write32(hisi_hba
, i
, PHY_CTRL_RDY_MSK
, 0x0);
687 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_DWS_RESET_MSK
, 0x0);
688 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_OOB_RESTART_MSK
, 0x1);
689 hisi_sas_phy_write32(hisi_hba
, i
, STP_LINK_TIMER
, 0x7ffffff);
690 hisi_sas_phy_write32(hisi_hba
, i
, CON_CFG_DRIVER
, 0x2a0a01);
691 hisi_sas_phy_write32(hisi_hba
, i
, SAS_EC_INT_COAL_TIME
,
693 hisi_sas_phy_write32(hisi_hba
, i
, AIP_LIMIT
, 0x2ffff);
695 /* set value through firmware for 920B and later version */
696 if (pdev
->revision
< 0x30) {
697 hisi_sas_phy_write32(hisi_hba
, i
, SAS_SSP_CON_TIMER_CFG
, 0x32);
698 hisi_sas_phy_write32(hisi_hba
, i
, SERDES_CFG
, 0xffc00);
699 /* used for 12G negotiate */
700 hisi_sas_phy_write32(hisi_hba
, i
, COARSETUNE_TIME
, 0x1e);
703 /* get default FFE configuration for BIST */
704 for (j
= 0; j
< FFE_CFG_MAX
; j
++) {
705 u32 val
= hisi_sas_phy_read32(hisi_hba
, i
,
706 TXDEEMPH_G1
+ (j
* 0x4));
707 hisi_hba
->debugfs_bist_ffe
[i
][j
] = val
;
711 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
713 hisi_sas_write32(hisi_hba
,
714 DLVRY_Q_0_BASE_ADDR_HI
+ (i
* 0x14),
715 upper_32_bits(hisi_hba
->cmd_hdr_dma
[i
]));
717 hisi_sas_write32(hisi_hba
, DLVRY_Q_0_BASE_ADDR_LO
+ (i
* 0x14),
718 lower_32_bits(hisi_hba
->cmd_hdr_dma
[i
]));
720 hisi_sas_write32(hisi_hba
, DLVRY_Q_0_DEPTH
+ (i
* 0x14),
721 HISI_SAS_QUEUE_SLOTS
);
723 /* Completion queue */
724 hisi_sas_write32(hisi_hba
, COMPL_Q_0_BASE_ADDR_HI
+ (i
* 0x14),
725 upper_32_bits(hisi_hba
->complete_hdr_dma
[i
]));
727 hisi_sas_write32(hisi_hba
, COMPL_Q_0_BASE_ADDR_LO
+ (i
* 0x14),
728 lower_32_bits(hisi_hba
->complete_hdr_dma
[i
]));
730 hisi_sas_write32(hisi_hba
, COMPL_Q_0_DEPTH
+ (i
* 0x14),
731 HISI_SAS_QUEUE_SLOTS
);
735 hisi_sas_write32(hisi_hba
, ITCT_BASE_ADDR_LO
,
736 lower_32_bits(hisi_hba
->itct_dma
));
738 hisi_sas_write32(hisi_hba
, ITCT_BASE_ADDR_HI
,
739 upper_32_bits(hisi_hba
->itct_dma
));
742 hisi_sas_write32(hisi_hba
, IOST_BASE_ADDR_LO
,
743 lower_32_bits(hisi_hba
->iost_dma
));
745 hisi_sas_write32(hisi_hba
, IOST_BASE_ADDR_HI
,
746 upper_32_bits(hisi_hba
->iost_dma
));
749 hisi_sas_write32(hisi_hba
, IO_BROKEN_MSG_ADDR_LO
,
750 lower_32_bits(hisi_hba
->breakpoint_dma
));
752 hisi_sas_write32(hisi_hba
, IO_BROKEN_MSG_ADDR_HI
,
753 upper_32_bits(hisi_hba
->breakpoint_dma
));
755 /* SATA broken msg */
756 hisi_sas_write32(hisi_hba
, IO_SATA_BROKEN_MSG_ADDR_LO
,
757 lower_32_bits(hisi_hba
->sata_breakpoint_dma
));
759 hisi_sas_write32(hisi_hba
, IO_SATA_BROKEN_MSG_ADDR_HI
,
760 upper_32_bits(hisi_hba
->sata_breakpoint_dma
));
762 /* SATA initial fis */
763 hisi_sas_write32(hisi_hba
, SATA_INITI_D2H_STORE_ADDR_LO
,
764 lower_32_bits(hisi_hba
->initial_fis_dma
));
766 hisi_sas_write32(hisi_hba
, SATA_INITI_D2H_STORE_ADDR_HI
,
767 upper_32_bits(hisi_hba
->initial_fis_dma
));
769 /* RAS registers init */
770 hisi_sas_write32(hisi_hba
, SAS_RAS_INTR0_MASK
, 0x0);
771 hisi_sas_write32(hisi_hba
, SAS_RAS_INTR1_MASK
, 0x0);
772 hisi_sas_write32(hisi_hba
, SAS_RAS_INTR2_MASK
, 0x0);
773 hisi_sas_write32(hisi_hba
, CFG_SAS_RAS_INTR_MASK
, 0x0);
775 /* LED registers init */
776 hisi_sas_write32(hisi_hba
, SAS_CFG_DRIVE_VLD
, 0x80000ff);
777 hisi_sas_write32(hisi_hba
, SAS_GPIO_TX_0_1
, 0x80808080);
778 hisi_sas_write32(hisi_hba
, SAS_GPIO_TX_0_1
+ 0x4, 0x80808080);
779 /* Configure blink generator rate A to 1Hz and B to 4Hz */
780 hisi_sas_write32(hisi_hba
, SAS_GPIO_CFG_1
, 0x121700);
781 hisi_sas_write32(hisi_hba
, SAS_GPIO_CFG_0
, 0x800000);
784 static void config_phy_opt_mode_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
786 u32 cfg
= hisi_sas_phy_read32(hisi_hba
, phy_no
, PHY_CFG
);
788 cfg
&= ~PHY_CFG_DC_OPT_MSK
;
789 cfg
|= 1 << PHY_CFG_DC_OPT_OFF
;
790 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHY_CFG
, cfg
);
793 static void config_id_frame_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
795 struct sas_identify_frame identify_frame
;
796 u32
*identify_buffer
;
798 memset(&identify_frame
, 0, sizeof(identify_frame
));
799 identify_frame
.dev_type
= SAS_END_DEVICE
;
800 identify_frame
.frame_type
= 0;
801 identify_frame
._un1
= 1;
802 identify_frame
.initiator_bits
= SAS_PROTOCOL_ALL
;
803 identify_frame
.target_bits
= SAS_PROTOCOL_NONE
;
804 memcpy(&identify_frame
._un4_11
[0], hisi_hba
->sas_addr
, SAS_ADDR_SIZE
);
805 memcpy(&identify_frame
.sas_addr
[0], hisi_hba
->sas_addr
, SAS_ADDR_SIZE
);
806 identify_frame
.phy_id
= phy_no
;
807 identify_buffer
= (u32
*)(&identify_frame
);
809 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD0
,
810 __swab32(identify_buffer
[0]));
811 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD1
,
812 __swab32(identify_buffer
[1]));
813 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD2
,
814 __swab32(identify_buffer
[2]));
815 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD3
,
816 __swab32(identify_buffer
[3]));
817 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD4
,
818 __swab32(identify_buffer
[4]));
819 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD5
,
820 __swab32(identify_buffer
[5]));
823 static void setup_itct_v3_hw(struct hisi_hba
*hisi_hba
,
824 struct hisi_sas_device
*sas_dev
)
826 struct domain_device
*device
= sas_dev
->sas_device
;
827 struct device
*dev
= hisi_hba
->dev
;
828 u64 qw0
, device_id
= sas_dev
->device_id
;
829 struct hisi_sas_itct
*itct
= &hisi_hba
->itct
[device_id
];
830 struct domain_device
*parent_dev
= device
->parent
;
831 struct asd_sas_port
*sas_port
= device
->port
;
832 struct hisi_sas_port
*port
= to_hisi_sas_port(sas_port
);
835 memset(itct
, 0, sizeof(*itct
));
839 switch (sas_dev
->dev_type
) {
841 case SAS_EDGE_EXPANDER_DEVICE
:
842 case SAS_FANOUT_EXPANDER_DEVICE
:
843 qw0
= HISI_SAS_DEV_TYPE_SSP
<< ITCT_HDR_DEV_TYPE_OFF
;
846 case SAS_SATA_PENDING
:
847 if (parent_dev
&& dev_is_expander(parent_dev
->dev_type
))
848 qw0
= HISI_SAS_DEV_TYPE_STP
<< ITCT_HDR_DEV_TYPE_OFF
;
850 qw0
= HISI_SAS_DEV_TYPE_SATA
<< ITCT_HDR_DEV_TYPE_OFF
;
853 dev_warn(dev
, "setup itct: unsupported dev type (%d)\n",
857 qw0
|= ((1 << ITCT_HDR_VALID_OFF
) |
858 (device
->linkrate
<< ITCT_HDR_MCR_OFF
) |
859 (1 << ITCT_HDR_VLN_OFF
) |
860 (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF
) |
861 (1 << ITCT_HDR_AWT_CONTINUE_OFF
) |
862 (port
->id
<< ITCT_HDR_PORT_ID_OFF
));
863 itct
->qw0
= cpu_to_le64(qw0
);
866 memcpy(&sas_addr
, device
->sas_addr
, SAS_ADDR_SIZE
);
867 itct
->sas_addr
= cpu_to_le64(__swab64(sas_addr
));
870 if (!dev_is_sata(device
))
871 itct
->qw2
= cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF
) |
872 (0x1ULL
<< ITCT_HDR_RTOLT_OFF
));
875 static int clear_itct_v3_hw(struct hisi_hba
*hisi_hba
,
876 struct hisi_sas_device
*sas_dev
)
878 DECLARE_COMPLETION_ONSTACK(completion
);
879 u64 dev_id
= sas_dev
->device_id
;
880 struct hisi_sas_itct
*itct
= &hisi_hba
->itct
[dev_id
];
881 u32 reg_val
= hisi_sas_read32(hisi_hba
, ENT_INT_SRC3
);
882 struct device
*dev
= hisi_hba
->dev
;
884 sas_dev
->completion
= &completion
;
886 /* clear the itct interrupt state */
887 if (ENT_INT_SRC3_ITC_INT_MSK
& reg_val
)
888 hisi_sas_write32(hisi_hba
, ENT_INT_SRC3
,
889 ENT_INT_SRC3_ITC_INT_MSK
);
891 /* clear the itct table */
892 reg_val
= ITCT_CLR_EN_MSK
| (dev_id
& ITCT_DEV_MSK
);
893 hisi_sas_write32(hisi_hba
, ITCT_CLR
, reg_val
);
895 if (!wait_for_completion_timeout(sas_dev
->completion
,
896 HISI_SAS_CLEAR_ITCT_TIMEOUT
)) {
897 dev_warn(dev
, "failed to clear ITCT\n");
901 memset(itct
, 0, sizeof(struct hisi_sas_itct
));
905 static void dereg_device_v3_hw(struct hisi_hba
*hisi_hba
,
906 struct domain_device
*device
)
908 struct hisi_sas_slot
*slot
, *slot2
;
909 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
910 u32 cfg_abt_set_query_iptt
;
912 cfg_abt_set_query_iptt
= hisi_sas_read32(hisi_hba
,
913 CFG_ABT_SET_QUERY_IPTT
);
914 spin_lock(&sas_dev
->lock
);
915 list_for_each_entry_safe(slot
, slot2
, &sas_dev
->list
, entry
) {
916 cfg_abt_set_query_iptt
&= ~CFG_SET_ABORTED_IPTT_MSK
;
917 cfg_abt_set_query_iptt
|= (1 << CFG_SET_ABORTED_EN_OFF
) |
918 (slot
->idx
<< CFG_SET_ABORTED_IPTT_OFF
);
919 hisi_sas_write32(hisi_hba
, CFG_ABT_SET_QUERY_IPTT
,
920 cfg_abt_set_query_iptt
);
922 spin_unlock(&sas_dev
->lock
);
923 cfg_abt_set_query_iptt
&= ~(1 << CFG_SET_ABORTED_EN_OFF
);
924 hisi_sas_write32(hisi_hba
, CFG_ABT_SET_QUERY_IPTT
,
925 cfg_abt_set_query_iptt
);
926 hisi_sas_write32(hisi_hba
, CFG_ABT_SET_IPTT_DONE
,
927 1 << CFG_ABT_SET_IPTT_DONE_OFF
);
930 static int reset_hw_v3_hw(struct hisi_hba
*hisi_hba
)
932 struct device
*dev
= hisi_hba
->dev
;
936 hisi_sas_write32(hisi_hba
, DLVRY_QUEUE_ENABLE
, 0);
938 /* Disable all of the PHYs */
939 hisi_sas_stop_phys(hisi_hba
);
942 /* Ensure axi bus idle */
943 ret
= hisi_sas_read32_poll_timeout(AXI_CFG
, val
, !val
,
946 dev_err(dev
, "axi bus is not idle, ret = %d!\n", ret
);
950 if (ACPI_HANDLE(dev
)) {
953 s
= acpi_evaluate_object(ACPI_HANDLE(dev
), "_RST", NULL
, NULL
);
954 if (ACPI_FAILURE(s
)) {
955 dev_err(dev
, "Reset failed\n");
959 dev_err(dev
, "no reset method!\n");
966 static int hw_init_v3_hw(struct hisi_hba
*hisi_hba
)
968 struct device
*dev
= hisi_hba
->dev
;
969 struct acpi_device
*acpi_dev
;
970 union acpi_object
*obj
;
974 rc
= reset_hw_v3_hw(hisi_hba
);
976 dev_err(dev
, "hisi_sas_reset_hw failed, rc=%d\n", rc
);
981 init_reg_v3_hw(hisi_hba
);
983 if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid
)) {
984 dev_err(dev
, "Parse GUID failed\n");
989 * This DSM handles some hardware-related configurations:
990 * 1. Switch over to MSI error handling in kernel
991 * 2. BIOS *may* reset some register values through this method
993 obj
= acpi_evaluate_dsm(ACPI_HANDLE(dev
), &guid
, 0,
994 DSM_FUNC_ERR_HANDLE_MSI
, NULL
);
996 dev_warn(dev
, "can not find DSM method, ignore\n");
1000 acpi_dev
= ACPI_COMPANION(dev
);
1001 if (!acpi_device_power_manageable(acpi_dev
))
1002 dev_notice(dev
, "neither _PS0 nor _PR0 is defined\n");
1006 static void enable_phy_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1008 u32 cfg
= hisi_sas_phy_read32(hisi_hba
, phy_no
, PHY_CFG
);
1010 cfg
|= PHY_CFG_ENA_MSK
;
1011 cfg
&= ~PHY_CFG_PHY_RST_MSK
;
1012 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHY_CFG
, cfg
);
1015 static void disable_phy_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1017 u32 cfg
= hisi_sas_phy_read32(hisi_hba
, phy_no
, PHY_CFG
);
1018 u32 irq_msk
= hisi_sas_phy_read32(hisi_hba
, phy_no
, CHL_INT2_MSK
);
1019 static const u32 msk
= BIT(CHL_INT2_RX_DISP_ERR_OFF
) |
1020 BIT(CHL_INT2_RX_CODE_ERR_OFF
) |
1021 BIT(CHL_INT2_RX_INVLD_DW_OFF
);
1024 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT2_MSK
, msk
| irq_msk
);
1026 cfg
&= ~PHY_CFG_ENA_MSK
;
1027 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHY_CFG
, cfg
);
1031 state
= hisi_sas_read32(hisi_hba
, PHY_STATE
);
1032 if (state
& BIT(phy_no
)) {
1033 cfg
|= PHY_CFG_PHY_RST_MSK
;
1034 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHY_CFG
, cfg
);
1039 hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_INVLD_DW
);
1040 hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_DISP_ERR
);
1041 hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_CODE_ERR
);
1043 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT2
, msk
);
1044 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT2_MSK
, irq_msk
);
/* Start a PHY: program its identify frame, set optical/DC options, enable. */
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}
1054 static void phy_hard_reset_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1056 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1059 hisi_sas_phy_enable(hisi_hba
, phy_no
, 0);
1060 if (phy
->identify
.device_type
== SAS_END_DEVICE
) {
1061 txid_auto
= hisi_sas_phy_read32(hisi_hba
, phy_no
, TXID_AUTO
);
1062 hisi_sas_phy_write32(hisi_hba
, phy_no
, TXID_AUTO
,
1063 txid_auto
| TX_HARDRST_MSK
);
1066 hisi_sas_phy_enable(hisi_hba
, phy_no
, 1);
1069 static enum sas_linkrate
phy_get_max_linkrate_v3_hw(void)
1071 return SAS_LINK_RATE_12_0_GBPS
;
1074 static void phys_init_v3_hw(struct hisi_hba
*hisi_hba
)
1078 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
1079 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[i
];
1080 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1082 if (!sas_phy
->phy
->enabled
)
1085 hisi_sas_phy_enable(hisi_hba
, i
, 1);
1089 static void sl_notify_ssp_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1093 sl_control
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SL_CONTROL
);
1094 sl_control
|= SL_CONTROL_NOTIFY_EN_MSK
;
1095 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_CONTROL
, sl_control
);
1097 sl_control
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SL_CONTROL
);
1098 sl_control
&= ~SL_CONTROL_NOTIFY_EN_MSK
;
1099 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_CONTROL
, sl_control
);
1102 static int get_wideport_bitmap_v3_hw(struct hisi_hba
*hisi_hba
, int port_id
)
1105 u32 phy_port_num_ma
= hisi_sas_read32(hisi_hba
, PHY_PORT_NUM_MA
);
1106 u32 phy_state
= hisi_sas_read32(hisi_hba
, PHY_STATE
);
1108 for (i
= 0; i
< hisi_hba
->n_phy
; i
++)
1109 if (phy_state
& BIT(i
))
1110 if (((phy_port_num_ma
>> (i
* 4)) & 0xf) == port_id
)
1116 static void start_delivery_v3_hw(struct hisi_sas_dq
*dq
)
1118 struct hisi_hba
*hisi_hba
= dq
->hisi_hba
;
1119 struct hisi_sas_slot
*s
, *s1
, *s2
= NULL
;
1120 int dlvry_queue
= dq
->id
;
1123 list_for_each_entry_safe(s
, s1
, &dq
->list
, delivery
) {
1127 list_del(&s
->delivery
);
1134 * Ensure that memories for slots built on other CPUs is observed.
1137 wp
= (s2
->dlvry_queue_slot
+ 1) % HISI_SAS_QUEUE_SLOTS
;
1139 hisi_sas_write32(hisi_hba
, DLVRY_Q_0_WR_PTR
+ (dlvry_queue
* 0x14), wp
);
1142 static void prep_prd_sge_v3_hw(struct hisi_hba
*hisi_hba
,
1143 struct hisi_sas_slot
*slot
,
1144 struct hisi_sas_cmd_hdr
*hdr
,
1145 struct scatterlist
*scatter
,
1148 struct hisi_sas_sge_page
*sge_page
= hisi_sas_sge_addr_mem(slot
);
1149 struct scatterlist
*sg
;
1152 for_each_sg(scatter
, sg
, n_elem
, i
) {
1153 struct hisi_sas_sge
*entry
= &sge_page
->sge
[i
];
1155 entry
->addr
= cpu_to_le64(sg_dma_address(sg
));
1156 entry
->page_ctrl_0
= entry
->page_ctrl_1
= 0;
1157 entry
->data_len
= cpu_to_le32(sg_dma_len(sg
));
1158 entry
->data_off
= 0;
1161 hdr
->prd_table_addr
= cpu_to_le64(hisi_sas_sge_addr_dma(slot
));
1163 hdr
->sg_len
|= cpu_to_le32(n_elem
<< CMD_HDR_DATA_SGL_LEN_OFF
);
1166 static void prep_prd_sge_dif_v3_hw(struct hisi_hba
*hisi_hba
,
1167 struct hisi_sas_slot
*slot
,
1168 struct hisi_sas_cmd_hdr
*hdr
,
1169 struct scatterlist
*scatter
,
1172 struct hisi_sas_sge_dif_page
*sge_dif_page
;
1173 struct scatterlist
*sg
;
1176 sge_dif_page
= hisi_sas_sge_dif_addr_mem(slot
);
1178 for_each_sg(scatter
, sg
, n_elem
, i
) {
1179 struct hisi_sas_sge
*entry
= &sge_dif_page
->sge
[i
];
1181 entry
->addr
= cpu_to_le64(sg_dma_address(sg
));
1182 entry
->page_ctrl_0
= 0;
1183 entry
->page_ctrl_1
= 0;
1184 entry
->data_len
= cpu_to_le32(sg_dma_len(sg
));
1185 entry
->data_off
= 0;
1188 hdr
->dif_prd_table_addr
=
1189 cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot
));
1191 hdr
->sg_len
|= cpu_to_le32(n_elem
<< CMD_HDR_DIF_SGL_LEN_OFF
);
1194 static u32
get_prot_chk_msk_v3_hw(struct scsi_cmnd
*scsi_cmnd
)
1196 unsigned char prot_flags
= scsi_cmnd
->prot_flags
;
1198 if (prot_flags
& SCSI_PROT_REF_CHECK
)
1199 return T10_CHK_APP_TAG_MSK
;
1200 return T10_CHK_REF_TAG_MSK
| T10_CHK_APP_TAG_MSK
;
1203 static void fill_prot_v3_hw(struct scsi_cmnd
*scsi_cmnd
,
1204 struct hisi_sas_protect_iu_v3_hw
*prot
)
1206 unsigned char prot_op
= scsi_get_prot_op(scsi_cmnd
);
1207 unsigned int interval
= scsi_prot_interval(scsi_cmnd
);
1208 u32 lbrt_chk_val
= t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd
));
1211 case SCSI_PROT_READ_INSERT
:
1212 prot
->dw0
|= T10_INSRT_EN_MSK
;
1213 prot
->lbrtgv
= lbrt_chk_val
;
1215 case SCSI_PROT_READ_STRIP
:
1216 prot
->dw0
|= (T10_RMV_EN_MSK
| T10_CHK_EN_MSK
);
1217 prot
->lbrtcv
= lbrt_chk_val
;
1218 prot
->dw4
|= get_prot_chk_msk_v3_hw(scsi_cmnd
);
1220 case SCSI_PROT_READ_PASS
:
1221 prot
->dw0
|= T10_CHK_EN_MSK
;
1222 prot
->lbrtcv
= lbrt_chk_val
;
1223 prot
->dw4
|= get_prot_chk_msk_v3_hw(scsi_cmnd
);
1225 case SCSI_PROT_WRITE_INSERT
:
1226 prot
->dw0
|= T10_INSRT_EN_MSK
;
1227 prot
->lbrtgv
= lbrt_chk_val
;
1229 case SCSI_PROT_WRITE_STRIP
:
1230 prot
->dw0
|= (T10_RMV_EN_MSK
| T10_CHK_EN_MSK
);
1231 prot
->lbrtcv
= lbrt_chk_val
;
1233 case SCSI_PROT_WRITE_PASS
:
1234 prot
->dw0
|= T10_CHK_EN_MSK
;
1235 prot
->lbrtcv
= lbrt_chk_val
;
1236 prot
->dw4
|= get_prot_chk_msk_v3_hw(scsi_cmnd
);
1239 WARN(1, "prot_op(0x%x) is not valid\n", prot_op
);
1247 prot
->dw0
|= (0x1 << USR_DATA_BLOCK_SZ_OFF
);
1250 prot
->dw0
|= (0x2 << USR_DATA_BLOCK_SZ_OFF
);
1253 WARN(1, "protection interval (0x%x) invalid\n",
1258 prot
->dw0
|= INCR_LBRT_MSK
;
1261 static void prep_ssp_v3_hw(struct hisi_hba
*hisi_hba
,
1262 struct hisi_sas_slot
*slot
)
1264 struct sas_task
*task
= slot
->task
;
1265 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1266 struct domain_device
*device
= task
->dev
;
1267 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1268 struct hisi_sas_port
*port
= slot
->port
;
1269 struct sas_ssp_task
*ssp_task
= &task
->ssp_task
;
1270 struct scsi_cmnd
*scsi_cmnd
= ssp_task
->cmd
;
1271 struct sas_tmf_task
*tmf
= slot
->tmf
;
1272 int has_data
= 0, priority
= !!tmf
;
1273 unsigned char prot_op
;
1275 u32 dw1
= 0, dw2
= 0, len
= 0;
1277 hdr
->dw0
= cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF
) |
1278 (2 << CMD_HDR_TLR_CTRL_OFF
) |
1279 (port
->id
<< CMD_HDR_PORT_OFF
) |
1280 (priority
<< CMD_HDR_PRIORITY_OFF
) |
1281 (1 << CMD_HDR_CMD_OFF
)); /* ssp */
1283 dw1
= 1 << CMD_HDR_VDTL_OFF
;
1285 dw1
|= 2 << CMD_HDR_FRAME_TYPE_OFF
;
1286 dw1
|= DIR_NO_DATA
<< CMD_HDR_DIR_OFF
;
1288 prot_op
= scsi_get_prot_op(scsi_cmnd
);
1289 dw1
|= 1 << CMD_HDR_FRAME_TYPE_OFF
;
1290 switch (scsi_cmnd
->sc_data_direction
) {
1293 dw1
|= DIR_TO_DEVICE
<< CMD_HDR_DIR_OFF
;
1295 case DMA_FROM_DEVICE
:
1297 dw1
|= DIR_TO_INI
<< CMD_HDR_DIR_OFF
;
1300 dw1
&= ~CMD_HDR_DIR_MSK
;
1304 /* map itct entry */
1305 dw1
|= sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
;
1307 dw2
= (((sizeof(struct ssp_command_iu
) + sizeof(struct ssp_frame_hdr
)
1308 + 3) / 4) << CMD_HDR_CFL_OFF
) |
1309 ((HISI_SAS_MAX_SSP_RESP_SZ
/ 4) << CMD_HDR_MRFL_OFF
) |
1310 (2 << CMD_HDR_SG_MOD_OFF
);
1311 hdr
->dw2
= cpu_to_le32(dw2
);
1312 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
);
1315 prep_prd_sge_v3_hw(hisi_hba
, slot
, hdr
, task
->scatter
,
1318 if (scsi_prot_sg_count(scsi_cmnd
))
1319 prep_prd_sge_dif_v3_hw(hisi_hba
, slot
, hdr
,
1320 scsi_prot_sglist(scsi_cmnd
),
1324 hdr
->cmd_table_addr
= cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot
));
1325 hdr
->sts_buffer_addr
= cpu_to_le64(hisi_sas_status_buf_addr_dma(slot
));
1327 buf_cmd
= hisi_sas_cmd_hdr_addr_mem(slot
) +
1328 sizeof(struct ssp_frame_hdr
);
1330 memcpy(buf_cmd
, &task
->ssp_task
.LUN
, 8);
1332 buf_cmd
[9] = ssp_task
->task_attr
;
1333 memcpy(buf_cmd
+ 12, scsi_cmnd
->cmnd
, scsi_cmnd
->cmd_len
);
1335 buf_cmd
[10] = tmf
->tmf
;
1337 case TMF_ABORT_TASK
:
1338 case TMF_QUERY_TASK
:
1340 (tmf
->tag_of_task_to_be_managed
>> 8) & 0xff;
1342 tmf
->tag_of_task_to_be_managed
& 0xff;
1349 if (has_data
&& (prot_op
!= SCSI_PROT_NORMAL
)) {
1350 struct hisi_sas_protect_iu_v3_hw prot
;
1353 hdr
->dw7
|= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF
);
1354 dw1
|= CMD_HDR_PIR_MSK
;
1355 buf_cmd_prot
= hisi_sas_cmd_hdr_addr_mem(slot
) +
1356 sizeof(struct ssp_frame_hdr
) +
1357 sizeof(struct ssp_command_iu
);
1359 memset(&prot
, 0, sizeof(struct hisi_sas_protect_iu_v3_hw
));
1360 fill_prot_v3_hw(scsi_cmnd
, &prot
);
1361 memcpy(buf_cmd_prot
, &prot
,
1362 sizeof(struct hisi_sas_protect_iu_v3_hw
));
1364 * For READ, we need length of info read to memory, while for
1365 * WRITE we need length of data written to the disk.
1367 if (prot_op
== SCSI_PROT_WRITE_INSERT
||
1368 prot_op
== SCSI_PROT_READ_INSERT
||
1369 prot_op
== SCSI_PROT_WRITE_PASS
||
1370 prot_op
== SCSI_PROT_READ_PASS
) {
1371 unsigned int interval
= scsi_prot_interval(scsi_cmnd
);
1372 unsigned int ilog2_interval
= ilog2(interval
);
1374 len
= (task
->total_xfer_len
>> ilog2_interval
) * 8;
1378 hdr
->dw1
= cpu_to_le32(dw1
);
1380 hdr
->data_transfer_len
= cpu_to_le32(task
->total_xfer_len
+ len
);
1383 static void prep_smp_v3_hw(struct hisi_hba
*hisi_hba
,
1384 struct hisi_sas_slot
*slot
)
1386 struct sas_task
*task
= slot
->task
;
1387 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1388 struct domain_device
*device
= task
->dev
;
1389 struct hisi_sas_port
*port
= slot
->port
;
1390 struct scatterlist
*sg_req
;
1391 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1392 dma_addr_t req_dma_addr
;
1393 unsigned int req_len
;
1396 sg_req
= &task
->smp_task
.smp_req
;
1397 req_len
= sg_dma_len(sg_req
);
1398 req_dma_addr
= sg_dma_address(sg_req
);
1402 hdr
->dw0
= cpu_to_le32((port
->id
<< CMD_HDR_PORT_OFF
) |
1403 (1 << CMD_HDR_PRIORITY_OFF
) | /* high pri */
1404 (2 << CMD_HDR_CMD_OFF
)); /* smp */
1406 /* map itct entry */
1407 hdr
->dw1
= cpu_to_le32((sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
) |
1408 (1 << CMD_HDR_FRAME_TYPE_OFF
) |
1409 (DIR_NO_DATA
<< CMD_HDR_DIR_OFF
));
1412 hdr
->dw2
= cpu_to_le32((((req_len
- 4) / 4) << CMD_HDR_CFL_OFF
) |
1413 (HISI_SAS_MAX_SMP_RESP_SZ
/ 4 <<
1416 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
<< CMD_HDR_IPTT_OFF
);
1418 hdr
->cmd_table_addr
= cpu_to_le64(req_dma_addr
);
1419 hdr
->sts_buffer_addr
= cpu_to_le64(hisi_sas_status_buf_addr_dma(slot
));
1422 static void prep_ata_v3_hw(struct hisi_hba
*hisi_hba
,
1423 struct hisi_sas_slot
*slot
)
1425 struct sas_task
*task
= slot
->task
;
1426 struct domain_device
*device
= task
->dev
;
1427 struct domain_device
*parent_dev
= device
->parent
;
1428 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1429 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1430 struct asd_sas_port
*sas_port
= device
->port
;
1431 struct hisi_sas_port
*port
= to_hisi_sas_port(sas_port
);
1433 int has_data
= 0, hdr_tag
= 0;
1434 u32 dw1
= 0, dw2
= 0;
1436 hdr
->dw0
= cpu_to_le32(port
->id
<< CMD_HDR_PORT_OFF
);
1437 if (parent_dev
&& dev_is_expander(parent_dev
->dev_type
))
1438 hdr
->dw0
|= cpu_to_le32(3 << CMD_HDR_CMD_OFF
);
1440 hdr
->dw0
|= cpu_to_le32(4U << CMD_HDR_CMD_OFF
);
1442 switch (task
->data_dir
) {
1445 dw1
|= DIR_TO_DEVICE
<< CMD_HDR_DIR_OFF
;
1447 case DMA_FROM_DEVICE
:
1449 dw1
|= DIR_TO_INI
<< CMD_HDR_DIR_OFF
;
1452 dw1
&= ~CMD_HDR_DIR_MSK
;
1455 if ((task
->ata_task
.fis
.command
== ATA_CMD_DEV_RESET
) &&
1456 (task
->ata_task
.fis
.control
& ATA_SRST
))
1457 dw1
|= 1 << CMD_HDR_RESET_OFF
;
1459 dw1
|= (hisi_sas_get_ata_protocol(
1460 &task
->ata_task
.fis
, task
->data_dir
))
1461 << CMD_HDR_FRAME_TYPE_OFF
;
1462 dw1
|= sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
;
1464 if (FIS_CMD_IS_UNCONSTRAINED(task
->ata_task
.fis
))
1465 dw1
|= 1 << CMD_HDR_UNCON_CMD_OFF
;
1467 hdr
->dw1
= cpu_to_le32(dw1
);
1470 if (task
->ata_task
.use_ncq
) {
1471 struct ata_queued_cmd
*qc
= task
->uldd_task
;
1474 task
->ata_task
.fis
.sector_count
|= (u8
) (hdr_tag
<< 3);
1475 dw2
|= hdr_tag
<< CMD_HDR_NCQ_TAG_OFF
;
1478 dw2
|= (HISI_SAS_MAX_STP_RESP_SZ
/ 4) << CMD_HDR_CFL_OFF
|
1479 2 << CMD_HDR_SG_MOD_OFF
;
1480 hdr
->dw2
= cpu_to_le32(dw2
);
1483 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
);
1486 prep_prd_sge_v3_hw(hisi_hba
, slot
, hdr
, task
->scatter
,
1489 hdr
->data_transfer_len
= cpu_to_le32(task
->total_xfer_len
);
1490 hdr
->cmd_table_addr
= cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot
));
1491 hdr
->sts_buffer_addr
= cpu_to_le64(hisi_sas_status_buf_addr_dma(slot
));
1493 buf_cmd
= hisi_sas_cmd_hdr_addr_mem(slot
);
1495 if (likely(!task
->ata_task
.device_control_reg_update
))
1496 task
->ata_task
.fis
.flags
|= 0x80; /* C=1: update ATA cmd reg */
1497 /* fill in command FIS */
1498 memcpy(buf_cmd
, &task
->ata_task
.fis
, sizeof(struct host_to_dev_fis
));
1501 static void prep_abort_v3_hw(struct hisi_hba
*hisi_hba
,
1502 struct hisi_sas_slot
*slot
)
1504 struct sas_task
*task
= slot
->task
;
1505 struct sas_internal_abort_task
*abort
= &task
->abort_task
;
1506 struct domain_device
*dev
= task
->dev
;
1507 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1508 struct hisi_sas_port
*port
= slot
->port
;
1509 struct hisi_sas_device
*sas_dev
= dev
->lldd_dev
;
1510 bool sata
= dev_is_sata(dev
);
1513 hdr
->dw0
= cpu_to_le32((5U << CMD_HDR_CMD_OFF
) | /* abort */
1514 (port
->id
<< CMD_HDR_PORT_OFF
) |
1515 (sata
<< CMD_HDR_ABORT_DEVICE_TYPE_OFF
) |
1516 (abort
->type
<< CMD_HDR_ABORT_FLAG_OFF
));
1519 hdr
->dw1
= cpu_to_le32(sas_dev
->device_id
1520 << CMD_HDR_DEV_ID_OFF
);
1523 hdr
->dw7
= cpu_to_le32(abort
->tag
<< CMD_HDR_ABORT_IPTT_OFF
);
1524 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
);
1527 static irqreturn_t
phy_up_v3_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1531 u32 context
, port_id
, link_rate
;
1532 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1533 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1534 struct device
*dev
= hisi_hba
->dev
;
1536 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_PHY_ENA_MSK
, 1);
1538 port_id
= hisi_sas_read32(hisi_hba
, PHY_PORT_NUM_MA
);
1539 port_id
= (port_id
>> (4 * phy_no
)) & 0xf;
1540 link_rate
= hisi_sas_read32(hisi_hba
, PHY_CONN_RATE
);
1541 link_rate
= (link_rate
>> (phy_no
* 4)) & 0xf;
1543 if (port_id
== 0xf) {
1544 dev_err(dev
, "phyup: phy%d invalid portid\n", phy_no
);
1548 sas_phy
->linkrate
= link_rate
;
1549 phy
->phy_type
&= ~(PORT_TYPE_SAS
| PORT_TYPE_SATA
);
1551 /* Check for SATA dev */
1552 context
= hisi_sas_read32(hisi_hba
, PHY_CONTEXT
);
1553 if (context
& (1 << phy_no
)) {
1554 struct hisi_sas_initial_fis
*initial_fis
;
1555 struct dev_to_host_fis
*fis
;
1556 u8 attached_sas_addr
[SAS_ADDR_SIZE
] = {0};
1557 struct Scsi_Host
*shost
= hisi_hba
->shost
;
1559 dev_info(dev
, "phyup: phy%d link_rate=%d(sata)\n", phy_no
, link_rate
);
1560 initial_fis
= &hisi_hba
->initial_fis
[phy_no
];
1561 fis
= &initial_fis
->fis
;
1563 /* check ERR bit of Status Register */
1564 if (fis
->status
& ATA_ERR
) {
1565 dev_warn(dev
, "sata int: phy%d FIS status: 0x%x\n",
1566 phy_no
, fis
->status
);
1567 hisi_sas_notify_phy_event(phy
, HISI_PHYE_LINK_RESET
);
1572 sas_phy
->oob_mode
= SATA_OOB_MODE
;
1573 attached_sas_addr
[0] = 0x50;
1574 attached_sas_addr
[6] = shost
->host_no
;
1575 attached_sas_addr
[7] = phy_no
;
1576 memcpy(sas_phy
->attached_sas_addr
,
1579 memcpy(sas_phy
->frame_rcvd
, fis
,
1580 sizeof(struct dev_to_host_fis
));
1581 phy
->phy_type
|= PORT_TYPE_SATA
;
1582 phy
->identify
.device_type
= SAS_SATA_DEV
;
1583 phy
->frame_rcvd_size
= sizeof(struct dev_to_host_fis
);
1584 phy
->identify
.target_port_protocols
= SAS_PROTOCOL_SATA
;
1586 u32
*frame_rcvd
= (u32
*)sas_phy
->frame_rcvd
;
1587 struct sas_identify_frame
*id
=
1588 (struct sas_identify_frame
*)frame_rcvd
;
1590 dev_info(dev
, "phyup: phy%d link_rate=%d\n", phy_no
, link_rate
);
1591 for (i
= 0; i
< 6; i
++) {
1592 u32 idaf
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1593 RX_IDAF_DWORD0
+ (i
* 4));
1594 frame_rcvd
[i
] = __swab32(idaf
);
1596 sas_phy
->oob_mode
= SAS_OOB_MODE
;
1597 memcpy(sas_phy
->attached_sas_addr
,
1600 phy
->phy_type
|= PORT_TYPE_SAS
;
1601 phy
->identify
.device_type
= id
->dev_type
;
1602 phy
->frame_rcvd_size
= sizeof(struct sas_identify_frame
);
1603 if (phy
->identify
.device_type
== SAS_END_DEVICE
)
1604 phy
->identify
.target_port_protocols
=
1606 else if (phy
->identify
.device_type
!= SAS_PHY_UNUSED
)
1607 phy
->identify
.target_port_protocols
=
1611 phy
->port_id
= port_id
;
1612 spin_lock(&phy
->lock
);
1613 /* Delete timer and set phy_attached atomically */
1614 del_timer(&phy
->timer
);
1615 phy
->phy_attached
= 1;
1616 spin_unlock(&phy
->lock
);
1619 * Call pm_runtime_get_noresume() which pairs with
1620 * hisi_sas_phyup_pm_work() -> pm_runtime_put_sync().
1621 * For failure call pm_runtime_put() as we are in a hardirq context.
1623 pm_runtime_get_noresume(dev
);
1624 res
= hisi_sas_notify_phy_event(phy
, HISI_PHYE_PHY_UP_PM
);
1626 pm_runtime_put(dev
);
1631 if (phy
->reset_completion
)
1632 complete(phy
->reset_completion
);
1633 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
,
1634 CHL_INT0_SL_PHY_ENABLE_MSK
);
1635 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_PHY_ENA_MSK
, 0);
1640 static irqreturn_t
phy_down_v3_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1642 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1643 u32 phy_state
, sl_ctrl
, txid_auto
;
1644 struct device
*dev
= hisi_hba
->dev
;
1646 atomic_inc(&phy
->down_cnt
);
1648 del_timer(&phy
->timer
);
1649 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_NOT_RDY_MSK
, 1);
1651 phy_state
= hisi_sas_read32(hisi_hba
, PHY_STATE
);
1652 dev_info(dev
, "phydown: phy%d phy_state=0x%x\n", phy_no
, phy_state
);
1653 hisi_sas_phy_down(hisi_hba
, phy_no
, (phy_state
& 1 << phy_no
) ? 1 : 0,
1656 sl_ctrl
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SL_CONTROL
);
1657 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_CONTROL
,
1658 sl_ctrl
&(~SL_CTA_MSK
));
1660 txid_auto
= hisi_sas_phy_read32(hisi_hba
, phy_no
, TXID_AUTO
);
1661 hisi_sas_phy_write32(hisi_hba
, phy_no
, TXID_AUTO
,
1662 txid_auto
| CT3_MSK
);
1664 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
, CHL_INT0_NOT_RDY_MSK
);
1665 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_NOT_RDY_MSK
, 0);
1670 static irqreturn_t
phy_bcast_v3_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1672 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1675 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_RX_BCAST_CHK_MSK
, 1);
1676 bcast_status
= hisi_sas_phy_read32(hisi_hba
, phy_no
, RX_PRIMS_STATUS
);
1677 if (bcast_status
& RX_BCAST_CHG_MSK
)
1678 hisi_sas_phy_bcast(phy
);
1679 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
,
1680 CHL_INT0_SL_RX_BCST_ACK_MSK
);
1681 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_RX_BCAST_CHK_MSK
, 0);
1686 static irqreturn_t
int_phy_up_down_bcast_v3_hw(int irq_no
, void *p
)
1688 struct hisi_hba
*hisi_hba
= p
;
1691 irqreturn_t res
= IRQ_NONE
;
1693 irq_msk
= hisi_sas_read32(hisi_hba
, CHNL_INT_STATUS
)
1697 u32 irq_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1699 u32 phy_state
= hisi_sas_read32(hisi_hba
, PHY_STATE
);
1700 int rdy
= phy_state
& (1 << phy_no
);
1703 if (irq_value
& CHL_INT0_SL_PHY_ENABLE_MSK
)
1705 if (phy_up_v3_hw(phy_no
, hisi_hba
)
1708 if (irq_value
& CHL_INT0_SL_RX_BCST_ACK_MSK
)
1710 if (phy_bcast_v3_hw(phy_no
, hisi_hba
)
1714 if (irq_value
& CHL_INT0_NOT_RDY_MSK
)
1716 if (phy_down_v3_hw(phy_no
, hisi_hba
)
1728 static const struct hisi_sas_hw_error port_axi_error
[] = {
1730 .irq_msk
= BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF
),
1731 .msg
= "dmac_tx_ecc_bad_err",
1734 .irq_msk
= BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF
),
1735 .msg
= "dmac_rx_ecc_bad_err",
1738 .irq_msk
= BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF
),
1739 .msg
= "dma_tx_axi_wr_err",
1742 .irq_msk
= BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF
),
1743 .msg
= "dma_tx_axi_rd_err",
1746 .irq_msk
= BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF
),
1747 .msg
= "dma_rx_axi_wr_err",
1750 .irq_msk
= BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF
),
1751 .msg
= "dma_rx_axi_rd_err",
1754 .irq_msk
= BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF
),
1755 .msg
= "dma_tx_fifo_err",
1758 .irq_msk
= BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF
),
1759 .msg
= "dma_rx_fifo_err",
1762 .irq_msk
= BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF
),
1763 .msg
= "dma_tx_axi_ruser_err",
1766 .irq_msk
= BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF
),
1767 .msg
= "dma_rx_axi_ruser_err",
1771 static void handle_chl_int1_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1773 u32 irq_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, CHL_INT1
);
1774 u32 irq_msk
= hisi_sas_phy_read32(hisi_hba
, phy_no
, CHL_INT1_MSK
);
1775 struct device
*dev
= hisi_hba
->dev
;
1778 irq_value
&= ~irq_msk
;
1780 dev_warn(dev
, "phy%d channel int 1 received with status bits cleared\n",
1785 for (i
= 0; i
< ARRAY_SIZE(port_axi_error
); i
++) {
1786 const struct hisi_sas_hw_error
*error
= &port_axi_error
[i
];
1788 if (!(irq_value
& error
->irq_msk
))
1791 dev_err(dev
, "%s error (phy%d 0x%x) found!\n",
1792 error
->msg
, phy_no
, irq_value
);
1793 queue_work(hisi_hba
->wq
, &hisi_hba
->rst_work
);
1796 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT1
, irq_value
);
1799 static void phy_get_events_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1801 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1802 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1803 struct sas_phy
*sphy
= sas_phy
->phy
;
1804 unsigned long flags
;
1807 spin_lock_irqsave(&phy
->lock
, flags
);
1809 /* loss dword sync */
1810 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_DWS_LOST
);
1811 sphy
->loss_of_dword_sync_count
+= reg_value
;
1813 /* phy reset problem */
1814 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_RESET_PROB
);
1815 sphy
->phy_reset_problem_count
+= reg_value
;
1818 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_INVLD_DW
);
1819 sphy
->invalid_dword_count
+= reg_value
;
1822 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_DISP_ERR
);
1823 sphy
->running_disparity_error_count
+= reg_value
;
1825 /* code violation error */
1826 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_CODE_ERR
);
1827 phy
->code_violation_err_count
+= reg_value
;
1829 spin_unlock_irqrestore(&phy
->lock
, flags
);
1832 static void handle_chl_int2_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1834 u32 irq_msk
= hisi_sas_phy_read32(hisi_hba
, phy_no
, CHL_INT2_MSK
);
1835 u32 irq_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, CHL_INT2
);
1836 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1837 struct pci_dev
*pci_dev
= hisi_hba
->pci_dev
;
1838 struct device
*dev
= hisi_hba
->dev
;
1839 static const u32 msk
= BIT(CHL_INT2_RX_DISP_ERR_OFF
) |
1840 BIT(CHL_INT2_RX_CODE_ERR_OFF
) |
1841 BIT(CHL_INT2_RX_INVLD_DW_OFF
);
1843 irq_value
&= ~irq_msk
;
1845 dev_warn(dev
, "phy%d channel int 2 received with status bits cleared\n",
1850 if (irq_value
& BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF
)) {
1851 dev_warn(dev
, "phy%d identify timeout\n", phy_no
);
1852 hisi_sas_notify_phy_event(phy
, HISI_PHYE_LINK_RESET
);
1855 if (irq_value
& BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF
)) {
1856 u32 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1857 STP_LINK_TIMEOUT_STATE
);
1859 dev_warn(dev
, "phy%d stp link timeout (0x%x)\n",
1861 if (reg_value
& BIT(4))
1862 hisi_sas_notify_phy_event(phy
, HISI_PHYE_LINK_RESET
);
1865 if (pci_dev
->revision
> 0x20 && (irq_value
& msk
)) {
1866 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1867 struct sas_phy
*sphy
= sas_phy
->phy
;
1869 phy_get_events_v3_hw(hisi_hba
, phy_no
);
1871 if (irq_value
& BIT(CHL_INT2_RX_INVLD_DW_OFF
))
1872 dev_info(dev
, "phy%d invalid dword cnt: %u\n", phy_no
,
1873 sphy
->invalid_dword_count
);
1875 if (irq_value
& BIT(CHL_INT2_RX_CODE_ERR_OFF
))
1876 dev_info(dev
, "phy%d code violation cnt: %u\n", phy_no
,
1877 phy
->code_violation_err_count
);
1879 if (irq_value
& BIT(CHL_INT2_RX_DISP_ERR_OFF
))
1880 dev_info(dev
, "phy%d disparity error cnt: %u\n", phy_no
,
1881 sphy
->running_disparity_error_count
);
1884 if ((irq_value
& BIT(CHL_INT2_RX_INVLD_DW_OFF
)) &&
1885 (pci_dev
->revision
== 0x20)) {
1889 rc
= hisi_sas_read32_poll_timeout_atomic(
1890 HILINK_ERR_DFX
, reg_value
,
1891 !((reg_value
>> 8) & BIT(phy_no
)),
1894 hisi_sas_notify_phy_event(phy
, HISI_PHYE_LINK_RESET
);
1897 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT2
, irq_value
);
1900 static void handle_chl_int0_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1902 u32 irq_value0
= hisi_sas_phy_read32(hisi_hba
, phy_no
, CHL_INT0
);
1904 if (irq_value0
& CHL_INT0_PHY_RDY_MSK
)
1905 hisi_sas_phy_oob_ready(hisi_hba
, phy_no
);
1907 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
,
1908 irq_value0
& (~CHL_INT0_SL_RX_BCST_ACK_MSK
)
1909 & (~CHL_INT0_SL_PHY_ENABLE_MSK
)
1910 & (~CHL_INT0_NOT_RDY_MSK
));
1913 static irqreturn_t
int_chnl_int_v3_hw(int irq_no
, void *p
)
1915 struct hisi_hba
*hisi_hba
= p
;
1919 irq_msk
= hisi_sas_read32(hisi_hba
, CHNL_INT_STATUS
)
1923 if (irq_msk
& (CHNL_INT_STS_INT0_MSK
<< (phy_no
* CHNL_WIDTH
)))
1924 handle_chl_int0_v3_hw(hisi_hba
, phy_no
);
1926 if (irq_msk
& (CHNL_INT_STS_INT1_MSK
<< (phy_no
* CHNL_WIDTH
)))
1927 handle_chl_int1_v3_hw(hisi_hba
, phy_no
);
1929 if (irq_msk
& (CHNL_INT_STS_INT2_MSK
<< (phy_no
* CHNL_WIDTH
)))
1930 handle_chl_int2_v3_hw(hisi_hba
, phy_no
);
1932 irq_msk
&= ~(CHNL_INT_STS_PHY_MSK
<< (phy_no
* CHNL_WIDTH
));
1939 static const struct hisi_sas_hw_error multi_bit_ecc_errors
[] = {
1941 .irq_msk
= BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF
),
1942 .msk
= HGC_DQE_ECC_MB_ADDR_MSK
,
1943 .shift
= HGC_DQE_ECC_MB_ADDR_OFF
,
1944 .msg
= "hgc_dqe_eccbad_intr",
1945 .reg
= HGC_DQE_ECC_ADDR
,
1948 .irq_msk
= BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF
),
1949 .msk
= HGC_IOST_ECC_MB_ADDR_MSK
,
1950 .shift
= HGC_IOST_ECC_MB_ADDR_OFF
,
1951 .msg
= "hgc_iost_eccbad_intr",
1952 .reg
= HGC_IOST_ECC_ADDR
,
1955 .irq_msk
= BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF
),
1956 .msk
= HGC_ITCT_ECC_MB_ADDR_MSK
,
1957 .shift
= HGC_ITCT_ECC_MB_ADDR_OFF
,
1958 .msg
= "hgc_itct_eccbad_intr",
1959 .reg
= HGC_ITCT_ECC_ADDR
,
1962 .irq_msk
= BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF
),
1963 .msk
= HGC_LM_DFX_STATUS2_IOSTLIST_MSK
,
1964 .shift
= HGC_LM_DFX_STATUS2_IOSTLIST_OFF
,
1965 .msg
= "hgc_iostl_eccbad_intr",
1966 .reg
= HGC_LM_DFX_STATUS2
,
1969 .irq_msk
= BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF
),
1970 .msk
= HGC_LM_DFX_STATUS2_ITCTLIST_MSK
,
1971 .shift
= HGC_LM_DFX_STATUS2_ITCTLIST_OFF
,
1972 .msg
= "hgc_itctl_eccbad_intr",
1973 .reg
= HGC_LM_DFX_STATUS2
,
1976 .irq_msk
= BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF
),
1977 .msk
= HGC_CQE_ECC_MB_ADDR_MSK
,
1978 .shift
= HGC_CQE_ECC_MB_ADDR_OFF
,
1979 .msg
= "hgc_cqe_eccbad_intr",
1980 .reg
= HGC_CQE_ECC_ADDR
,
1983 .irq_msk
= BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF
),
1984 .msk
= HGC_RXM_DFX_STATUS14_MEM0_MSK
,
1985 .shift
= HGC_RXM_DFX_STATUS14_MEM0_OFF
,
1986 .msg
= "rxm_mem0_eccbad_intr",
1987 .reg
= HGC_RXM_DFX_STATUS14
,
1990 .irq_msk
= BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF
),
1991 .msk
= HGC_RXM_DFX_STATUS14_MEM1_MSK
,
1992 .shift
= HGC_RXM_DFX_STATUS14_MEM1_OFF
,
1993 .msg
= "rxm_mem1_eccbad_intr",
1994 .reg
= HGC_RXM_DFX_STATUS14
,
1997 .irq_msk
= BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF
),
1998 .msk
= HGC_RXM_DFX_STATUS14_MEM2_MSK
,
1999 .shift
= HGC_RXM_DFX_STATUS14_MEM2_OFF
,
2000 .msg
= "rxm_mem2_eccbad_intr",
2001 .reg
= HGC_RXM_DFX_STATUS14
,
2004 .irq_msk
= BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF
),
2005 .msk
= HGC_RXM_DFX_STATUS15_MEM3_MSK
,
2006 .shift
= HGC_RXM_DFX_STATUS15_MEM3_OFF
,
2007 .msg
= "rxm_mem3_eccbad_intr",
2008 .reg
= HGC_RXM_DFX_STATUS15
,
2011 .irq_msk
= BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF
),
2012 .msk
= AM_ROB_ECC_ERR_ADDR_MSK
,
2013 .shift
= AM_ROB_ECC_ERR_ADDR_OFF
,
2014 .msg
= "ooo_ram_eccbad_intr",
2015 .reg
= AM_ROB_ECC_ERR_ADDR
,
2019 static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba
*hisi_hba
,
2022 struct device
*dev
= hisi_hba
->dev
;
2023 const struct hisi_sas_hw_error
*ecc_error
;
2027 for (i
= 0; i
< ARRAY_SIZE(multi_bit_ecc_errors
); i
++) {
2028 ecc_error
= &multi_bit_ecc_errors
[i
];
2029 if (irq_value
& ecc_error
->irq_msk
) {
2030 val
= hisi_sas_read32(hisi_hba
, ecc_error
->reg
);
2031 val
&= ecc_error
->msk
;
2032 val
>>= ecc_error
->shift
;
2033 dev_err(dev
, "%s (0x%x) found: mem addr is 0x%08X\n",
2034 ecc_error
->msg
, irq_value
, val
);
2035 queue_work(hisi_hba
->wq
, &hisi_hba
->rst_work
);
2040 static void fatal_ecc_int_v3_hw(struct hisi_hba
*hisi_hba
)
2042 u32 irq_value
, irq_msk
;
2044 irq_msk
= hisi_sas_read32(hisi_hba
, SAS_ECC_INTR_MSK
);
2045 hisi_sas_write32(hisi_hba
, SAS_ECC_INTR_MSK
, 0xffffffff);
2047 irq_value
= hisi_sas_read32(hisi_hba
, SAS_ECC_INTR
);
2049 multi_bit_ecc_error_process_v3_hw(hisi_hba
, irq_value
);
2051 hisi_sas_write32(hisi_hba
, SAS_ECC_INTR
, irq_value
);
2052 hisi_sas_write32(hisi_hba
, SAS_ECC_INTR_MSK
, irq_msk
);
2055 static const struct hisi_sas_hw_error axi_error
[] = {
2056 { .msk
= BIT(0), .msg
= "IOST_AXI_W_ERR" },
2057 { .msk
= BIT(1), .msg
= "IOST_AXI_R_ERR" },
2058 { .msk
= BIT(2), .msg
= "ITCT_AXI_W_ERR" },
2059 { .msk
= BIT(3), .msg
= "ITCT_AXI_R_ERR" },
2060 { .msk
= BIT(4), .msg
= "SATA_AXI_W_ERR" },
2061 { .msk
= BIT(5), .msg
= "SATA_AXI_R_ERR" },
2062 { .msk
= BIT(6), .msg
= "DQE_AXI_R_ERR" },
2063 { .msk
= BIT(7), .msg
= "CQE_AXI_W_ERR" },
2067 static const struct hisi_sas_hw_error fifo_error
[] = {
2068 { .msk
= BIT(8), .msg
= "CQE_WINFO_FIFO" },
2069 { .msk
= BIT(9), .msg
= "CQE_MSG_FIFIO" },
2070 { .msk
= BIT(10), .msg
= "GETDQE_FIFO" },
2071 { .msk
= BIT(11), .msg
= "CMDP_FIFO" },
2072 { .msk
= BIT(12), .msg
= "AWTCTRL_FIFO" },
2076 static const struct hisi_sas_hw_error fatal_axi_error
[] = {
2078 .irq_msk
= BIT(ENT_INT_SRC3_WP_DEPTH_OFF
),
2079 .msg
= "write pointer and depth",
2082 .irq_msk
= BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF
),
2083 .msg
= "iptt no match slot",
2086 .irq_msk
= BIT(ENT_INT_SRC3_RP_DEPTH_OFF
),
2087 .msg
= "read pointer and depth",
2090 .irq_msk
= BIT(ENT_INT_SRC3_AXI_OFF
),
2091 .reg
= HGC_AXI_FIFO_ERR_INFO
,
2095 .irq_msk
= BIT(ENT_INT_SRC3_FIFO_OFF
),
2096 .reg
= HGC_AXI_FIFO_ERR_INFO
,
2100 .irq_msk
= BIT(ENT_INT_SRC3_LM_OFF
),
2101 .msg
= "LM add/fetch list",
2104 .irq_msk
= BIT(ENT_INT_SRC3_ABT_OFF
),
2105 .msg
= "SAS_HGC_ABT fetch LM list",
2108 .irq_msk
= BIT(ENT_INT_SRC3_DQE_POISON_OFF
),
2109 .msg
= "read dqe poison",
2112 .irq_msk
= BIT(ENT_INT_SRC3_IOST_POISON_OFF
),
2113 .msg
= "read iost poison",
2116 .irq_msk
= BIT(ENT_INT_SRC3_ITCT_POISON_OFF
),
2117 .msg
= "read itct poison",
2120 .irq_msk
= BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF
),
2121 .msg
= "read itct ncq poison",
2126 static irqreturn_t
fatal_axi_int_v3_hw(int irq_no
, void *p
)
2128 u32 irq_value
, irq_msk
;
2129 struct hisi_hba
*hisi_hba
= p
;
2130 struct device
*dev
= hisi_hba
->dev
;
2131 struct pci_dev
*pdev
= hisi_hba
->pci_dev
;
2134 irq_msk
= hisi_sas_read32(hisi_hba
, ENT_INT_SRC_MSK3
);
2135 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK3
, irq_msk
| 0x1df00);
2137 irq_value
= hisi_sas_read32(hisi_hba
, ENT_INT_SRC3
);
2138 irq_value
&= ~irq_msk
;
2140 for (i
= 0; i
< ARRAY_SIZE(fatal_axi_error
); i
++) {
2141 const struct hisi_sas_hw_error
*error
= &fatal_axi_error
[i
];
2143 if (!(irq_value
& error
->irq_msk
))
2147 const struct hisi_sas_hw_error
*sub
= error
->sub
;
2148 u32 err_value
= hisi_sas_read32(hisi_hba
, error
->reg
);
2150 for (; sub
->msk
|| sub
->msg
; sub
++) {
2151 if (!(err_value
& sub
->msk
))
2154 dev_err(dev
, "%s error (0x%x) found!\n",
2155 sub
->msg
, irq_value
);
2156 queue_work(hisi_hba
->wq
, &hisi_hba
->rst_work
);
2159 dev_err(dev
, "%s error (0x%x) found!\n",
2160 error
->msg
, irq_value
);
2161 queue_work(hisi_hba
->wq
, &hisi_hba
->rst_work
);
2164 if (pdev
->revision
< 0x21) {
2167 reg_val
= hisi_sas_read32(hisi_hba
,
2168 AXI_MASTER_CFG_BASE
+
2170 reg_val
|= AM_CTRL_SHUTDOWN_REQ_MSK
;
2171 hisi_sas_write32(hisi_hba
, AXI_MASTER_CFG_BASE
+
2172 AM_CTRL_GLOBAL
, reg_val
);
2176 fatal_ecc_int_v3_hw(hisi_hba
);
2178 if (irq_value
& BIT(ENT_INT_SRC3_ITC_INT_OFF
)) {
2179 u32 reg_val
= hisi_sas_read32(hisi_hba
, ITCT_CLR
);
2180 u32 dev_id
= reg_val
& ITCT_DEV_MSK
;
2181 struct hisi_sas_device
*sas_dev
=
2182 &hisi_hba
->devices
[dev_id
];
2184 hisi_sas_write32(hisi_hba
, ITCT_CLR
, 0);
2185 dev_dbg(dev
, "clear ITCT ok\n");
2186 complete(sas_dev
->completion
);
2189 hisi_sas_write32(hisi_hba
, ENT_INT_SRC3
, irq_value
& 0x1df00);
2190 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK3
, irq_msk
);
2195 static bool is_ncq_err_v3_hw(struct hisi_sas_complete_v3_hdr
*complete_hdr
)
2199 dw0
= le32_to_cpu(complete_hdr
->dw0
);
2200 dw3
= le32_to_cpu(complete_hdr
->dw3
);
2202 return (dw0
& ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK
) &&
2203 (dw3
& FIS_TYPE_SDB_MSK
) &&
2204 (dw3
& FIS_ATA_STATUS_ERR_MSK
);
2208 slot_err_v3_hw(struct hisi_hba
*hisi_hba
, struct sas_task
*task
,
2209 struct hisi_sas_slot
*slot
)
2211 struct task_status_struct
*ts
= &task
->task_status
;
2212 struct hisi_sas_complete_v3_hdr
*complete_queue
=
2213 hisi_hba
->complete_hdr
[slot
->cmplt_queue
];
2214 struct hisi_sas_complete_v3_hdr
*complete_hdr
=
2215 &complete_queue
[slot
->cmplt_queue_slot
];
2216 struct hisi_sas_err_record_v3
*record
=
2217 hisi_sas_status_buf_addr_mem(slot
);
2218 u32 dma_rx_err_type
= le32_to_cpu(record
->dma_rx_err_type
);
2219 u32 trans_tx_fail_type
= le32_to_cpu(record
->trans_tx_fail_type
);
2220 u16 sipc_rx_err_type
= le16_to_cpu(record
->sipc_rx_err_type
);
2221 u32 dw3
= le32_to_cpu(complete_hdr
->dw3
);
2222 u32 dw0
= le32_to_cpu(complete_hdr
->dw0
);
2224 switch (task
->task_proto
) {
2225 case SAS_PROTOCOL_SSP
:
2226 if (dma_rx_err_type
& RX_DATA_LEN_UNDERFLOW_MSK
) {
2228 * If returned response frame is incorrect because of data underflow,
2229 * but I/O information has been written to the host memory, we examine
2232 if (!(dw0
& CMPLT_HDR_RSPNS_GOOD_MSK
) &&
2233 (dw0
& CMPLT_HDR_RSPNS_XFRD_MSK
))
2236 ts
->residual
= trans_tx_fail_type
;
2237 ts
->stat
= SAS_DATA_UNDERRUN
;
2238 } else if (dw3
& CMPLT_HDR_IO_IN_TARGET_MSK
) {
2239 ts
->stat
= SAS_QUEUE_FULL
;
2242 ts
->stat
= SAS_OPEN_REJECT
;
2243 ts
->open_rej_reason
= SAS_OREJ_RSVD_RETRY
;
2246 case SAS_PROTOCOL_SATA
:
2247 case SAS_PROTOCOL_STP
:
2248 case SAS_PROTOCOL_SATA
| SAS_PROTOCOL_STP
:
2249 if ((dw0
& CMPLT_HDR_RSPNS_XFRD_MSK
) &&
2250 (sipc_rx_err_type
& RX_FIS_STATUS_ERR_MSK
)) {
2251 if (task
->ata_task
.use_ncq
) {
2252 struct domain_device
*device
= task
->dev
;
2253 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
2255 sas_dev
->dev_status
= HISI_SAS_DEV_NCQ_ERR
;
2258 ts
->stat
= SAS_PROTO_RESPONSE
;
2260 } else if (dma_rx_err_type
& RX_DATA_LEN_UNDERFLOW_MSK
) {
2261 ts
->residual
= trans_tx_fail_type
;
2262 ts
->stat
= SAS_DATA_UNDERRUN
;
2263 } else if ((dw3
& CMPLT_HDR_IO_IN_TARGET_MSK
) ||
2264 (dw3
& SATA_DISK_IN_ERROR_STATUS_MSK
)) {
2265 ts
->stat
= SAS_PHY_DOWN
;
2268 ts
->stat
= SAS_OPEN_REJECT
;
2269 ts
->open_rej_reason
= SAS_OREJ_RSVD_RETRY
;
2271 if (dw0
& CMPLT_HDR_RSPNS_XFRD_MSK
)
2272 hisi_sas_sata_done(task
, slot
);
2274 case SAS_PROTOCOL_SMP
:
2275 ts
->stat
= SAS_SAM_STAT_CHECK_CONDITION
;
2283 static void slot_complete_v3_hw(struct hisi_hba
*hisi_hba
,
2284 struct hisi_sas_slot
*slot
)
2286 struct sas_task
*task
= slot
->task
;
2287 struct hisi_sas_device
*sas_dev
;
2288 struct device
*dev
= hisi_hba
->dev
;
2289 struct task_status_struct
*ts
;
2290 struct domain_device
*device
;
2291 struct sas_ha_struct
*ha
;
2292 struct hisi_sas_complete_v3_hdr
*complete_queue
=
2293 hisi_hba
->complete_hdr
[slot
->cmplt_queue
];
2294 struct hisi_sas_complete_v3_hdr
*complete_hdr
=
2295 &complete_queue
[slot
->cmplt_queue_slot
];
2296 unsigned long flags
;
2297 bool is_internal
= slot
->is_internal
;
2300 if (unlikely(!task
|| !task
->lldd_task
|| !task
->dev
))
2303 ts
= &task
->task_status
;
2305 ha
= device
->port
->ha
;
2306 sas_dev
= device
->lldd_dev
;
2308 spin_lock_irqsave(&task
->task_state_lock
, flags
);
2309 task
->task_state_flags
&= ~SAS_TASK_STATE_PENDING
;
2310 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
2312 memset(ts
, 0, sizeof(*ts
));
2313 ts
->resp
= SAS_TASK_COMPLETE
;
2315 if (unlikely(!sas_dev
)) {
2316 dev_dbg(dev
, "slot complete: port has not device\n");
2317 ts
->stat
= SAS_PHY_DOWN
;
2321 dw0
= le32_to_cpu(complete_hdr
->dw0
);
2322 dw1
= le32_to_cpu(complete_hdr
->dw1
);
2323 dw3
= le32_to_cpu(complete_hdr
->dw3
);
2326 * Use SAS+TMF status codes
2328 switch ((dw0
& CMPLT_HDR_ABORT_STAT_MSK
) >> CMPLT_HDR_ABORT_STAT_OFF
) {
2329 case STAT_IO_ABORTED
:
2330 /* this IO has been aborted by abort command */
2331 ts
->stat
= SAS_ABORTED_TASK
;
2333 case STAT_IO_COMPLETE
:
2334 /* internal abort command complete */
2335 ts
->stat
= TMF_RESP_FUNC_SUCC
;
2337 case STAT_IO_NO_DEVICE
:
2338 ts
->stat
= TMF_RESP_FUNC_COMPLETE
;
2340 case STAT_IO_NOT_VALID
:
2342 * abort single IO, the controller can't find the IO
2344 ts
->stat
= TMF_RESP_FUNC_FAILED
;
2350 /* check for erroneous completion */
2351 if ((dw0
& CMPLT_HDR_CMPLT_MSK
) == 0x3) {
2352 u32
*error_info
= hisi_sas_status_buf_addr_mem(slot
);
2354 if (slot_err_v3_hw(hisi_hba
, task
, slot
)) {
2355 if (ts
->stat
!= SAS_DATA_UNDERRUN
)
2356 dev_info(dev
, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
2357 slot
->idx
, task
, sas_dev
->device_id
,
2358 SAS_ADDR(device
->sas_addr
),
2359 dw0
, dw1
, complete_hdr
->act
, dw3
,
2360 error_info
[0], error_info
[1],
2361 error_info
[2], error_info
[3]);
2362 if (unlikely(slot
->abort
)) {
2363 if (dev_is_sata(device
) && task
->ata_task
.use_ncq
)
2364 sas_ata_device_link_abort(device
, true);
2366 sas_task_abort(task
);
2374 switch (task
->task_proto
) {
2375 case SAS_PROTOCOL_SSP
: {
2376 struct ssp_response_iu
*iu
=
2377 hisi_sas_status_buf_addr_mem(slot
) +
2378 sizeof(struct hisi_sas_err_record
);
2380 sas_ssp_task_response(dev
, task
, iu
);
2383 case SAS_PROTOCOL_SMP
: {
2384 struct scatterlist
*sg_resp
= &task
->smp_task
.smp_resp
;
2385 void *to
= page_address(sg_page(sg_resp
));
2387 ts
->stat
= SAS_SAM_STAT_GOOD
;
2389 memcpy(to
+ sg_resp
->offset
,
2390 hisi_sas_status_buf_addr_mem(slot
) +
2391 sizeof(struct hisi_sas_err_record
),
2395 case SAS_PROTOCOL_SATA
:
2396 case SAS_PROTOCOL_STP
:
2397 case SAS_PROTOCOL_SATA
| SAS_PROTOCOL_STP
:
2398 ts
->stat
= SAS_SAM_STAT_GOOD
;
2399 if (dw0
& CMPLT_HDR_RSPNS_XFRD_MSK
)
2400 hisi_sas_sata_done(task
, slot
);
2403 ts
->stat
= SAS_SAM_STAT_CHECK_CONDITION
;
2407 if (!slot
->port
->port_attached
) {
2408 dev_warn(dev
, "slot complete: port %d has removed\n",
2409 slot
->port
->sas_port
.id
);
2410 ts
->stat
= SAS_PHY_DOWN
;
2414 spin_lock_irqsave(&task
->task_state_lock
, flags
);
2415 if (task
->task_state_flags
& SAS_TASK_STATE_ABORTED
) {
2416 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
2417 dev_info(dev
, "slot complete: task(%pK) aborted\n", task
);
2420 task
->task_state_flags
|= SAS_TASK_STATE_DONE
;
2421 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
2422 hisi_sas_slot_task_free(hisi_hba
, task
, slot
, true);
2424 if (!is_internal
&& (task
->task_proto
!= SAS_PROTOCOL_SMP
)) {
2425 spin_lock_irqsave(&device
->done_lock
, flags
);
2426 if (test_bit(SAS_HA_FROZEN
, &ha
->state
)) {
2427 spin_unlock_irqrestore(&device
->done_lock
, flags
);
2428 dev_info(dev
, "slot complete: task(%pK) ignored\n",
2432 spin_unlock_irqrestore(&device
->done_lock
, flags
);
2435 if (task
->task_done
)
2436 task
->task_done(task
);
2439 static int complete_v3_hw(struct hisi_sas_cq
*cq
)
2441 struct hisi_sas_complete_v3_hdr
*complete_queue
;
2442 struct hisi_hba
*hisi_hba
= cq
->hisi_hba
;
2443 u32 rd_point
, wr_point
;
2447 rd_point
= cq
->rd_point
;
2448 complete_queue
= hisi_hba
->complete_hdr
[queue
];
2450 wr_point
= hisi_sas_read32(hisi_hba
, COMPL_Q_0_WR_PTR
+
2452 completed
= (wr_point
+ HISI_SAS_QUEUE_SLOTS
- rd_point
) % HISI_SAS_QUEUE_SLOTS
;
2454 while (rd_point
!= wr_point
) {
2455 struct hisi_sas_complete_v3_hdr
*complete_hdr
;
2456 struct device
*dev
= hisi_hba
->dev
;
2457 struct hisi_sas_slot
*slot
;
2461 complete_hdr
= &complete_queue
[rd_point
];
2462 dw0
= le32_to_cpu(complete_hdr
->dw0
);
2463 dw1
= le32_to_cpu(complete_hdr
->dw1
);
2464 dw3
= le32_to_cpu(complete_hdr
->dw3
);
2466 iptt
= dw1
& CMPLT_HDR_IPTT_MSK
;
2467 if (unlikely((dw0
& CMPLT_HDR_CMPLT_MSK
) == 0x3) &&
2468 (dw3
& CMPLT_HDR_SATA_DISK_ERR_MSK
)) {
2469 int device_id
= (dw1
& CMPLT_HDR_DEV_ID_MSK
) >>
2470 CMPLT_HDR_DEV_ID_OFF
;
2471 struct hisi_sas_itct
*itct
=
2472 &hisi_hba
->itct
[device_id
];
2473 struct hisi_sas_device
*sas_dev
=
2474 &hisi_hba
->devices
[device_id
];
2475 struct domain_device
*device
= sas_dev
->sas_device
;
2477 dev_err(dev
, "erroneous completion disk err dev id=%d sas_addr=0x%llx CQ hdr: 0x%x 0x%x 0x%x 0x%x\n",
2478 device_id
, itct
->sas_addr
, dw0
, dw1
,
2479 complete_hdr
->act
, dw3
);
2481 if (is_ncq_err_v3_hw(complete_hdr
))
2482 sas_dev
->dev_status
= HISI_SAS_DEV_NCQ_ERR
;
2484 sas_ata_device_link_abort(device
, true);
2485 } else if (likely(iptt
< HISI_SAS_COMMAND_ENTRIES_V3_HW
)) {
2486 slot
= &hisi_hba
->slot_info
[iptt
];
2487 slot
->cmplt_queue_slot
= rd_point
;
2488 slot
->cmplt_queue
= queue
;
2489 slot_complete_v3_hw(hisi_hba
, slot
);
2491 dev_err(dev
, "IPTT %d is invalid, discard it.\n", iptt
);
2493 if (++rd_point
>= HISI_SAS_QUEUE_SLOTS
)
2497 /* update rd_point */
2498 cq
->rd_point
= rd_point
;
2499 hisi_sas_write32(hisi_hba
, COMPL_Q_0_RD_PTR
+ (0x14 * queue
), rd_point
);
2505 static int queue_complete_v3_hw(struct Scsi_Host
*shost
, unsigned int queue
)
2507 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2508 struct hisi_sas_cq
*cq
= &hisi_hba
->cq
[queue
];
2511 spin_lock(&cq
->poll_lock
);
2512 completed
= complete_v3_hw(cq
);
2513 spin_unlock(&cq
->poll_lock
);
2518 static irqreturn_t
cq_thread_v3_hw(int irq_no
, void *p
)
2520 struct hisi_sas_cq
*cq
= p
;
2527 static irqreturn_t
cq_interrupt_v3_hw(int irq_no
, void *p
)
2529 struct hisi_sas_cq
*cq
= p
;
2530 struct hisi_hba
*hisi_hba
= cq
->hisi_hba
;
2533 hisi_sas_write32(hisi_hba
, OQ_INT_SRC
, 1 << queue
);
2535 return IRQ_WAKE_THREAD
;
/* devm action callback: release the PCI IRQ vectors on teardown. */
static void hisi_sas_v3_free_vectors(void *data)
{
	struct pci_dev *pdev = data;

	pci_free_irq_vectors(pdev);
}
2545 static int interrupt_preinit_v3_hw(struct hisi_hba
*hisi_hba
)
2547 /* Allocate all MSI vectors to avoid re-insertion issue */
2548 int max_msi
= HISI_SAS_MSI_COUNT_V3_HW
;
2549 int vectors
, min_msi
;
2550 struct Scsi_Host
*shost
= hisi_hba
->shost
;
2551 struct pci_dev
*pdev
= hisi_hba
->pci_dev
;
2552 struct irq_affinity desc
= {
2553 .pre_vectors
= BASE_VECTORS_V3_HW
,
2556 min_msi
= MIN_AFFINE_VECTORS_V3_HW
;
2557 vectors
= pci_alloc_irq_vectors_affinity(pdev
,
2566 hisi_hba
->cq_nvecs
= vectors
- BASE_VECTORS_V3_HW
- hisi_hba
->iopoll_q_cnt
;
2567 shost
->nr_hw_queues
= hisi_hba
->cq_nvecs
+ hisi_hba
->iopoll_q_cnt
;
2569 return devm_add_action(&pdev
->dev
, hisi_sas_v3_free_vectors
, pdev
);
2572 static int interrupt_init_v3_hw(struct hisi_hba
*hisi_hba
)
2574 struct device
*dev
= hisi_hba
->dev
;
2575 struct pci_dev
*pdev
= hisi_hba
->pci_dev
;
2578 rc
= devm_request_irq(dev
, pci_irq_vector(pdev
, 1),
2579 int_phy_up_down_bcast_v3_hw
, 0,
2580 DRV_NAME
" phy", hisi_hba
);
2582 dev_err(dev
, "could not request phy interrupt, rc=%d\n", rc
);
2586 rc
= devm_request_irq(dev
, pci_irq_vector(pdev
, 2),
2587 int_chnl_int_v3_hw
, 0,
2588 DRV_NAME
" channel", hisi_hba
);
2590 dev_err(dev
, "could not request chnl interrupt, rc=%d\n", rc
);
2594 rc
= devm_request_irq(dev
, pci_irq_vector(pdev
, 11),
2595 fatal_axi_int_v3_hw
, 0,
2596 DRV_NAME
" fatal", hisi_hba
);
2598 dev_err(dev
, "could not request fatal interrupt, rc=%d\n", rc
);
2602 if (hisi_sas_intr_conv
)
2603 dev_info(dev
, "Enable interrupt converge\n");
2605 for (i
= 0; i
< hisi_hba
->cq_nvecs
; i
++) {
2606 struct hisi_sas_cq
*cq
= &hisi_hba
->cq
[i
];
2607 int nr
= hisi_sas_intr_conv
? 16 : 16 + i
;
2608 unsigned long irqflags
= hisi_sas_intr_conv
? IRQF_SHARED
:
2611 cq
->irq_no
= pci_irq_vector(pdev
, nr
);
2612 rc
= devm_request_threaded_irq(dev
, cq
->irq_no
,
2616 DRV_NAME
" cq", cq
);
2618 dev_err(dev
, "could not request cq%d interrupt, rc=%d\n",
2622 cq
->irq_mask
= pci_irq_get_affinity(pdev
, i
+ BASE_VECTORS_V3_HW
);
2623 if (!cq
->irq_mask
) {
2624 dev_err(dev
, "could not get cq%d irq affinity!\n", i
);
/* Full controller bring-up: hardware init followed by interrupt setup. */
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	return 0;
}
2647 static void phy_set_linkrate_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
,
2648 struct sas_phy_linkrates
*r
)
2650 enum sas_linkrate max
= r
->maximum_linkrate
;
2651 u32 prog_phy_link_rate
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
2652 PROG_PHY_LINK_RATE
);
2654 prog_phy_link_rate
&= ~CFG_PROG_PHY_LINK_RATE_MSK
;
2655 prog_phy_link_rate
|= hisi_sas_get_prog_phy_linkrate_mask(max
);
2656 hisi_sas_phy_write32(hisi_hba
, phy_no
, PROG_PHY_LINK_RATE
,
2657 prog_phy_link_rate
);
2660 static void interrupt_disable_v3_hw(struct hisi_hba
*hisi_hba
)
2662 struct pci_dev
*pdev
= hisi_hba
->pci_dev
;
2665 synchronize_irq(pci_irq_vector(pdev
, 1));
2666 synchronize_irq(pci_irq_vector(pdev
, 2));
2667 synchronize_irq(pci_irq_vector(pdev
, 11));
2668 for (i
= 0; i
< hisi_hba
->queue_count
; i
++)
2669 hisi_sas_write32(hisi_hba
, OQ0_INT_SRC_MSK
+ 0x4 * i
, 0x1);
2671 for (i
= 0; i
< hisi_hba
->cq_nvecs
; i
++)
2672 synchronize_irq(pci_irq_vector(pdev
, i
+ 16));
2674 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK1
, 0xffffffff);
2675 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK2
, 0xffffffff);
2676 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK3
, 0xffffffff);
2677 hisi_sas_write32(hisi_hba
, SAS_ECC_INTR_MSK
, 0xffffffff);
2679 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
2680 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT1_MSK
, 0xffffffff);
2681 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT2_MSK
, 0xffffffff);
2682 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_NOT_RDY_MSK
, 0x1);
2683 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_PHY_ENA_MSK
, 0x1);
2684 hisi_sas_phy_write32(hisi_hba
, i
, SL_RX_BCAST_CHK_MSK
, 0x1);
2688 static u32
get_phys_state_v3_hw(struct hisi_hba
*hisi_hba
)
2690 return hisi_sas_read32(hisi_hba
, PHY_STATE
);
2693 static int disable_host_v3_hw(struct hisi_hba
*hisi_hba
)
2695 struct device
*dev
= hisi_hba
->dev
;
2696 u32 status
, reg_val
;
2699 hisi_sas_sync_poll_cqs(hisi_hba
);
2700 hisi_sas_write32(hisi_hba
, DLVRY_QUEUE_ENABLE
, 0x0);
2702 hisi_sas_stop_phys(hisi_hba
);
2706 reg_val
= hisi_sas_read32(hisi_hba
, AXI_MASTER_CFG_BASE
+
2708 reg_val
|= AM_CTRL_SHUTDOWN_REQ_MSK
;
2709 hisi_sas_write32(hisi_hba
, AXI_MASTER_CFG_BASE
+
2710 AM_CTRL_GLOBAL
, reg_val
);
2712 /* wait until bus idle */
2713 rc
= hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE
+
2714 AM_CURR_TRANS_RETURN
, status
,
2715 status
== 0x3, 10, 100);
2717 dev_err(dev
, "axi bus is not idle, rc=%d\n", rc
);
2724 static int soft_reset_v3_hw(struct hisi_hba
*hisi_hba
)
2726 struct device
*dev
= hisi_hba
->dev
;
2729 interrupt_disable_v3_hw(hisi_hba
);
2730 rc
= disable_host_v3_hw(hisi_hba
);
2732 dev_err(dev
, "soft reset: disable host failed rc=%d\n", rc
);
2736 hisi_sas_init_mem(hisi_hba
);
2738 return hw_init_v3_hw(hisi_hba
);
2741 static int write_gpio_v3_hw(struct hisi_hba
*hisi_hba
, u8 reg_type
,
2742 u8 reg_index
, u8 reg_count
, u8
*write_data
)
2744 struct device
*dev
= hisi_hba
->dev
;
2745 u32
*data
= (u32
*)write_data
;
2749 case SAS_GPIO_REG_TX
:
2750 if ((reg_index
+ reg_count
) > ((hisi_hba
->n_phy
+ 3) / 4)) {
2751 dev_err(dev
, "write gpio: invalid reg range[%d, %d]\n",
2752 reg_index
, reg_index
+ reg_count
- 1);
2756 for (i
= 0; i
< reg_count
; i
++)
2757 hisi_sas_write32(hisi_hba
,
2758 SAS_GPIO_TX_0_1
+ (reg_index
+ i
) * 4,
2762 dev_err(dev
, "write gpio: unsupported or bad reg type %d\n",
2770 static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba
*hisi_hba
,
2771 int delay_ms
, int timeout_ms
)
2773 struct device
*dev
= hisi_hba
->dev
;
2774 int entries
, entries_old
= 0, time
;
2776 for (time
= 0; time
< timeout_ms
; time
+= delay_ms
) {
2777 entries
= hisi_sas_read32(hisi_hba
, CQE_SEND_CNT
);
2778 if (entries
== entries_old
)
2781 entries_old
= entries
;
2785 if (time
>= timeout_ms
) {
2786 dev_dbg(dev
, "Wait commands complete timeout!\n");
2790 dev_dbg(dev
, "wait commands complete %dms\n", time
);
2793 static ssize_t
intr_conv_v3_hw_show(struct device
*dev
,
2794 struct device_attribute
*attr
, char *buf
)
2796 return scnprintf(buf
, PAGE_SIZE
, "%u\n", hisi_sas_intr_conv
);
2798 static DEVICE_ATTR_RO(intr_conv_v3_hw
);
2800 static void config_intr_coal_v3_hw(struct hisi_hba
*hisi_hba
)
2802 /* config those registers between enable and disable PHYs */
2803 hisi_sas_stop_phys(hisi_hba
);
2804 hisi_sas_write32(hisi_hba
, INT_COAL_EN
, 0x3);
2806 if (hisi_hba
->intr_coal_ticks
== 0 ||
2807 hisi_hba
->intr_coal_count
== 0) {
2808 /* configure the interrupt coalescing timeout period 10us */
2809 hisi_sas_write32(hisi_hba
, OQ_INT_COAL_TIME
, 0xa);
2810 /* configure the count of CQ entries 10 */
2811 hisi_sas_write32(hisi_hba
, OQ_INT_COAL_CNT
, 0xa);
2813 hisi_sas_write32(hisi_hba
, OQ_INT_COAL_TIME
,
2814 hisi_hba
->intr_coal_ticks
);
2815 hisi_sas_write32(hisi_hba
, OQ_INT_COAL_CNT
,
2816 hisi_hba
->intr_coal_count
);
2818 phys_init_v3_hw(hisi_hba
);
2821 static ssize_t
intr_coal_ticks_v3_hw_show(struct device
*dev
,
2822 struct device_attribute
*attr
,
2825 struct Scsi_Host
*shost
= class_to_shost(dev
);
2826 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2828 return scnprintf(buf
, PAGE_SIZE
, "%u\n",
2829 hisi_hba
->intr_coal_ticks
);
2832 static ssize_t
intr_coal_ticks_v3_hw_store(struct device
*dev
,
2833 struct device_attribute
*attr
,
2834 const char *buf
, size_t count
)
2836 struct Scsi_Host
*shost
= class_to_shost(dev
);
2837 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2838 u32 intr_coal_ticks
;
2841 ret
= kstrtou32(buf
, 10, &intr_coal_ticks
);
2843 dev_err(dev
, "Input data of interrupt coalesce unmatch\n");
2847 if (intr_coal_ticks
>= BIT(24)) {
2848 dev_err(dev
, "intr_coal_ticks must be less than 2^24!\n");
2852 hisi_hba
->intr_coal_ticks
= intr_coal_ticks
;
2854 config_intr_coal_v3_hw(hisi_hba
);
2858 static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw
);
2860 static ssize_t
intr_coal_count_v3_hw_show(struct device
*dev
,
2861 struct device_attribute
2864 struct Scsi_Host
*shost
= class_to_shost(dev
);
2865 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2867 return scnprintf(buf
, PAGE_SIZE
, "%u\n",
2868 hisi_hba
->intr_coal_count
);
2871 static ssize_t
intr_coal_count_v3_hw_store(struct device
*dev
,
2872 struct device_attribute
2873 *attr
, const char *buf
, size_t count
)
2875 struct Scsi_Host
*shost
= class_to_shost(dev
);
2876 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2877 u32 intr_coal_count
;
2880 ret
= kstrtou32(buf
, 10, &intr_coal_count
);
2882 dev_err(dev
, "Input data of interrupt coalesce unmatch\n");
2886 if (intr_coal_count
>= BIT(8)) {
2887 dev_err(dev
, "intr_coal_count must be less than 2^8!\n");
2891 hisi_hba
->intr_coal_count
= intr_coal_count
;
2893 config_intr_coal_v3_hw(hisi_hba
);
2897 static DEVICE_ATTR_RW(intr_coal_count_v3_hw
);
2899 static ssize_t
iopoll_q_cnt_v3_hw_show(struct device
*dev
,
2900 struct device_attribute
2903 struct Scsi_Host
*shost
= class_to_shost(dev
);
2904 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2906 return scnprintf(buf
, PAGE_SIZE
, "%u\n",
2907 hisi_hba
->iopoll_q_cnt
);
2909 static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw
);
2911 static int device_configure_v3_hw(struct scsi_device
*sdev
,
2912 struct queue_limits
*lim
)
2914 struct Scsi_Host
*shost
= dev_to_shost(&sdev
->sdev_gendev
);
2915 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2916 int ret
= hisi_sas_device_configure(sdev
, lim
);
2917 struct device
*dev
= hisi_hba
->dev
;
2922 if (sdev
->type
== TYPE_ENCLOSURE
)
2925 if (!device_link_add(&sdev
->sdev_gendev
, dev
,
2926 DL_FLAG_PM_RUNTIME
| DL_FLAG_RPM_ACTIVE
)) {
2927 if (pm_runtime_enabled(dev
)) {
2928 dev_info(dev
, "add device link failed, disable runtime PM for the host\n");
2929 pm_runtime_disable(dev
);
2936 static struct attribute
*host_v3_hw_attrs
[] = {
2937 &dev_attr_phy_event_threshold
.attr
,
2938 &dev_attr_intr_conv_v3_hw
.attr
,
2939 &dev_attr_intr_coal_ticks_v3_hw
.attr
,
2940 &dev_attr_intr_coal_count_v3_hw
.attr
,
2941 &dev_attr_iopoll_q_cnt_v3_hw
.attr
,
2945 ATTRIBUTE_GROUPS(host_v3_hw
);
2947 static const struct attribute_group
*sdev_groups_v3_hw
[] = {
2948 &sas_ata_sdev_attr_group
,
/* Build a {name-string, offset} pair from a register macro. */
#define HISI_SAS_DEBUGFS_REG(x) {#x, x}

/* One named register for debugfs dumping. */
struct hisi_sas_debugfs_reg_lu {
	char *name;
	int off;
};

/* A register bank: its lookup table, dump length and base offset.
 * NOTE(review): field set reconstructed from usage below (.lu/.count/
 * .base_off initializers) — confirm against the upstream driver.
 */
struct hisi_sas_debugfs_reg {
	const struct hisi_sas_debugfs_reg_lu *lu;
	int count;
	int base_off;
};
2965 static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu
[] = {
2966 HISI_SAS_DEBUGFS_REG(PHY_CFG
),
2967 HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE
),
2968 HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE
),
2969 HISI_SAS_DEBUGFS_REG(PHY_CTRL
),
2970 HISI_SAS_DEBUGFS_REG(SL_CFG
),
2971 HISI_SAS_DEBUGFS_REG(AIP_LIMIT
),
2972 HISI_SAS_DEBUGFS_REG(SL_CONTROL
),
2973 HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS
),
2974 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0
),
2975 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1
),
2976 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2
),
2977 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3
),
2978 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4
),
2979 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5
),
2980 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6
),
2981 HISI_SAS_DEBUGFS_REG(TXID_AUTO
),
2982 HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0
),
2983 HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H
),
2984 HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER
),
2985 HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE
),
2986 HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER
),
2987 HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG
),
2988 HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG
),
2989 HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG
),
2990 HISI_SAS_DEBUGFS_REG(CHL_INT0
),
2991 HISI_SAS_DEBUGFS_REG(CHL_INT1
),
2992 HISI_SAS_DEBUGFS_REG(CHL_INT2
),
2993 HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK
),
2994 HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK
),
2995 HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK
),
2996 HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME
),
2997 HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN
),
2998 HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER
),
2999 HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK
),
3000 HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK
),
3001 HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK
),
3002 HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK
),
3003 HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK
),
3004 HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK
),
3005 HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS
),
3006 HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS
),
3007 HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME
),
3008 HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST
),
3009 HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB
),
3010 HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW
),
3011 HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR
),
3012 HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR
),
3016 static const struct hisi_sas_debugfs_reg debugfs_port_reg
= {
3017 .lu
= debugfs_port_reg_lu
,
3019 .base_off
= PORT_BASE
,
3022 static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu
[] = {
3023 HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE
),
3024 HISI_SAS_DEBUGFS_REG(PHY_CONTEXT
),
3025 HISI_SAS_DEBUGFS_REG(PHY_STATE
),
3026 HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA
),
3027 HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE
),
3028 HISI_SAS_DEBUGFS_REG(ITCT_CLR
),
3029 HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO
),
3030 HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI
),
3031 HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO
),
3032 HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI
),
3033 HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG
),
3034 HISI_SAS_DEBUGFS_REG(TRANS_LOCK_ICT_TIME
),
3035 HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL
),
3036 HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL
),
3037 HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME
),
3038 HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE
),
3039 HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME
),
3040 HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME
),
3041 HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME
),
3042 HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME
),
3043 HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME
),
3044 HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN
),
3045 HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME
),
3046 HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2
),
3047 HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT
),
3048 HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE
),
3049 HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS
),
3050 HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS
),
3051 HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO
),
3052 HISI_SAS_DEBUGFS_REG(INT_COAL_EN
),
3053 HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME
),
3054 HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT
),
3055 HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME
),
3056 HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT
),
3057 HISI_SAS_DEBUGFS_REG(OQ_INT_SRC
),
3058 HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK
),
3059 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1
),
3060 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2
),
3061 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3
),
3062 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1
),
3063 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2
),
3064 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3
),
3065 HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK
),
3066 HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK
),
3067 HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK
),
3068 HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR
),
3069 HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK
),
3070 HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN
),
3071 HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT
),
3072 HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH
),
3073 HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR
),
3074 HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR
),
3075 HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG
),
3076 HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK
),
3077 HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH
),
3078 HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR
),
3079 HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR
),
3080 HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG
),
3081 HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG
),
3082 HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX
),
3083 HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0
),
3084 HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1
),
3085 HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1
),
3086 HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD
),
3090 static const struct hisi_sas_debugfs_reg debugfs_global_reg
= {
3091 .lu
= debugfs_global_reg_lu
,
3095 static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu
[] = {
3096 HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS
),
3097 HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS
),
3098 HISI_SAS_DEBUGFS_REG(AXI_CFG
),
3099 HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR
),
3103 static const struct hisi_sas_debugfs_reg debugfs_axi_reg
= {
3104 .lu
= debugfs_axi_reg_lu
,
3106 .base_off
= AXI_MASTER_CFG_BASE
,
3109 static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu
[] = {
3110 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0
),
3111 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1
),
3112 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK
),
3113 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK
),
3114 HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK
),
3115 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2
),
3116 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK
),
3120 static const struct hisi_sas_debugfs_reg debugfs_ras_reg
= {
3121 .lu
= debugfs_ras_reg_lu
,
3123 .base_off
= RAS_BASE
,
/*
 * Quiesce the controller before taking a debugfs register snapshot:
 * block new SCSI requests on the host, wait (100us poll, 5000 iterations
 * per the arguments — TODO confirm units against the helper) for
 * outstanding commands, mark the HBA as rejecting commands, sync the
 * completion queues, then disable all delivery queues by writing 0 to
 * DLVRY_QUEUE_ENABLE.
 * NOTE(review): braces/blank lines were elided by the extraction that
 * produced this view; code tokens below are unchanged.
 */
3126 static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba
*hisi_hba
)
3128 struct Scsi_Host
*shost
= hisi_hba
->shost
;
3130 scsi_block_requests(shost
);
3131 wait_cmds_complete_timeout_v3_hw(hisi_hba
, 100, 5000);
/* Reject further commands while the snapshot is in progress. */
3133 set_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
3134 hisi_sas_sync_cqs(hisi_hba
);
3135 hisi_sas_write32(hisi_hba
, DLVRY_QUEUE_ENABLE
, 0);
/*
 * Undo debugfs_snapshot_prepare_v3_hw(): re-enable one delivery queue
 * per configured queue (bitmask of queue_count low bits), clear the
 * reject-commands flag and unblock SCSI requests.
 * NOTE(review): braces were elided by the extraction; code tokens are
 * unchanged.
 */
3138 static void debugfs_snapshot_restore_v3_hw(struct hisi_hba
*hisi_hba
)
3140 struct Scsi_Host
*shost
= hisi_hba
->shost
;
/* (1ULL << queue_count) - 1: enable bit set for each delivery queue. */
3142 hisi_sas_write32(hisi_hba
, DLVRY_QUEUE_ENABLE
,
3143 (u32
)((1ULL << hisi_hba
->queue_count
) - 1));
3145 clear_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
3146 scsi_unblock_requests(shost
);
/*
 * Dump the on-chip IOST or ITCT cache (selected by @type) into @buf
 * through the TAB_RD_TYPE / TAB_DFX debug window.  The first loop polls
 * for the 0xffffffff start marker; if it never appears the read is
 * aborted with an error and @buf is zeroed.  The second loop then reads
 * the remaining cache_dw_size - 1 dwords (buf[0] is left as the marker
 * slot — TODO confirm against elided lines).
 * NOTE(review): braces, `break`/`return` lines and some locals (i, val,
 * buf parameter line) were elided by the extraction; code tokens below
 * are unchanged.
 */
3149 static void read_iost_itct_cache_v3_hw(struct hisi_hba
*hisi_hba
,
3150 enum hisi_sas_debugfs_cache_type type
,
3153 u32 cache_dw_size
= HISI_SAS_IOST_ITCT_CACHE_DW_SZ
*
3154 HISI_SAS_IOST_ITCT_CACHE_NUM
;
3155 struct device
*dev
= hisi_hba
->dev
;
3159 hisi_sas_write32(hisi_hba
, TAB_RD_TYPE
, type
);
/* Poll for the 0xffffffff "data ready" marker dword. */
3161 for (i
= 0; i
< HISI_SAS_IOST_ITCT_CACHE_DW_SZ
; i
++) {
3162 val
= hisi_sas_read32(hisi_hba
, TAB_DFX
);
3163 if (val
== 0xffffffff)
3167 if (val
!= 0xffffffff) {
3168 dev_err(dev
, "Issue occurred in reading IOST/ITCT cache!\n");
3172 memset(buf
, 0, cache_dw_size
* 4);
/* Marker seen: stream the cache contents out of the DFX window. */
3175 for (i
= 1; i
< cache_dw_size
; i
++)
3176 buf
[i
] = hisi_sas_read32(hisi_hba
, TAB_DFX
);
/*
 * Prepare the selected phy for a BIST run: take the phy down, program
 * the per-linkrate FFE (transmitter emphasis) values from debugfs into
 * the TXDEEMPH_G1.. registers, and disable the ALOS (loss-of-signal)
 * check in SERDES_CFG so loopback can run without a live link partner
 * — the ALOS rationale is presumed from the mask name; confirm against
 * the hardware manual.
 * NOTE(review): braces and some local declarations were elided by the
 * extraction; code tokens below are unchanged.
 */
3179 static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba
*hisi_hba
)
3182 int phy_no
= hisi_hba
->debugfs_bist_phy_no
;
3186 hisi_sas_phy_enable(hisi_hba
, phy_no
, 0);
/* One FFE register per supported rate, 4 bytes apart from TXDEEMPH_G1. */
3189 for (i
= 0; i
< FFE_CFG_MAX
; i
++)
3190 hisi_sas_phy_write32(hisi_hba
, phy_no
, TXDEEMPH_G1
+ (i
* 0x4),
3191 hisi_hba
->debugfs_bist_ffe
[phy_no
][i
]);
3194 reg_val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SERDES_CFG
);
3195 reg_val
|= CFG_ALOS_CHK_DISABLE_MSK
;
3196 hisi_sas_phy_write32(hisi_hba
, phy_no
, SERDES_CFG
, reg_val
);
/*
 * Undo hisi_sas_bist_test_prep_v3_hw() after a BIST run: clear the
 * RX/TX BIST enables in SAS_PHY_BIST_CTRL, re-enable the ALOS check in
 * SERDES_CFG, reset the OOB link rate field of PROG_PHY_LINK_RATE to
 * its 1.5 Gbit default (0x8), and bring the phy back up.
 * NOTE(review): braces and part of the mask expression at original line
 * 3207 were elided by the extraction; code tokens below are unchanged.
 */
3199 static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba
*hisi_hba
)
3202 int phy_no
= hisi_hba
->debugfs_bist_phy_no
;
3204 /* disable loopback */
3205 reg_val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SAS_PHY_BIST_CTRL
);
3206 reg_val
&= ~(CFG_RX_BIST_EN_MSK
| CFG_TX_BIST_EN_MSK
|
3208 hisi_sas_phy_write32(hisi_hba
, phy_no
, SAS_PHY_BIST_CTRL
, reg_val
);
/* Re-enable the loss-of-signal check disabled during prep. */
3211 reg_val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SERDES_CFG
);
3212 reg_val
&= ~CFG_ALOS_CHK_DISABLE_MSK
;
3213 hisi_sas_phy_write32(hisi_hba
, phy_no
, SERDES_CFG
, reg_val
);
3215 /* restore the linkrate */
3216 reg_val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, PROG_PHY_LINK_RATE
);
3217 /* init OOB link rate as 1.5 Gbits */
3218 reg_val
&= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK
;
3219 reg_val
|= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF
);
3220 hisi_sas_phy_write32(hisi_hba
, phy_no
, PROG_PHY_LINK_RATE
, reg_val
);
3223 hisi_sas_phy_enable(hisi_hba
, phy_no
, 1);
/* Initial pattern values written to SAS_PHY_BIST_CODE/CODE1 for
 * non-fixed-data code modes. */
3226 #define SAS_PHY_BIST_CODE_INIT 0x1
3227 #define SAS_PHY_BIST_CODE1_INIT 0X80
/*
 * Start or stop a phy BIST (built-in self test) run, driven by the
 * debugfs BIST configuration cached in hisi_hba (phy number, link rate,
 * code mode, loopback path mode, FFE values, fixed code words).
 * On enable: prep the phy, program the requested link rate and code/
 * loopback mode, seed the BIST pattern registers (fixed-data values for
 * HISI_SAS_BIST_CODE_MODE_FIXED_DATA, the *_INIT defaults otherwise),
 * set the RX/TX BIST enables and clear the error counter.  On disable:
 * accumulate SAS_BIST_ERR_CNT into debugfs_bist_cnt and restore the phy.
 * mode_tmp maps path_mode to the CFG_LOOP_TEST_MODE field (2 remote /
 * 1 local — presumed from the loopback-mode table; confirm).
 * NOTE(review): the enable/disable branch structure, braces and several
 * continuation lines were elided by the extraction; code tokens below
 * are unchanged.
 */
3228 static int debugfs_set_bist_v3_hw(struct hisi_hba
*hisi_hba
, bool enable
)
3230 u32 reg_val
, mode_tmp
;
3231 u32 linkrate
= hisi_hba
->debugfs_bist_linkrate
;
3232 u32 phy_no
= hisi_hba
->debugfs_bist_phy_no
;
3233 u32
*ffe
= hisi_hba
->debugfs_bist_ffe
[phy_no
];
3234 u32 code_mode
= hisi_hba
->debugfs_bist_code_mode
;
3235 u32 path_mode
= hisi_hba
->debugfs_bist_mode
;
3236 u32
*fix_code
= &hisi_hba
->debugfs_bist_fixed_code
[0];
3237 struct device
*dev
= hisi_hba
->dev
;
3239 dev_info(dev
, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
3240 phy_no
, linkrate
, code_mode
, path_mode
,
3241 ffe
[FFE_SAS_1_5_GBPS
], ffe
[FFE_SAS_3_0_GBPS
],
3242 ffe
[FFE_SAS_6_0_GBPS
], ffe
[FFE_SAS_12_0_GBPS
],
3243 ffe
[FFE_SATA_1_5_GBPS
], ffe
[FFE_SATA_3_0_GBPS
],
3244 ffe
[FFE_SATA_6_0_GBPS
], fix_code
[FIXED_CODE
],
3245 fix_code
[FIXED_CODE_1
]);
3246 mode_tmp
= path_mode
? 2 : 1;
3248 /* some preparations before bist test */
3249 hisi_sas_bist_test_prep_v3_hw(hisi_hba
);
3251 /* set linkrate of bit test*/
3252 reg_val
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
3253 PROG_PHY_LINK_RATE
);
3254 reg_val
&= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK
;
3255 reg_val
|= (linkrate
<< CFG_PROG_OOB_PHY_LINK_RATE_OFF
);
3256 hisi_sas_phy_write32(hisi_hba
, phy_no
, PROG_PHY_LINK_RATE
,
3259 /* set code mode of bit test */
3260 reg_val
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
3262 reg_val
&= ~(CFG_BIST_MODE_SEL_MSK
| CFG_LOOP_TEST_MODE_MSK
|
3263 CFG_RX_BIST_EN_MSK
| CFG_TX_BIST_EN_MSK
|
3265 reg_val
|= ((code_mode
<< CFG_BIST_MODE_SEL_OFF
) |
3266 (mode_tmp
<< CFG_LOOP_TEST_MODE_OFF
) |
3268 hisi_sas_phy_write32(hisi_hba
, phy_no
, SAS_PHY_BIST_CTRL
,
3271 /* set the bist init value */
3272 if (code_mode
== HISI_SAS_BIST_CODE_MODE_FIXED_DATA
) {
3273 reg_val
= hisi_hba
->debugfs_bist_fixed_code
[0];
3274 hisi_sas_phy_write32(hisi_hba
, phy_no
,
3275 SAS_PHY_BIST_CODE
, reg_val
);
3277 reg_val
= hisi_hba
->debugfs_bist_fixed_code
[1];
3278 hisi_sas_phy_write32(hisi_hba
, phy_no
,
3279 SAS_PHY_BIST_CODE1
, reg_val
);
3281 hisi_sas_phy_write32(hisi_hba
, phy_no
,
3283 SAS_PHY_BIST_CODE_INIT
);
3284 hisi_sas_phy_write32(hisi_hba
, phy_no
,
3286 SAS_PHY_BIST_CODE1_INIT
);
3290 reg_val
|= (CFG_RX_BIST_EN_MSK
| CFG_TX_BIST_EN_MSK
);
3291 hisi_sas_phy_write32(hisi_hba
, phy_no
, SAS_PHY_BIST_CTRL
,
3294 /* clear error bit */
3296 hisi_sas_phy_read32(hisi_hba
, phy_no
, SAS_BIST_ERR_CNT
);
3298 /* disable bist test and recover it */
3299 hisi_hba
->debugfs_bist_cnt
+= hisi_sas_phy_read32(hisi_hba
,
3300 phy_no
, SAS_BIST_ERR_CNT
);
3301 hisi_sas_bist_test_restore_v3_hw(hisi_hba
);
/*
 * blk-mq .map_queues callback: distribute the host's hardware queue
 * maps across queue types.  DEFAULT gets the interrupt-driven CQs
 * (cq_nvecs), POLL gets the iopoll queues, any other type gets none.
 * POLL queues are mapped round-robin via blk_mq_map_queues(); the
 * interrupt queues are mapped by PCI MSI affinity via
 * blk_mq_pci_map_queues() with the BASE_VECTORS_V3_HW vector offset.
 * qoff accumulates the per-type queue_offset.
 * NOTE(review): braces, `continue`/`else` scaffolding and the i/qoff
 * declarations were elided by the extraction; code tokens below are
 * unchanged.
 */
3307 static void hisi_sas_map_queues(struct Scsi_Host
*shost
)
3309 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
3310 struct blk_mq_queue_map
*qmap
;
3313 for (i
= 0, qoff
= 0; i
< shost
->nr_maps
; i
++) {
3314 qmap
= &shost
->tag_set
.map
[i
];
3315 if (i
== HCTX_TYPE_DEFAULT
) {
3316 qmap
->nr_queues
= hisi_hba
->cq_nvecs
;
3317 } else if (i
== HCTX_TYPE_POLL
) {
3318 qmap
->nr_queues
= hisi_hba
->iopoll_q_cnt
;
3320 qmap
->nr_queues
= 0;
3324 /* At least one interrupt hardware queue */
3325 if (!qmap
->nr_queues
)
3326 WARN_ON(i
== HCTX_TYPE_DEFAULT
);
3327 qmap
->queue_offset
= qoff
;
3328 if (i
== HCTX_TYPE_POLL
)
3329 blk_mq_map_queues(qmap
);
3331 blk_mq_pci_map_queues(qmap
, hisi_hba
->pci_dev
,
3332 BASE_VECTORS_V3_HW
);
3333 qoff
+= qmap
->nr_queues
;
3337 static const struct scsi_host_template sht_v3_hw
= {
3338 LIBSAS_SHT_BASE_NO_SLAVE_INIT
3339 .device_configure
= device_configure_v3_hw
,
3340 .scan_finished
= hisi_sas_scan_finished
,
3341 .scan_start
= hisi_sas_scan_start
,
3342 .map_queues
= hisi_sas_map_queues
,
3343 .sg_tablesize
= HISI_SAS_SGE_PAGE_CNT
,
3344 .sg_prot_tablesize
= HISI_SAS_SGE_PAGE_CNT
,
3345 .slave_alloc
= hisi_sas_slave_alloc
,
3346 .shost_groups
= host_v3_hw_groups
,
3347 .sdev_groups
= sdev_groups_v3_hw
,
3348 .tag_alloc_policy
= BLK_TAG_ALLOC_RR
,
3349 .host_reset
= hisi_sas_host_reset
,
3351 .mq_poll
= queue_complete_v3_hw
,
3354 static const struct hisi_sas_hw hisi_sas_v3_hw
= {
3355 .setup_itct
= setup_itct_v3_hw
,
3356 .get_wideport_bitmap
= get_wideport_bitmap_v3_hw
,
3357 .complete_hdr_size
= sizeof(struct hisi_sas_complete_v3_hdr
),
3358 .clear_itct
= clear_itct_v3_hw
,
3359 .sl_notify_ssp
= sl_notify_ssp_v3_hw
,
3360 .prep_ssp
= prep_ssp_v3_hw
,
3361 .prep_smp
= prep_smp_v3_hw
,
3362 .prep_stp
= prep_ata_v3_hw
,
3363 .prep_abort
= prep_abort_v3_hw
,
3364 .start_delivery
= start_delivery_v3_hw
,
3365 .phys_init
= phys_init_v3_hw
,
3366 .phy_start
= start_phy_v3_hw
,
3367 .phy_disable
= disable_phy_v3_hw
,
3368 .phy_hard_reset
= phy_hard_reset_v3_hw
,
3369 .phy_get_max_linkrate
= phy_get_max_linkrate_v3_hw
,
3370 .phy_set_linkrate
= phy_set_linkrate_v3_hw
,
3371 .dereg_device
= dereg_device_v3_hw
,
3372 .soft_reset
= soft_reset_v3_hw
,
3373 .get_phys_state
= get_phys_state_v3_hw
,
3374 .get_events
= phy_get_events_v3_hw
,
3375 .write_gpio
= write_gpio_v3_hw
,
3376 .wait_cmds_complete_timeout
= wait_cmds_complete_timeout_v3_hw
,
3377 .debugfs_snapshot_regs
= debugfs_snapshot_regs_v3_hw
,
/*
 * Sanity-check the values reported by firmware before using them:
 * v3 hardware supports at most 8 phys and 16 queues.  Returns an error
 * (negative, per the callers' `< 0` checks — the return lines were
 * elided) when a value is out of range, 0 otherwise.
 * NOTE(review): braces and return statements were elided by the
 * extraction; code tokens below are unchanged.
 */
3380 static int check_fw_info_v3_hw(struct hisi_hba
*hisi_hba
)
3382 struct device
*dev
= hisi_hba
->dev
;
3384 if (hisi_hba
->n_phy
< 0 || hisi_hba
->n_phy
> 8) {
3385 dev_err(dev
, "invalid phy number from FW\n");
3389 if (hisi_hba
->queue_count
< 0 || hisi_hba
->queue_count
> 16) {
3390 dev_err(dev
, "invalid queue count from FW\n");
/*
 * Allocate and initialise the Scsi_Host (with hisi_hba as host private
 * data) for a v3 PCI controller: wire up the hw ops table, pci_dev/dev
 * back-pointers, the reset work item and the libsas HA pointer; validate
 * the `prot_mask` and `experimental_iopoll_q_cnt` module parameters
 * (falling back to defaults with an error message); fetch and validate
 * firmware info; then run hisi_sas_alloc().  The error path releases the
 * host with scsi_host_put() and returns NULL (return/goto lines elided).
 * NOTE(review): braces, goto labels and return statements were elided by
 * the extraction; code tokens below are unchanged.
 */
3397 static struct Scsi_Host
*
3398 hisi_sas_shost_alloc_pci(struct pci_dev
*pdev
)
3400 struct Scsi_Host
*shost
;
3401 struct hisi_hba
*hisi_hba
;
3402 struct device
*dev
= &pdev
->dev
;
3404 shost
= scsi_host_alloc(&sht_v3_hw
, sizeof(*hisi_hba
));
3406 dev_err(dev
, "shost alloc failed\n");
3409 hisi_hba
= shost_priv(shost
);
3411 INIT_WORK(&hisi_hba
->rst_work
, hisi_sas_rst_work_handler
);
3412 hisi_hba
->hw
= &hisi_sas_v3_hw
;
3413 hisi_hba
->pci_dev
= pdev
;
3414 hisi_hba
->dev
= dev
;
3415 hisi_hba
->shost
= shost
;
3416 SHOST_TO_SAS_HA(shost
) = &hisi_hba
->sha
;
/* Module parameter validation: fall back to defaults on bad input. */
3418 if (prot_mask
& ~HISI_SAS_PROT_MASK
)
3419 dev_err(dev
, "unsupported protection mask 0x%x, using default (0x0)\n",
3422 hisi_hba
->prot_mask
= prot_mask
;
3424 if (hisi_sas_get_fw_info(hisi_hba
) < 0)
3427 if (check_fw_info_v3_hw(hisi_hba
) < 0)
3430 if (experimental_iopoll_q_cnt
< 0 ||
3431 experimental_iopoll_q_cnt
>= hisi_hba
->queue_count
)
3432 dev_err(dev
, "iopoll queue count %d cannot exceed or equal 16, using default 0\n",
3433 experimental_iopoll_q_cnt
);
3435 hisi_hba
->iopoll_q_cnt
= experimental_iopoll_q_cnt
;
3437 if (hisi_sas_alloc(hisi_hba
)) {
3438 hisi_sas_free(hisi_hba
);
3444 scsi_host_put(shost
);
3445 dev_err(dev
, "shost alloc failed\n");
3449 static void debugfs_snapshot_cq_reg_v3_hw(struct hisi_hba
*hisi_hba
)
3451 int queue_entry_size
= hisi_hba
->hw
->complete_hdr_size
;
3452 int dump_index
= hisi_hba
->debugfs_dump_index
;
3455 for (i
= 0; i
< hisi_hba
->queue_count
; i
++)
3456 memcpy(hisi_hba
->debugfs_cq
[dump_index
][i
].complete_hdr
,
3457 hisi_hba
->complete_hdr
[i
],
3458 HISI_SAS_QUEUE_SLOTS
* queue_entry_size
);
3461 static void debugfs_snapshot_dq_reg_v3_hw(struct hisi_hba
*hisi_hba
)
3463 int queue_entry_size
= sizeof(struct hisi_sas_cmd_hdr
);
3464 int dump_index
= hisi_hba
->debugfs_dump_index
;
3467 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
3468 struct hisi_sas_cmd_hdr
*debugfs_cmd_hdr
, *cmd_hdr
;
3471 debugfs_cmd_hdr
= hisi_hba
->debugfs_dq
[dump_index
][i
].hdr
;
3472 cmd_hdr
= hisi_hba
->cmd_hdr
[i
];
3474 for (j
= 0; j
< HISI_SAS_QUEUE_SLOTS
; j
++)
3475 memcpy(&debugfs_cmd_hdr
[j
], &cmd_hdr
[j
],
3480 static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba
*hisi_hba
)
3482 int dump_index
= hisi_hba
->debugfs_dump_index
;
3483 const struct hisi_sas_debugfs_reg
*port
= &debugfs_port_reg
;
3488 for (phy_cnt
= 0; phy_cnt
< hisi_hba
->n_phy
; phy_cnt
++) {
3489 databuf
= hisi_hba
->debugfs_port_reg
[dump_index
][phy_cnt
].data
;
3490 for (i
= 0; i
< port
->count
; i
++, databuf
++) {
3491 offset
= port
->base_off
+ 4 * i
;
3492 *databuf
= hisi_sas_phy_read32(hisi_hba
, phy_cnt
,
/*
 * Snapshot the global register block (base offset 0) into the
 * DEBUGFS_GLOBAL buffer of the current dump slot: one 32-bit read per
 * register in the debugfs_global_reg table.
 * NOTE(review): braces and the `i` declaration were elided by the
 * extraction; code tokens below are unchanged.
 */
3498 static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba
*hisi_hba
)
3500 int dump_index
= hisi_hba
->debugfs_dump_index
;
3501 u32
*databuf
= hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_GLOBAL
].data
;
3504 for (i
= 0; i
< debugfs_global_reg
.count
; i
++, databuf
++)
3505 *databuf
= hisi_sas_read32(hisi_hba
, 4 * i
);
/*
 * Snapshot the AXI master register block into the DEBUGFS_AXI buffer of
 * the current dump slot; register addresses are axi->base_off
 * (AXI_MASTER_CFG_BASE) plus a 4-byte stride.
 * NOTE(review): braces and the `i` declaration were elided by the
 * extraction; code tokens below are unchanged.
 */
3508 static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba
*hisi_hba
)
3510 int dump_index
= hisi_hba
->debugfs_dump_index
;
3511 u32
*databuf
= hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_AXI
].data
;
3512 const struct hisi_sas_debugfs_reg
*axi
= &debugfs_axi_reg
;
3515 for (i
= 0; i
< axi
->count
; i
++, databuf
++)
3516 *databuf
= hisi_sas_read32(hisi_hba
, 4 * i
+ axi
->base_off
);
/*
 * Snapshot the RAS register block into the DEBUGFS_RAS buffer of the
 * current dump slot; register addresses are ras->base_off (RAS_BASE)
 * plus a 4-byte stride.  Structure mirrors the AXI snapshot above.
 * NOTE(review): braces and the `i` declaration were elided by the
 * extraction; code tokens below are unchanged.
 */
3519 static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba
*hisi_hba
)
3521 int dump_index
= hisi_hba
->debugfs_dump_index
;
3522 u32
*databuf
= hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_RAS
].data
;
3523 const struct hisi_sas_debugfs_reg
*ras
= &debugfs_ras_reg
;
3526 for (i
= 0; i
< ras
->count
; i
++, databuf
++)
3527 *databuf
= hisi_sas_read32(hisi_hba
, 4 * i
+ ras
->base_off
);
3530 static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba
*hisi_hba
)
3532 int dump_index
= hisi_hba
->debugfs_dump_index
;
3533 void *cachebuf
= hisi_hba
->debugfs_itct_cache
[dump_index
].cache
;
3534 void *databuf
= hisi_hba
->debugfs_itct
[dump_index
].itct
;
3535 struct hisi_sas_itct
*itct
;
3538 read_iost_itct_cache_v3_hw(hisi_hba
, HISI_SAS_ITCT_CACHE
, cachebuf
);
3540 itct
= hisi_hba
->itct
;
3542 for (i
= 0; i
< HISI_SAS_MAX_ITCT_ENTRIES
; i
++, itct
++) {
3543 memcpy(databuf
, itct
, sizeof(struct hisi_sas_itct
));
3544 databuf
+= sizeof(struct hisi_sas_itct
);
3548 static void debugfs_snapshot_iost_reg_v3_hw(struct hisi_hba
*hisi_hba
)
3550 int dump_index
= hisi_hba
->debugfs_dump_index
;
3551 int max_command_entries
= HISI_SAS_MAX_COMMANDS
;
3552 void *cachebuf
= hisi_hba
->debugfs_iost_cache
[dump_index
].cache
;
3553 void *databuf
= hisi_hba
->debugfs_iost
[dump_index
].iost
;
3554 struct hisi_sas_iost
*iost
;
3557 read_iost_itct_cache_v3_hw(hisi_hba
, HISI_SAS_IOST_CACHE
, cachebuf
);
3559 iost
= hisi_hba
->iost
;
3561 for (i
= 0; i
< max_command_entries
; i
++, iost
++) {
3562 memcpy(databuf
, iost
, sizeof(struct hisi_sas_iost
));
3563 databuf
+= sizeof(struct hisi_sas_iost
);
3568 debugfs_to_reg_name_v3_hw(int off
, int base_off
,
3569 const struct hisi_sas_debugfs_reg_lu
*lu
)
3571 for (; lu
->name
; lu
++) {
3572 if (off
== lu
->off
- base_off
)
/*
 * Return whether a debugfs dump buffer has been generated: the dump
 * pointer is non-NULL once a snapshot has populated it.
 */
static bool debugfs_dump_is_generated_v3_hw(void *p)
{
	return p != NULL;
}
3584 static void debugfs_print_reg_v3_hw(u32
*regs_val
, struct seq_file
*s
,
3585 const struct hisi_sas_debugfs_reg
*reg
)
3589 for (i
= 0; i
< reg
->count
; i
++) {
3593 name
= debugfs_to_reg_name_v3_hw(off
, reg
->base_off
,
3597 seq_printf(s
, "0x%08x 0x%08x %s\n", off
,
3600 seq_printf(s
, "0x%08x 0x%08x\n", off
,
3605 static int debugfs_global_v3_hw_show(struct seq_file
*s
, void *p
)
3607 struct hisi_sas_debugfs_regs
*global
= s
->private;
3609 if (!debugfs_dump_is_generated_v3_hw(global
->data
))
3612 debugfs_print_reg_v3_hw(global
->data
, s
,
3613 &debugfs_global_reg
);
3617 DEFINE_SHOW_ATTRIBUTE(debugfs_global_v3_hw
);
3619 static int debugfs_axi_v3_hw_show(struct seq_file
*s
, void *p
)
3621 struct hisi_sas_debugfs_regs
*axi
= s
->private;
3623 if (!debugfs_dump_is_generated_v3_hw(axi
->data
))
3626 debugfs_print_reg_v3_hw(axi
->data
, s
,
3631 DEFINE_SHOW_ATTRIBUTE(debugfs_axi_v3_hw
);
3633 static int debugfs_ras_v3_hw_show(struct seq_file
*s
, void *p
)
3635 struct hisi_sas_debugfs_regs
*ras
= s
->private;
3637 if (!debugfs_dump_is_generated_v3_hw(ras
->data
))
3640 debugfs_print_reg_v3_hw(ras
->data
, s
,
3645 DEFINE_SHOW_ATTRIBUTE(debugfs_ras_v3_hw
);
3647 static int debugfs_port_v3_hw_show(struct seq_file
*s
, void *p
)
3649 struct hisi_sas_debugfs_port
*port
= s
->private;
3650 const struct hisi_sas_debugfs_reg
*reg_port
= &debugfs_port_reg
;
3652 if (!debugfs_dump_is_generated_v3_hw(port
->data
))
3655 debugfs_print_reg_v3_hw(port
->data
, s
, reg_port
);
3659 DEFINE_SHOW_ATTRIBUTE(debugfs_port_v3_hw
);
/*
 * Print @sz bytes at @ptr as little-endian 64-bit words to the seq
 * file, prefixed by an "index %04d" header and broken into indented
 * rows (row-break logic at original lines 3670/3672-3675 was elided by
 * the extraction).
 * NOTE(review): code tokens below are unchanged.
 */
3661 static void debugfs_show_row_64_v3_hw(struct seq_file
*s
, int index
,
3662 int sz
, __le64
*ptr
)
3666 /* completion header size not fixed per HW version */
3667 seq_printf(s
, "index %04d:\n\t", index
);
3668 for (i
= 1; i
<= sz
/ 8; i
++, ptr
++) {
3669 seq_printf(s
, " 0x%016llx", le64_to_cpu(*ptr
));
3671 seq_puts(s
, "\n\t");
/*
 * 32-bit variant of debugfs_show_row_64_v3_hw(): print @sz bytes at
 * @ptr as little-endian 32-bit words, with the same indexed, indented
 * row layout (row-break lines elided by the extraction).
 * NOTE(review): code tokens below are unchanged.
 */
3677 static void debugfs_show_row_32_v3_hw(struct seq_file
*s
, int index
,
3678 int sz
, __le32
*ptr
)
3682 /* completion header size not fixed per HW version */
3683 seq_printf(s
, "index %04d:\n\t", index
);
3684 for (i
= 1; i
<= sz
/ 4; i
++, ptr
++) {
3685 seq_printf(s
, " 0x%08x", le32_to_cpu(*ptr
));
3687 seq_puts(s
, "\n\t");
3692 static void debugfs_cq_show_slot_v3_hw(struct seq_file
*s
, int slot
,
3693 struct hisi_sas_debugfs_cq
*debugfs_cq
)
3695 struct hisi_sas_cq
*cq
= debugfs_cq
->cq
;
3696 struct hisi_hba
*hisi_hba
= cq
->hisi_hba
;
3697 __le32
*complete_hdr
= debugfs_cq
->complete_hdr
+
3698 (hisi_hba
->hw
->complete_hdr_size
* slot
);
3700 debugfs_show_row_32_v3_hw(s
, slot
,
3701 hisi_hba
->hw
->complete_hdr_size
,
3705 static int debugfs_cq_v3_hw_show(struct seq_file
*s
, void *p
)
3707 struct hisi_sas_debugfs_cq
*debugfs_cq
= s
->private;
3710 if (!debugfs_dump_is_generated_v3_hw(debugfs_cq
->complete_hdr
))
3713 for (slot
= 0; slot
< HISI_SAS_QUEUE_SLOTS
; slot
++)
3714 debugfs_cq_show_slot_v3_hw(s
, slot
, debugfs_cq
);
3718 DEFINE_SHOW_ATTRIBUTE(debugfs_cq_v3_hw
);
3720 static void debugfs_dq_show_slot_v3_hw(struct seq_file
*s
, int slot
,
3723 struct hisi_sas_debugfs_dq
*debugfs_dq
= dq_ptr
;
3724 void *cmd_queue
= debugfs_dq
->hdr
;
3725 __le32
*cmd_hdr
= cmd_queue
+
3726 sizeof(struct hisi_sas_cmd_hdr
) * slot
;
3728 debugfs_show_row_32_v3_hw(s
, slot
, sizeof(struct hisi_sas_cmd_hdr
),
3732 static int debugfs_dq_v3_hw_show(struct seq_file
*s
, void *p
)
3734 struct hisi_sas_debugfs_dq
*debugfs_dq
= s
->private;
3737 if (!debugfs_dump_is_generated_v3_hw(debugfs_dq
->hdr
))
3740 for (slot
= 0; slot
< HISI_SAS_QUEUE_SLOTS
; slot
++)
3741 debugfs_dq_show_slot_v3_hw(s
, slot
, s
->private);
3745 DEFINE_SHOW_ATTRIBUTE(debugfs_dq_v3_hw
);
3747 static int debugfs_iost_v3_hw_show(struct seq_file
*s
, void *p
)
3749 struct hisi_sas_debugfs_iost
*debugfs_iost
= s
->private;
3750 struct hisi_sas_iost
*iost
= debugfs_iost
->iost
;
3751 int i
, max_command_entries
= HISI_SAS_MAX_COMMANDS
;
3753 if (!debugfs_dump_is_generated_v3_hw(iost
))
3756 for (i
= 0; i
< max_command_entries
; i
++, iost
++) {
3757 __le64
*data
= &iost
->qw0
;
3759 debugfs_show_row_64_v3_hw(s
, i
, sizeof(*iost
), data
);
3764 DEFINE_SHOW_ATTRIBUTE(debugfs_iost_v3_hw
);
3766 static int debugfs_iost_cache_v3_hw_show(struct seq_file
*s
, void *p
)
3768 struct hisi_sas_debugfs_iost_cache
*debugfs_iost_cache
= s
->private;
3769 struct hisi_sas_iost_itct_cache
*iost_cache
=
3770 debugfs_iost_cache
->cache
;
3771 u32 cache_size
= HISI_SAS_IOST_ITCT_CACHE_DW_SZ
* 4;
3775 if (!debugfs_dump_is_generated_v3_hw(iost_cache
))
3778 for (i
= 0; i
< HISI_SAS_IOST_ITCT_CACHE_NUM
; i
++, iost_cache
++) {
3780 * Data struct of IOST cache:
3781 * Data[1]: BIT0~15: Table index
3783 * Data[2]~[9]: IOST table
3785 tab_idx
= (iost_cache
->data
[1] & 0xffff);
3786 iost
= (__le64
*)iost_cache
;
3788 debugfs_show_row_64_v3_hw(s
, tab_idx
, cache_size
, iost
);
3793 DEFINE_SHOW_ATTRIBUTE(debugfs_iost_cache_v3_hw
);
3795 static int debugfs_itct_v3_hw_show(struct seq_file
*s
, void *p
)
3798 struct hisi_sas_debugfs_itct
*debugfs_itct
= s
->private;
3799 struct hisi_sas_itct
*itct
= debugfs_itct
->itct
;
3801 if (!debugfs_dump_is_generated_v3_hw(itct
))
3804 for (i
= 0; i
< HISI_SAS_MAX_ITCT_ENTRIES
; i
++, itct
++) {
3805 __le64
*data
= &itct
->qw0
;
3807 debugfs_show_row_64_v3_hw(s
, i
, sizeof(*itct
), data
);
3812 DEFINE_SHOW_ATTRIBUTE(debugfs_itct_v3_hw
);
3814 static int debugfs_itct_cache_v3_hw_show(struct seq_file
*s
, void *p
)
3816 struct hisi_sas_debugfs_itct_cache
*debugfs_itct_cache
= s
->private;
3817 struct hisi_sas_iost_itct_cache
*itct_cache
=
3818 debugfs_itct_cache
->cache
;
3819 u32 cache_size
= HISI_SAS_IOST_ITCT_CACHE_DW_SZ
* 4;
3823 if (!debugfs_dump_is_generated_v3_hw(itct_cache
))
3826 for (i
= 0; i
< HISI_SAS_IOST_ITCT_CACHE_NUM
; i
++, itct_cache
++) {
3828 * Data struct of ITCT cache:
3829 * Data[1]: BIT0~15: Table index
3831 * Data[2]~[9]: ITCT table
3833 tab_idx
= itct_cache
->data
[1] & 0xffff;
3834 itct
= (__le64
*)itct_cache
;
3836 debugfs_show_row_64_v3_hw(s
, tab_idx
, cache_size
, itct
);
3841 DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw
);
3843 static void debugfs_create_files_v3_hw(struct hisi_hba
*hisi_hba
, int index
)
3845 u64
*debugfs_timestamp
;
3846 struct dentry
*dump_dentry
;
3847 struct dentry
*dentry
;
3853 snprintf(name
, 256, "%d", index
);
3855 dump_dentry
= debugfs_create_dir(name
, hisi_hba
->debugfs_dump_dentry
);
3857 debugfs_timestamp
= &hisi_hba
->debugfs_timestamp
[index
];
3859 debugfs_create_u64("timestamp", 0400, dump_dentry
,
3862 debugfs_create_file("global", 0400, dump_dentry
,
3863 &hisi_hba
->debugfs_regs
[index
][DEBUGFS_GLOBAL
],
3864 &debugfs_global_v3_hw_fops
);
3866 /* Create port dir and files */
3867 dentry
= debugfs_create_dir("port", dump_dentry
);
3868 for (p
= 0; p
< hisi_hba
->n_phy
; p
++) {
3869 snprintf(name
, 256, "%d", p
);
3871 debugfs_create_file(name
, 0400, dentry
,
3872 &hisi_hba
->debugfs_port_reg
[index
][p
],
3873 &debugfs_port_v3_hw_fops
);
3876 /* Create CQ dir and files */
3877 dentry
= debugfs_create_dir("cq", dump_dentry
);
3878 for (c
= 0; c
< hisi_hba
->queue_count
; c
++) {
3879 snprintf(name
, 256, "%d", c
);
3881 debugfs_create_file(name
, 0400, dentry
,
3882 &hisi_hba
->debugfs_cq
[index
][c
],
3883 &debugfs_cq_v3_hw_fops
);
3886 /* Create DQ dir and files */
3887 dentry
= debugfs_create_dir("dq", dump_dentry
);
3888 for (d
= 0; d
< hisi_hba
->queue_count
; d
++) {
3889 snprintf(name
, 256, "%d", d
);
3891 debugfs_create_file(name
, 0400, dentry
,
3892 &hisi_hba
->debugfs_dq
[index
][d
],
3893 &debugfs_dq_v3_hw_fops
);
3896 debugfs_create_file("iost", 0400, dump_dentry
,
3897 &hisi_hba
->debugfs_iost
[index
],
3898 &debugfs_iost_v3_hw_fops
);
3900 debugfs_create_file("iost_cache", 0400, dump_dentry
,
3901 &hisi_hba
->debugfs_iost_cache
[index
],
3902 &debugfs_iost_cache_v3_hw_fops
);
3904 debugfs_create_file("itct", 0400, dump_dentry
,
3905 &hisi_hba
->debugfs_itct
[index
],
3906 &debugfs_itct_v3_hw_fops
);
3908 debugfs_create_file("itct_cache", 0400, dump_dentry
,
3909 &hisi_hba
->debugfs_itct_cache
[index
],
3910 &debugfs_itct_cache_v3_hw_fops
);
3912 debugfs_create_file("axi", 0400, dump_dentry
,
3913 &hisi_hba
->debugfs_regs
[index
][DEBUGFS_AXI
],
3914 &debugfs_axi_v3_hw_fops
);
3916 debugfs_create_file("ras", 0400, dump_dentry
,
3917 &hisi_hba
->debugfs_regs
[index
][DEBUGFS_RAS
],
3918 &debugfs_ras_v3_hw_fops
);
3921 static ssize_t
debugfs_trigger_dump_v3_hw_write(struct file
*file
,
3922 const char __user
*user_buf
,
3923 size_t count
, loff_t
*ppos
)
3925 struct hisi_hba
*hisi_hba
= file
->f_inode
->i_private
;
3931 if (copy_from_user(buf
, user_buf
, count
))
3937 down(&hisi_hba
->sem
);
3938 if (debugfs_snapshot_regs_v3_hw(hisi_hba
)) {
3947 static const struct file_operations debugfs_trigger_dump_v3_hw_fops
= {
3948 .write
= &debugfs_trigger_dump_v3_hw_write
,
3949 .owner
= THIS_MODULE
,
3953 HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL
= 0,
3954 HISI_SAS_BIST_LOOPBACK_MODE_SERDES
,
3955 HISI_SAS_BIST_LOOPBACK_MODE_REMOTE
,
3958 static const struct {
3961 } debugfs_loop_linkrate_v3_hw
[] = {
3962 { SAS_LINK_RATE_1_5_GBPS
, "1.5 Gbit" },
3963 { SAS_LINK_RATE_3_0_GBPS
, "3.0 Gbit" },
3964 { SAS_LINK_RATE_6_0_GBPS
, "6.0 Gbit" },
3965 { SAS_LINK_RATE_12_0_GBPS
, "12.0 Gbit" },
3968 static int debugfs_bist_linkrate_v3_hw_show(struct seq_file
*s
, void *p
)
3970 struct hisi_hba
*hisi_hba
= s
->private;
3973 for (i
= 0; i
< ARRAY_SIZE(debugfs_loop_linkrate_v3_hw
); i
++) {
3974 int match
= (hisi_hba
->debugfs_bist_linkrate
==
3975 debugfs_loop_linkrate_v3_hw
[i
].value
);
3977 seq_printf(s
, "%s%s%s ", match
? "[" : "",
3978 debugfs_loop_linkrate_v3_hw
[i
].name
,
3986 static ssize_t
debugfs_bist_linkrate_v3_hw_write(struct file
*filp
,
3987 const char __user
*buf
,
3988 size_t count
, loff_t
*ppos
)
3990 struct seq_file
*m
= filp
->private_data
;
3991 struct hisi_hba
*hisi_hba
= m
->private;
3992 char kbuf
[16] = {}, *pkbuf
;
3996 if (hisi_hba
->debugfs_bist_enable
)
3999 if (count
>= sizeof(kbuf
))
4002 if (copy_from_user(kbuf
, buf
, count
))
4005 pkbuf
= strstrip(kbuf
);
4007 for (i
= 0; i
< ARRAY_SIZE(debugfs_loop_linkrate_v3_hw
); i
++) {
4008 if (!strncmp(debugfs_loop_linkrate_v3_hw
[i
].name
,
4010 hisi_hba
->debugfs_bist_linkrate
=
4011 debugfs_loop_linkrate_v3_hw
[i
].value
;
4022 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_linkrate_v3_hw
);
4024 static const struct {
4027 } debugfs_loop_code_mode_v3_hw
[] = {
4028 { HISI_SAS_BIST_CODE_MODE_PRBS7
, "PRBS7" },
4029 { HISI_SAS_BIST_CODE_MODE_PRBS23
, "PRBS23" },
4030 { HISI_SAS_BIST_CODE_MODE_PRBS31
, "PRBS31" },
4031 { HISI_SAS_BIST_CODE_MODE_JTPAT
, "JTPAT" },
4032 { HISI_SAS_BIST_CODE_MODE_CJTPAT
, "CJTPAT" },
4033 { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0
, "SCRAMBED_0" },
4034 { HISI_SAS_BIST_CODE_MODE_TRAIN
, "TRAIN" },
4035 { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE
, "TRAIN_DONE" },
4036 { HISI_SAS_BIST_CODE_MODE_HFTP
, "HFTP" },
4037 { HISI_SAS_BIST_CODE_MODE_MFTP
, "MFTP" },
4038 { HISI_SAS_BIST_CODE_MODE_LFTP
, "LFTP" },
4039 { HISI_SAS_BIST_CODE_MODE_FIXED_DATA
, "FIXED_DATA" },
4042 static int debugfs_bist_code_mode_v3_hw_show(struct seq_file
*s
, void *p
)
4044 struct hisi_hba
*hisi_hba
= s
->private;
4047 for (i
= 0; i
< ARRAY_SIZE(debugfs_loop_code_mode_v3_hw
); i
++) {
4048 int match
= (hisi_hba
->debugfs_bist_code_mode
==
4049 debugfs_loop_code_mode_v3_hw
[i
].value
);
4051 seq_printf(s
, "%s%s%s ", match
? "[" : "",
4052 debugfs_loop_code_mode_v3_hw
[i
].name
,
4060 static ssize_t
debugfs_bist_code_mode_v3_hw_write(struct file
*filp
,
4061 const char __user
*buf
,
4065 struct seq_file
*m
= filp
->private_data
;
4066 struct hisi_hba
*hisi_hba
= m
->private;
4067 char kbuf
[16] = {}, *pkbuf
;
4071 if (hisi_hba
->debugfs_bist_enable
)
4074 if (count
>= sizeof(kbuf
))
4077 if (copy_from_user(kbuf
, buf
, count
))
4080 pkbuf
= strstrip(kbuf
);
4082 for (i
= 0; i
< ARRAY_SIZE(debugfs_loop_code_mode_v3_hw
); i
++) {
4083 if (!strncmp(debugfs_loop_code_mode_v3_hw
[i
].name
,
4085 hisi_hba
->debugfs_bist_code_mode
=
4086 debugfs_loop_code_mode_v3_hw
[i
].value
;
4097 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_code_mode_v3_hw
);
4099 static ssize_t
debugfs_bist_phy_v3_hw_write(struct file
*filp
,
4100 const char __user
*buf
,
4101 size_t count
, loff_t
*ppos
)
4103 struct seq_file
*m
= filp
->private_data
;
4104 struct hisi_hba
*hisi_hba
= m
->private;
4105 unsigned int phy_no
;
4108 if (hisi_hba
->debugfs_bist_enable
)
4111 val
= kstrtouint_from_user(buf
, count
, 0, &phy_no
);
4115 if (phy_no
>= hisi_hba
->n_phy
)
4118 hisi_hba
->debugfs_bist_phy_no
= phy_no
;
4123 static int debugfs_bist_phy_v3_hw_show(struct seq_file
*s
, void *p
)
4125 struct hisi_hba
*hisi_hba
= s
->private;
4127 seq_printf(s
, "%d\n", hisi_hba
->debugfs_bist_phy_no
);
4131 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_phy_v3_hw
);
4133 static ssize_t
debugfs_bist_cnt_v3_hw_write(struct file
*filp
,
4134 const char __user
*buf
,
4135 size_t count
, loff_t
*ppos
)
4137 struct seq_file
*m
= filp
->private_data
;
4138 struct hisi_hba
*hisi_hba
= m
->private;
4142 if (hisi_hba
->debugfs_bist_enable
)
4145 val
= kstrtouint_from_user(buf
, count
, 0, &cnt
);
4152 hisi_hba
->debugfs_bist_cnt
= 0;
4156 static int debugfs_bist_cnt_v3_hw_show(struct seq_file
*s
, void *p
)
4158 struct hisi_hba
*hisi_hba
= s
->private;
4160 seq_printf(s
, "%u\n", hisi_hba
->debugfs_bist_cnt
);
4164 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_cnt_v3_hw
);
4166 static const struct {
4169 } debugfs_loop_modes_v3_hw
[] = {
4170 { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL
, "digital" },
4171 { HISI_SAS_BIST_LOOPBACK_MODE_SERDES
, "serdes" },
4172 { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE
, "remote" },
4175 static int debugfs_bist_mode_v3_hw_show(struct seq_file
*s
, void *p
)
4177 struct hisi_hba
*hisi_hba
= s
->private;
4180 for (i
= 0; i
< ARRAY_SIZE(debugfs_loop_modes_v3_hw
); i
++) {
4181 int match
= (hisi_hba
->debugfs_bist_mode
==
4182 debugfs_loop_modes_v3_hw
[i
].value
);
4184 seq_printf(s
, "%s%s%s ", match
? "[" : "",
4185 debugfs_loop_modes_v3_hw
[i
].name
,
4193 static ssize_t
debugfs_bist_mode_v3_hw_write(struct file
*filp
,
4194 const char __user
*buf
,
4195 size_t count
, loff_t
*ppos
)
4197 struct seq_file
*m
= filp
->private_data
;
4198 struct hisi_hba
*hisi_hba
= m
->private;
4199 char kbuf
[16] = {}, *pkbuf
;
4203 if (hisi_hba
->debugfs_bist_enable
)
4206 if (count
>= sizeof(kbuf
))
4209 if (copy_from_user(kbuf
, buf
, count
))
4212 pkbuf
= strstrip(kbuf
);
4214 for (i
= 0; i
< ARRAY_SIZE(debugfs_loop_modes_v3_hw
); i
++) {
4215 if (!strncmp(debugfs_loop_modes_v3_hw
[i
].name
, pkbuf
, 16)) {
4216 hisi_hba
->debugfs_bist_mode
=
4217 debugfs_loop_modes_v3_hw
[i
].value
;
4228 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_mode_v3_hw
);
4230 static ssize_t
debugfs_bist_enable_v3_hw_write(struct file
*filp
,
4231 const char __user
*buf
,
4232 size_t count
, loff_t
*ppos
)
4234 struct seq_file
*m
= filp
->private_data
;
4235 struct hisi_hba
*hisi_hba
= m
->private;
4236 unsigned int enable
;
4239 val
= kstrtouint_from_user(buf
, count
, 0, &enable
);
4246 if (enable
== hisi_hba
->debugfs_bist_enable
)
4249 val
= debugfs_set_bist_v3_hw(hisi_hba
, enable
);
4253 hisi_hba
->debugfs_bist_enable
= enable
;
4258 static int debugfs_bist_enable_v3_hw_show(struct seq_file
*s
, void *p
)
4260 struct hisi_hba
*hisi_hba
= s
->private;
4262 seq_printf(s
, "%d\n", hisi_hba
->debugfs_bist_enable
);
4266 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_enable_v3_hw
);
4268 static const struct {
4270 } debugfs_ffe_name_v3_hw
[FFE_CFG_MAX
] = {
4274 { "SAS_12_0_GBPS" },
4276 { "SATA_1_5_GBPS" },
4277 { "SATA_3_0_GBPS" },
4278 { "SATA_6_0_GBPS" },
4281 static ssize_t
debugfs_v3_hw_write(struct file
*filp
,
4282 const char __user
*buf
,
4283 size_t count
, loff_t
*ppos
)
4285 struct seq_file
*m
= filp
->private_data
;
4286 u32
*val
= m
->private;
4289 res
= kstrtouint_from_user(buf
, count
, 0, val
);
4296 static int debugfs_v3_hw_show(struct seq_file
*s
, void *p
)
4298 u32
*val
= s
->private;
4300 seq_printf(s
, "0x%x\n", *val
);
4304 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_v3_hw
);
4306 static ssize_t
debugfs_phy_down_cnt_v3_hw_write(struct file
*filp
,
4307 const char __user
*buf
,
4308 size_t count
, loff_t
*ppos
)
4310 struct seq_file
*s
= filp
->private_data
;
4311 struct hisi_sas_phy
*phy
= s
->private;
4312 unsigned int set_val
;
4315 res
= kstrtouint_from_user(buf
, count
, 0, &set_val
);
4322 atomic_set(&phy
->down_cnt
, 0);
4327 static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file
*s
, void *p
)
4329 struct hisi_sas_phy
*phy
= s
->private;
4331 seq_printf(s
, "%d\n", atomic_read(&phy
->down_cnt
));
4335 DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_phy_down_cnt_v3_hw
);
4337 enum fifo_dump_mode_v3_hw
{
4338 FIFO_DUMP_FORVER
= (1U << 0),
4339 FIFO_DUMP_AFTER_TRIGGER
= (1U << 1),
4340 FIFO_DUMP_UNTILL_TRIGGER
= (1U << 2),
4343 enum fifo_trigger_mode_v3_hw
{
4344 FIFO_TRIGGER_EDGE
= (1U << 0),
4345 FIFO_TRIGGER_SAME_LEVEL
= (1U << 1),
4346 FIFO_TRIGGER_DIFF_LEVEL
= (1U << 2),
/*
 * Validate the user-supplied trace-FIFO configuration for @phy before
 * it is written to hardware: signal_sel must fit in 4 bits, dump_mode
 * and trigger_mode must each be one of their enum values (trigger_mode
 * is not checked when dumping forever, since no trigger fires then).
 * Returns an error for invalid config, 0 otherwise (return lines were
 * elided by the extraction).
 * NOTE(review): braces, `break`/`default:`/return lines were elided;
 * code tokens below are unchanged.
 */
4349 static int debugfs_is_fifo_config_valid_v3_hw(struct hisi_sas_phy
*phy
)
4351 struct hisi_hba
*hisi_hba
= phy
->hisi_hba
;
/* Signal select is a 4-bit hardware field. */
4353 if (phy
->fifo
.signal_sel
> 0xf) {
4354 dev_info(hisi_hba
->dev
, "Invalid signal select: %u\n",
4355 phy
->fifo
.signal_sel
);
4359 switch (phy
->fifo
.dump_mode
) {
4360 case FIFO_DUMP_FORVER
:
4361 case FIFO_DUMP_AFTER_TRIGGER
:
4362 case FIFO_DUMP_UNTILL_TRIGGER
:
4365 dev_info(hisi_hba
->dev
, "Invalid dump mode: %u\n",
4366 phy
->fifo
.dump_mode
);
4370 /* when FIFO_DUMP_FORVER, no need to check trigger_mode */
4371 if (phy
->fifo
.dump_mode
== FIFO_DUMP_FORVER
)
4374 switch (phy
->fifo
.trigger_mode
) {
4375 case FIFO_TRIGGER_EDGE
:
4376 case FIFO_TRIGGER_SAME_LEVEL
:
4377 case FIFO_TRIGGER_DIFF_LEVEL
:
4380 dev_info(hisi_hba
->dev
, "Invalid trigger mode: %u\n",
4381 phy
->fifo
.trigger_mode
);
/*
 * Push the debugfs trace-FIFO configuration for @phy into hardware.
 * Sequence: validate the config; set the dump-disable bit in
 * DFX_FIFO_CTRL (the write-back of that disabled value at original line
 * 4405 was elided); program trigger mode, dump mode and signal select
 * into DFX_FIFO_CTRL plus the dump mask, trigger value and trigger mask
 * registers; finally clear the dump-disable bit to re-arm the FIFO.
 * Returns the validation result on failure, 0 on success (return lines
 * elided by the extraction).
 * NOTE(review): braces and a few statement lines were elided; code
 * tokens below are unchanged.
 */
4387 static int debugfs_update_fifo_config_v3_hw(struct hisi_sas_phy
*phy
)
4389 u32 trigger_mode
= phy
->fifo
.trigger_mode
;
4390 u32 signal_sel
= phy
->fifo
.signal_sel
;
4391 u32 dump_mode
= phy
->fifo
.dump_mode
;
4392 struct hisi_hba
*hisi_hba
= phy
->hisi_hba
;
4393 int phy_no
= phy
->sas_phy
.id
;
4397 /* Check the validity of trace FIFO configuration */
4398 res
= debugfs_is_fifo_config_valid_v3_hw(phy
);
4402 reg_val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
);
4403 /* Disable trace FIFO before update configuration */
4404 reg_val
|= DFX_FIFO_CTRL_DUMP_DISABLE_MSK
;
4406 /* Update trace FIFO configuration */
4407 reg_val
&= ~(DFX_FIFO_CTRL_DUMP_MODE_MSK
|
4408 DFX_FIFO_CTRL_SIGNAL_SEL_MSK
|
4409 DFX_FIFO_CTRL_TRIGGER_MODE_MSK
);
4411 reg_val
|= ((trigger_mode
<< DFX_FIFO_CTRL_TRIGGER_MODE_OFF
) |
4412 (dump_mode
<< DFX_FIFO_CTRL_DUMP_MODE_OFF
) |
4413 (signal_sel
<< DFX_FIFO_CTRL_SIGNAL_SEL_OFF
));
4414 hisi_sas_phy_write32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
, reg_val
);
4416 hisi_sas_phy_write32(hisi_hba
, phy_no
, DFX_FIFO_DUMP_MSK
,
4417 phy
->fifo
.dump_msk
);
4419 hisi_sas_phy_write32(hisi_hba
, phy_no
, DFX_FIFO_TRIGGER
,
4422 hisi_sas_phy_write32(hisi_hba
, phy_no
, DFX_FIFO_TRIGGER_MSK
,
4423 phy
->fifo
.trigger_msk
);
4425 /* Enable trace FIFO after updated configuration */
4426 reg_val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
);
4427 reg_val
&= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK
;
4428 hisi_sas_phy_write32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
, reg_val
);
4433 static ssize_t
debugfs_fifo_update_cfg_v3_hw_write(struct file
*filp
,
4434 const char __user
*buf
,
4435 size_t count
, loff_t
*ppos
)
4437 struct hisi_sas_phy
*phy
= filp
->private_data
;
4441 val
= kstrtobool_from_user(buf
, count
, &update
);
4448 val
= debugfs_update_fifo_config_v3_hw(phy
);
4455 static const struct file_operations debugfs_fifo_update_cfg_v3_hw_fops
= {
4456 .open
= simple_open
,
4457 .write
= debugfs_fifo_update_cfg_v3_hw_write
,
4458 .owner
= THIS_MODULE
,
4461 static void debugfs_read_fifo_data_v3_hw(struct hisi_sas_phy
*phy
)
4463 struct hisi_hba
*hisi_hba
= phy
->hisi_hba
;
4464 u32
*buf
= phy
->fifo
.rd_data
;
4465 int phy_no
= phy
->sas_phy
.id
;
4469 memset(buf
, 0, sizeof(phy
->fifo
.rd_data
));
4471 /* Disable trace FIFO before read data */
4472 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
);
4473 val
|= DFX_FIFO_CTRL_DUMP_DISABLE_MSK
;
4474 hisi_sas_phy_write32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
, val
);
4476 for (i
= 0; i
< HISI_SAS_FIFO_DATA_DW_SIZE
; i
++) {
4477 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
4482 /* Enable trace FIFO after read data */
4483 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
);
4484 val
&= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK
;
4485 hisi_sas_phy_write32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
, val
);
4488 static int debugfs_fifo_data_v3_hw_show(struct seq_file
*s
, void *p
)
4490 struct hisi_sas_phy
*phy
= s
->private;
4492 debugfs_read_fifo_data_v3_hw(phy
);
4494 debugfs_show_row_32_v3_hw(s
, 0, HISI_SAS_FIFO_DATA_DW_SIZE
* 4,
4495 (__le32
*)phy
->fifo
.rd_data
);
4499 DEFINE_SHOW_ATTRIBUTE(debugfs_fifo_data_v3_hw
);
4501 static void debugfs_fifo_init_v3_hw(struct hisi_hba
*hisi_hba
)
4505 hisi_hba
->debugfs_fifo_dentry
=
4506 debugfs_create_dir("fifo", hisi_hba
->debugfs_dir
);
4508 for (phy_no
= 0; phy_no
< hisi_hba
->n_phy
; phy_no
++) {
4509 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
4510 struct dentry
*port_dentry
;
4514 /* get default configuration for trace FIFO */
4515 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
);
4516 val
&= DFX_FIFO_CTRL_DUMP_MODE_MSK
;
4517 val
>>= DFX_FIFO_CTRL_DUMP_MODE_OFF
;
4518 phy
->fifo
.dump_mode
= val
;
4520 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
);
4521 val
&= DFX_FIFO_CTRL_TRIGGER_MODE_MSK
;
4522 val
>>= DFX_FIFO_CTRL_TRIGGER_MODE_OFF
;
4523 phy
->fifo
.trigger_mode
= val
;
4525 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_CTRL
);
4526 val
&= DFX_FIFO_CTRL_SIGNAL_SEL_MSK
;
4527 val
>>= DFX_FIFO_CTRL_SIGNAL_SEL_OFF
;
4528 phy
->fifo
.signal_sel
= val
;
4530 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_DUMP_MSK
);
4531 phy
->fifo
.dump_msk
= val
;
4533 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_TRIGGER
);
4534 phy
->fifo
.trigger
= val
;
4535 val
= hisi_sas_phy_read32(hisi_hba
, phy_no
, DFX_FIFO_TRIGGER_MSK
);
4536 phy
->fifo
.trigger_msk
= val
;
4538 snprintf(name
, 256, "%d", phy_no
);
4539 port_dentry
= debugfs_create_dir(name
,
4540 hisi_hba
->debugfs_fifo_dentry
);
4542 debugfs_create_file("update_config", 0200, port_dentry
, phy
,
4543 &debugfs_fifo_update_cfg_v3_hw_fops
);
4545 debugfs_create_file("signal_sel", 0600, port_dentry
,
4546 &phy
->fifo
.signal_sel
,
4547 &debugfs_v3_hw_fops
);
4549 debugfs_create_file("dump_msk", 0600, port_dentry
,
4550 &phy
->fifo
.dump_msk
,
4551 &debugfs_v3_hw_fops
);
4553 debugfs_create_file("dump_mode", 0600, port_dentry
,
4554 &phy
->fifo
.dump_mode
,
4555 &debugfs_v3_hw_fops
);
4557 debugfs_create_file("trigger_mode", 0600, port_dentry
,
4558 &phy
->fifo
.trigger_mode
,
4559 &debugfs_v3_hw_fops
);
4561 debugfs_create_file("trigger", 0600, port_dentry
,
4563 &debugfs_v3_hw_fops
);
4565 debugfs_create_file("trigger_msk", 0600, port_dentry
,
4566 &phy
->fifo
.trigger_msk
,
4567 &debugfs_v3_hw_fops
);
4569 debugfs_create_file("fifo_data", 0400, port_dentry
, phy
,
4570 &debugfs_fifo_data_v3_hw_fops
);
4574 static void debugfs_release_v3_hw(struct hisi_hba
*hisi_hba
, int dump_index
)
4576 struct device
*dev
= hisi_hba
->dev
;
4579 devm_kfree(dev
, hisi_hba
->debugfs_iost_cache
[dump_index
].cache
);
4580 hisi_hba
->debugfs_iost_cache
[dump_index
].cache
= NULL
;
4581 devm_kfree(dev
, hisi_hba
->debugfs_itct_cache
[dump_index
].cache
);
4582 hisi_hba
->debugfs_itct_cache
[dump_index
].cache
= NULL
;
4583 devm_kfree(dev
, hisi_hba
->debugfs_iost
[dump_index
].iost
);
4584 hisi_hba
->debugfs_iost
[dump_index
].iost
= NULL
;
4585 devm_kfree(dev
, hisi_hba
->debugfs_itct
[dump_index
].itct
);
4586 hisi_hba
->debugfs_itct
[dump_index
].itct
= NULL
;
4588 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
4589 devm_kfree(dev
, hisi_hba
->debugfs_dq
[dump_index
][i
].hdr
);
4590 hisi_hba
->debugfs_dq
[dump_index
][i
].hdr
= NULL
;
4593 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
4595 hisi_hba
->debugfs_cq
[dump_index
][i
].complete_hdr
);
4596 hisi_hba
->debugfs_cq
[dump_index
][i
].complete_hdr
= NULL
;
4599 for (i
= 0; i
< DEBUGFS_REGS_NUM
; i
++) {
4600 devm_kfree(dev
, hisi_hba
->debugfs_regs
[dump_index
][i
].data
);
4601 hisi_hba
->debugfs_regs
[dump_index
][i
].data
= NULL
;
4604 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
4605 devm_kfree(dev
, hisi_hba
->debugfs_port_reg
[dump_index
][i
].data
);
4606 hisi_hba
->debugfs_port_reg
[dump_index
][i
].data
= NULL
;
4610 static const struct hisi_sas_debugfs_reg
*debugfs_reg_array_v3_hw
[DEBUGFS_REGS_NUM
] = {
4611 [DEBUGFS_GLOBAL
] = &debugfs_global_reg
,
4612 [DEBUGFS_AXI
] = &debugfs_axi_reg
,
4613 [DEBUGFS_RAS
] = &debugfs_ras_reg
,
4616 static int debugfs_alloc_v3_hw(struct hisi_hba
*hisi_hba
, int dump_index
)
4618 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
4619 struct device
*dev
= hisi_hba
->dev
;
4623 for (r
= 0; r
< DEBUGFS_REGS_NUM
; r
++) {
4624 struct hisi_sas_debugfs_regs
*regs
=
4625 &hisi_hba
->debugfs_regs
[dump_index
][r
];
4627 sz
= debugfs_reg_array_v3_hw
[r
]->count
* 4;
4628 regs
->data
= devm_kmalloc(dev
, sz
, GFP_KERNEL
);
4631 regs
->hisi_hba
= hisi_hba
;
4634 sz
= debugfs_port_reg
.count
* 4;
4635 for (p
= 0; p
< hisi_hba
->n_phy
; p
++) {
4636 struct hisi_sas_debugfs_port
*port
=
4637 &hisi_hba
->debugfs_port_reg
[dump_index
][p
];
4639 port
->data
= devm_kmalloc(dev
, sz
, GFP_KERNEL
);
4642 port
->phy
= &hisi_hba
->phy
[p
];
4645 sz
= hw
->complete_hdr_size
* HISI_SAS_QUEUE_SLOTS
;
4646 for (c
= 0; c
< hisi_hba
->queue_count
; c
++) {
4647 struct hisi_sas_debugfs_cq
*cq
=
4648 &hisi_hba
->debugfs_cq
[dump_index
][c
];
4650 cq
->complete_hdr
= devm_kmalloc(dev
, sz
, GFP_KERNEL
);
4651 if (!cq
->complete_hdr
)
4653 cq
->cq
= &hisi_hba
->cq
[c
];
4656 sz
= sizeof(struct hisi_sas_cmd_hdr
) * HISI_SAS_QUEUE_SLOTS
;
4657 for (d
= 0; d
< hisi_hba
->queue_count
; d
++) {
4658 struct hisi_sas_debugfs_dq
*dq
=
4659 &hisi_hba
->debugfs_dq
[dump_index
][d
];
4661 dq
->hdr
= devm_kmalloc(dev
, sz
, GFP_KERNEL
);
4664 dq
->dq
= &hisi_hba
->dq
[d
];
4667 sz
= HISI_SAS_MAX_COMMANDS
* sizeof(struct hisi_sas_iost
);
4669 hisi_hba
->debugfs_iost
[dump_index
].iost
=
4670 devm_kmalloc(dev
, sz
, GFP_KERNEL
);
4671 if (!hisi_hba
->debugfs_iost
[dump_index
].iost
)
4674 sz
= HISI_SAS_IOST_ITCT_CACHE_NUM
*
4675 sizeof(struct hisi_sas_iost_itct_cache
);
4677 hisi_hba
->debugfs_iost_cache
[dump_index
].cache
=
4678 devm_kmalloc(dev
, sz
, GFP_KERNEL
);
4679 if (!hisi_hba
->debugfs_iost_cache
[dump_index
].cache
)
4682 sz
= HISI_SAS_IOST_ITCT_CACHE_NUM
*
4683 sizeof(struct hisi_sas_iost_itct_cache
);
4685 hisi_hba
->debugfs_itct_cache
[dump_index
].cache
=
4686 devm_kmalloc(dev
, sz
, GFP_KERNEL
);
4687 if (!hisi_hba
->debugfs_itct_cache
[dump_index
].cache
)
4690 /* New memory allocation must be locate before itct */
4691 sz
= HISI_SAS_MAX_ITCT_ENTRIES
* sizeof(struct hisi_sas_itct
);
4693 hisi_hba
->debugfs_itct
[dump_index
].itct
=
4694 devm_kmalloc(dev
, sz
, GFP_KERNEL
);
4695 if (!hisi_hba
->debugfs_itct
[dump_index
].itct
)
4700 debugfs_release_v3_hw(hisi_hba
, dump_index
);
4704 static int debugfs_snapshot_regs_v3_hw(struct hisi_hba
*hisi_hba
)
4706 int debugfs_dump_index
= hisi_hba
->debugfs_dump_index
;
4707 struct device
*dev
= hisi_hba
->dev
;
4708 u64 timestamp
= local_clock();
4710 if (debugfs_dump_index
>= hisi_sas_debugfs_dump_count
) {
4711 dev_warn(dev
, "dump count exceeded!\n");
4715 if (debugfs_alloc_v3_hw(hisi_hba
, debugfs_dump_index
)) {
4716 dev_warn(dev
, "failed to alloc memory\n");
4720 do_div(timestamp
, NSEC_PER_MSEC
);
4721 hisi_hba
->debugfs_timestamp
[debugfs_dump_index
] = timestamp
;
4723 debugfs_snapshot_prepare_v3_hw(hisi_hba
);
4725 debugfs_snapshot_global_reg_v3_hw(hisi_hba
);
4726 debugfs_snapshot_port_reg_v3_hw(hisi_hba
);
4727 debugfs_snapshot_axi_reg_v3_hw(hisi_hba
);
4728 debugfs_snapshot_ras_reg_v3_hw(hisi_hba
);
4729 debugfs_snapshot_cq_reg_v3_hw(hisi_hba
);
4730 debugfs_snapshot_dq_reg_v3_hw(hisi_hba
);
4731 debugfs_snapshot_itct_reg_v3_hw(hisi_hba
);
4732 debugfs_snapshot_iost_reg_v3_hw(hisi_hba
);
4734 debugfs_snapshot_restore_v3_hw(hisi_hba
);
4735 hisi_hba
->debugfs_dump_index
++;
4740 static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba
*hisi_hba
)
4742 struct dentry
*dir
= debugfs_create_dir("phy_down_cnt",
4743 hisi_hba
->debugfs_dir
);
4747 for (phy_no
= 0; phy_no
< hisi_hba
->n_phy
; phy_no
++) {
4748 snprintf(name
, 16, "%d", phy_no
);
4749 debugfs_create_file(name
, 0600, dir
,
4750 &hisi_hba
->phy
[phy_no
],
4751 &debugfs_phy_down_cnt_v3_hw_fops
);
4755 static void debugfs_bist_init_v3_hw(struct hisi_hba
*hisi_hba
)
4757 struct dentry
*ports_dentry
;
4760 hisi_hba
->debugfs_bist_dentry
=
4761 debugfs_create_dir("bist", hisi_hba
->debugfs_dir
);
4762 debugfs_create_file("link_rate", 0600,
4763 hisi_hba
->debugfs_bist_dentry
, hisi_hba
,
4764 &debugfs_bist_linkrate_v3_hw_fops
);
4766 debugfs_create_file("code_mode", 0600,
4767 hisi_hba
->debugfs_bist_dentry
, hisi_hba
,
4768 &debugfs_bist_code_mode_v3_hw_fops
);
4770 debugfs_create_file("fixed_code", 0600,
4771 hisi_hba
->debugfs_bist_dentry
,
4772 &hisi_hba
->debugfs_bist_fixed_code
[0],
4773 &debugfs_v3_hw_fops
);
4775 debugfs_create_file("fixed_code_1", 0600,
4776 hisi_hba
->debugfs_bist_dentry
,
4777 &hisi_hba
->debugfs_bist_fixed_code
[1],
4778 &debugfs_v3_hw_fops
);
4780 debugfs_create_file("phy_id", 0600, hisi_hba
->debugfs_bist_dentry
,
4781 hisi_hba
, &debugfs_bist_phy_v3_hw_fops
);
4783 debugfs_create_file("cnt", 0600, hisi_hba
->debugfs_bist_dentry
,
4784 hisi_hba
, &debugfs_bist_cnt_v3_hw_fops
);
4786 debugfs_create_file("loopback_mode", 0600,
4787 hisi_hba
->debugfs_bist_dentry
,
4788 hisi_hba
, &debugfs_bist_mode_v3_hw_fops
);
4790 debugfs_create_file("enable", 0600, hisi_hba
->debugfs_bist_dentry
,
4791 hisi_hba
, &debugfs_bist_enable_v3_hw_fops
);
4793 ports_dentry
= debugfs_create_dir("port", hisi_hba
->debugfs_bist_dentry
);
4795 for (phy_no
= 0; phy_no
< hisi_hba
->n_phy
; phy_no
++) {
4796 struct dentry
*port_dentry
;
4797 struct dentry
*ffe_dentry
;
4801 snprintf(name
, 256, "%d", phy_no
);
4802 port_dentry
= debugfs_create_dir(name
, ports_dentry
);
4803 ffe_dentry
= debugfs_create_dir("ffe", port_dentry
);
4804 for (i
= 0; i
< FFE_CFG_MAX
; i
++) {
4807 debugfs_create_file(debugfs_ffe_name_v3_hw
[i
].name
,
4809 &hisi_hba
->debugfs_bist_ffe
[phy_no
][i
],
4810 &debugfs_v3_hw_fops
);
4814 hisi_hba
->debugfs_bist_linkrate
= SAS_LINK_RATE_1_5_GBPS
;
4817 static int debugfs_dump_index_v3_hw_show(struct seq_file
*s
, void *p
)
4819 int *debugfs_dump_index
= s
->private;
4821 if (*debugfs_dump_index
> 0)
4822 seq_printf(s
, "%d\n", *debugfs_dump_index
- 1);
4824 seq_puts(s
, "dump not triggered\n");
4828 DEFINE_SHOW_ATTRIBUTE(debugfs_dump_index_v3_hw
);
4830 static void debugfs_dump_init_v3_hw(struct hisi_hba
*hisi_hba
)
4834 hisi_hba
->debugfs_dump_dentry
=
4835 debugfs_create_dir("dump", hisi_hba
->debugfs_dir
);
4837 debugfs_create_file("latest_dump", 0400, hisi_hba
->debugfs_dump_dentry
,
4838 &hisi_hba
->debugfs_dump_index
,
4839 &debugfs_dump_index_v3_hw_fops
);
4841 for (i
= 0; i
< hisi_sas_debugfs_dump_count
; i
++)
4842 debugfs_create_files_v3_hw(hisi_hba
, i
);
4845 static void debugfs_exit_v3_hw(struct hisi_hba
*hisi_hba
)
4847 debugfs_remove_recursive(hisi_hba
->debugfs_dir
);
4848 hisi_hba
->debugfs_dir
= NULL
;
4851 static void debugfs_init_v3_hw(struct hisi_hba
*hisi_hba
)
4853 struct device
*dev
= hisi_hba
->dev
;
4855 hisi_hba
->debugfs_dir
= debugfs_create_dir(dev_name(dev
),
4856 hisi_sas_debugfs_dir
);
4857 /* create bist structures */
4858 debugfs_bist_init_v3_hw(hisi_hba
);
4860 debugfs_dump_init_v3_hw(hisi_hba
);
4862 debugfs_phy_down_cnt_init_v3_hw(hisi_hba
);
4863 debugfs_fifo_init_v3_hw(hisi_hba
);
4864 debugfs_create_file("trigger_dump", 0200,
4865 hisi_hba
->debugfs_dir
,
4867 &debugfs_trigger_dump_v3_hw_fops
);
4871 hisi_sas_v3_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
4873 struct Scsi_Host
*shost
;
4874 struct hisi_hba
*hisi_hba
;
4875 struct device
*dev
= &pdev
->dev
;
4876 struct asd_sas_phy
**arr_phy
;
4877 struct asd_sas_port
**arr_port
;
4878 struct sas_ha_struct
*sha
;
4879 int rc
, phy_nr
, port_nr
, i
;
4881 rc
= pcim_enable_device(pdev
);
4885 pci_set_master(pdev
);
4887 rc
= pcim_iomap_regions(pdev
, 1 << BAR_NO_V3_HW
, DRV_NAME
);
4891 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
4893 dev_err(dev
, "No usable DMA addressing method\n");
4898 shost
= hisi_sas_shost_alloc_pci(pdev
);
4904 sha
= SHOST_TO_SAS_HA(shost
);
4905 hisi_hba
= shost_priv(shost
);
4906 dev_set_drvdata(dev
, sha
);
4908 hisi_hba
->regs
= pcim_iomap_table(pdev
)[BAR_NO_V3_HW
];
4909 if (!hisi_hba
->regs
) {
4910 dev_err(dev
, "cannot map register\n");
4912 goto err_out_free_host
;
4915 phy_nr
= port_nr
= hisi_hba
->n_phy
;
4917 arr_phy
= devm_kcalloc(dev
, phy_nr
, sizeof(void *), GFP_KERNEL
);
4918 arr_port
= devm_kcalloc(dev
, port_nr
, sizeof(void *), GFP_KERNEL
);
4919 if (!arr_phy
|| !arr_port
) {
4921 goto err_out_free_host
;
4924 sha
->sas_phy
= arr_phy
;
4925 sha
->sas_port
= arr_port
;
4927 sha
->lldd_ha
= hisi_hba
;
4929 shost
->transportt
= hisi_sas_stt
;
4930 shost
->max_id
= HISI_SAS_MAX_DEVICES
;
4931 shost
->max_lun
= ~0;
4932 shost
->max_channel
= 1;
4933 shost
->max_cmd_len
= 16;
4934 shost
->can_queue
= HISI_SAS_UNRESERVED_IPTT
;
4935 shost
->cmd_per_lun
= HISI_SAS_UNRESERVED_IPTT
;
4936 if (hisi_hba
->iopoll_q_cnt
)
4941 sha
->sas_ha_name
= DRV_NAME
;
4943 sha
->sas_addr
= &hisi_hba
->sas_addr
[0];
4944 sha
->num_phys
= hisi_hba
->n_phy
;
4946 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
4947 sha
->sas_phy
[i
] = &hisi_hba
->phy
[i
].sas_phy
;
4948 sha
->sas_port
[i
] = &hisi_hba
->port
[i
].sas_port
;
4951 if (hisi_hba
->prot_mask
) {
4952 dev_info(dev
, "Registering for DIF/DIX prot_mask=0x%x\n",
4954 scsi_host_set_prot(hisi_hba
->shost
, prot_mask
);
4955 if (hisi_hba
->prot_mask
& HISI_SAS_DIX_PROT_MASK
)
4956 scsi_host_set_guard(hisi_hba
->shost
,
4957 SHOST_DIX_GUARD_CRC
);
4960 rc
= interrupt_preinit_v3_hw(hisi_hba
);
4962 goto err_out_free_host
;
4964 rc
= scsi_add_host(shost
, dev
);
4966 goto err_out_free_host
;
4968 rc
= sas_register_ha(sha
);
4970 goto err_out_remove_host
;
4972 rc
= hisi_sas_v3_init(hisi_hba
);
4974 goto err_out_unregister_ha
;
4976 scsi_scan_host(shost
);
4977 if (hisi_sas_debugfs_enable
)
4978 debugfs_init_v3_hw(hisi_hba
);
4980 pm_runtime_set_autosuspend_delay(dev
, 5000);
4981 pm_runtime_use_autosuspend(dev
);
4983 * For the situation that there are ATA disks connected with SAS
4984 * controller, it additionally creates ata_port which will affect the
4985 * child_count of hisi_hba->dev. Even if suspended all the disks,
4986 * ata_port is still and the child_count of hisi_hba->dev is not 0.
4987 * So use pm_suspend_ignore_children() to ignore the effect to
4990 pm_suspend_ignore_children(dev
, true);
4991 pm_runtime_put_noidle(&pdev
->dev
);
4995 err_out_unregister_ha
:
4996 sas_unregister_ha(sha
);
4997 err_out_remove_host
:
4998 scsi_remove_host(shost
);
5000 hisi_sas_free(hisi_hba
);
5001 scsi_host_put(shost
);
5007 hisi_sas_v3_destroy_irqs(struct pci_dev
*pdev
, struct hisi_hba
*hisi_hba
)
5011 devm_free_irq(&pdev
->dev
, pci_irq_vector(pdev
, 1), hisi_hba
);
5012 devm_free_irq(&pdev
->dev
, pci_irq_vector(pdev
, 2), hisi_hba
);
5013 devm_free_irq(&pdev
->dev
, pci_irq_vector(pdev
, 11), hisi_hba
);
5014 for (i
= 0; i
< hisi_hba
->cq_nvecs
; i
++) {
5015 struct hisi_sas_cq
*cq
= &hisi_hba
->cq
[i
];
5016 int nr
= hisi_sas_intr_conv
? 16 : 16 + i
;
5018 devm_free_irq(&pdev
->dev
, pci_irq_vector(pdev
, nr
), cq
);
5022 static void hisi_sas_v3_remove(struct pci_dev
*pdev
)
5024 struct device
*dev
= &pdev
->dev
;
5025 struct sas_ha_struct
*sha
= dev_get_drvdata(dev
);
5026 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
5027 struct Scsi_Host
*shost
= sha
->shost
;
5029 pm_runtime_get_noresume(dev
);
5030 if (hisi_sas_debugfs_enable
)
5031 debugfs_exit_v3_hw(hisi_hba
);
5033 sas_unregister_ha(sha
);
5034 flush_workqueue(hisi_hba
->wq
);
5035 sas_remove_host(shost
);
5037 hisi_sas_v3_destroy_irqs(pdev
, hisi_hba
);
5038 hisi_sas_free(hisi_hba
);
5039 scsi_host_put(shost
);
5042 static void hisi_sas_reset_prepare_v3_hw(struct pci_dev
*pdev
)
5044 struct sas_ha_struct
*sha
= pci_get_drvdata(pdev
);
5045 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
5046 struct device
*dev
= hisi_hba
->dev
;
5049 dev_info(dev
, "FLR prepare\n");
5050 down(&hisi_hba
->sem
);
5051 set_bit(HISI_SAS_RESETTING_BIT
, &hisi_hba
->flags
);
5052 hisi_sas_controller_reset_prepare(hisi_hba
);
5054 interrupt_disable_v3_hw(hisi_hba
);
5055 rc
= disable_host_v3_hw(hisi_hba
);
5057 dev_err(dev
, "FLR: disable host failed rc=%d\n", rc
);
5060 static void hisi_sas_reset_done_v3_hw(struct pci_dev
*pdev
)
5062 struct sas_ha_struct
*sha
= pci_get_drvdata(pdev
);
5063 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
5064 struct Scsi_Host
*shost
= hisi_hba
->shost
;
5065 struct device
*dev
= hisi_hba
->dev
;
5068 hisi_sas_init_mem(hisi_hba
);
5070 rc
= hw_init_v3_hw(hisi_hba
);
5072 dev_err(dev
, "FLR: hw init failed rc=%d\n", rc
);
5073 clear_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
5074 scsi_unblock_requests(shost
);
5075 clear_bit(HISI_SAS_RESETTING_BIT
, &hisi_hba
->flags
);
5080 hisi_sas_controller_reset_done(hisi_hba
);
5081 dev_info(dev
, "FLR done\n");
5085 /* instances of the controller */
5089 static void enable_host_v3_hw(struct hisi_hba
*hisi_hba
)
5093 hisi_sas_write32(hisi_hba
, DLVRY_QUEUE_ENABLE
,
5094 (u32
)((1ULL << hisi_hba
->queue_count
) - 1));
5096 phys_init_v3_hw(hisi_hba
);
5097 reg_val
= hisi_sas_read32(hisi_hba
, AXI_MASTER_CFG_BASE
+
5099 reg_val
&= ~AM_CTRL_SHUTDOWN_REQ_MSK
;
5100 hisi_sas_write32(hisi_hba
, AXI_MASTER_CFG_BASE
+
5101 AM_CTRL_GLOBAL
, reg_val
);
5104 static int _suspend_v3_hw(struct device
*device
)
5106 struct pci_dev
*pdev
= to_pci_dev(device
);
5107 struct sas_ha_struct
*sha
= pci_get_drvdata(pdev
);
5108 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
5109 struct device
*dev
= hisi_hba
->dev
;
5110 struct Scsi_Host
*shost
= hisi_hba
->shost
;
5113 if (!pdev
->pm_cap
) {
5114 dev_err(dev
, "PCI PM not supported\n");
5118 if (test_and_set_bit(HISI_SAS_RESETTING_BIT
, &hisi_hba
->flags
))
5121 dev_warn(dev
, "entering suspend state\n");
5123 scsi_block_requests(shost
);
5124 set_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
5125 flush_workqueue(hisi_hba
->wq
);
5126 interrupt_disable_v3_hw(hisi_hba
);
5129 if ((device
->power
.runtime_status
== RPM_SUSPENDING
) &&
5130 atomic_read(&device
->power
.usage_count
)) {
5131 dev_err(dev
, "PM suspend: host status cannot be suspended\n");
5137 rc
= disable_host_v3_hw(hisi_hba
);
5139 dev_err(dev
, "PM suspend: disable host failed rc=%d\n", rc
);
5140 goto err_out_recover_host
;
5143 hisi_sas_init_mem(hisi_hba
);
5145 hisi_sas_release_tasks(hisi_hba
);
5147 sas_suspend_ha(sha
);
5149 dev_warn(dev
, "end of suspending controller\n");
5152 err_out_recover_host
:
5153 enable_host_v3_hw(hisi_hba
);
5157 interrupt_enable_v3_hw(hisi_hba
);
5158 clear_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
5159 clear_bit(HISI_SAS_RESETTING_BIT
, &hisi_hba
->flags
);
5160 scsi_unblock_requests(shost
);
5164 static int _resume_v3_hw(struct device
*device
)
5166 struct pci_dev
*pdev
= to_pci_dev(device
);
5167 struct sas_ha_struct
*sha
= pci_get_drvdata(pdev
);
5168 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
5169 struct Scsi_Host
*shost
= hisi_hba
->shost
;
5170 struct device
*dev
= hisi_hba
->dev
;
5172 pci_power_t device_state
= pdev
->current_state
;
5174 dev_warn(dev
, "resuming from operating state [D%d]\n",
5177 scsi_unblock_requests(shost
);
5178 clear_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
5180 sas_prep_resume_ha(sha
);
5181 rc
= hw_init_v3_hw(hisi_hba
);
5183 scsi_remove_host(shost
);
5186 phys_init_v3_hw(hisi_hba
);
5189 * If a directly-attached disk is removed during suspend, a deadlock
5190 * may occur, as the PHYE_RESUME_TIMEOUT processing will require the
5191 * hisi_hba->device to be active, which can only happen when resume
5192 * completes. So don't wait for the HA event workqueue to drain upon
5195 sas_resume_ha_no_sync(sha
);
5196 clear_bit(HISI_SAS_RESETTING_BIT
, &hisi_hba
->flags
);
5198 dev_warn(dev
, "end of resuming controller\n");
5203 static int __maybe_unused
suspend_v3_hw(struct device
*device
)
5205 struct pci_dev
*pdev
= to_pci_dev(device
);
5206 struct sas_ha_struct
*sha
= pci_get_drvdata(pdev
);
5207 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
5210 set_bit(HISI_SAS_PM_BIT
, &hisi_hba
->flags
);
5212 rc
= _suspend_v3_hw(device
);
5214 clear_bit(HISI_SAS_PM_BIT
, &hisi_hba
->flags
);
5219 static int __maybe_unused
resume_v3_hw(struct device
*device
)
5221 struct pci_dev
*pdev
= to_pci_dev(device
);
5222 struct sas_ha_struct
*sha
= pci_get_drvdata(pdev
);
5223 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
5224 int rc
= _resume_v3_hw(device
);
5226 clear_bit(HISI_SAS_PM_BIT
, &hisi_hba
->flags
);
5231 static const struct pci_device_id sas_v3_pci_table
[] = {
5232 { PCI_VDEVICE(HUAWEI
, 0xa230), hip08
},
5235 MODULE_DEVICE_TABLE(pci
, sas_v3_pci_table
);
5237 static const struct pci_error_handlers hisi_sas_err_handler
= {
5238 .reset_prepare
= hisi_sas_reset_prepare_v3_hw
,
5239 .reset_done
= hisi_sas_reset_done_v3_hw
,
5242 static UNIVERSAL_DEV_PM_OPS(hisi_sas_v3_pm_ops
,
5247 static struct pci_driver sas_v3_pci_driver
= {
5249 .id_table
= sas_v3_pci_table
,
5250 .probe
= hisi_sas_v3_probe
,
5251 .remove
= hisi_sas_v3_remove
,
5252 .err_handler
= &hisi_sas_err_handler
,
5253 .driver
.pm
= &hisi_sas_v3_pm_ops
,
5256 module_pci_driver(sas_v3_pci_driver
);
5257 module_param_named(intr_conv
, hisi_sas_intr_conv
, bool, 0444);
5259 MODULE_LICENSE("GPL");
5260 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
5261 MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
5262 MODULE_ALIAS("pci:" DRV_NAME
);