// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017 Hisilicon Limited.
 */
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v3_hw"
/* global registers need init */
#define DLVRY_QUEUE_ENABLE		0x0
#define IOST_BASE_ADDR_LO		0x8
#define IOST_BASE_ADDR_HI		0xc
#define ITCT_BASE_ADDR_LO		0x10
#define ITCT_BASE_ADDR_HI		0x14
#define IO_BROKEN_MSG_ADDR_LO		0x18
#define IO_BROKEN_MSG_ADDR_HI		0x1c
#define PHY_CONTEXT			0x20
#define PHY_STATE			0x24
#define PHY_PORT_NUM_MA			0x28
#define PHY_CONN_RATE			0x30
#define ITCT_CLR			0x44
#define ITCT_CLR_EN_OFF			16
#define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF			0
#define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
#define SAS_AXI_USER3			0x50
#define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO	0x60
#define SATA_INITI_D2H_STORE_ADDR_HI	0x64
#define CFG_MAX_TAG			0x68
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL	0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL	0x88
#define HGC_GET_ITV_TIME		0x90
#define DEVICE_MSG_WORK_MODE		0x94
#define OPENA_WT_CONTI_TIME		0x9c
#define I_T_NEXUS_LOSS_TIME		0xa0
#define MAX_CON_TIME_LIMIT_TIME		0xa4
#define BUS_INACTIVE_LIMIT_TIME		0xa8
#define REJECT_TO_OPEN_LIMIT_TIME	0xac
#define CQ_INT_CONVERGE_EN		0xb0
#define CFG_AGING_TIME			0xbc
#define HGC_DFX_CFG2			0xc0
#define CFG_ABT_SET_QUERY_IPTT		0xd4
#define CFG_SET_ABORTED_IPTT_OFF	0
#define CFG_SET_ABORTED_IPTT_MSK	(0xfff << CFG_SET_ABORTED_IPTT_OFF)
#define CFG_SET_ABORTED_EN_OFF		12
#define CFG_ABT_SET_IPTT_DONE		0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF	0
#define HGC_IOMB_PROC1_STATUS		0x104
#define HGC_LM_DFX_STATUS2		0x128
#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF	0
#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK	(0xfff << \
					 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF	12
#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK	(0x7ff << \
					 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
#define HGC_CQE_ECC_ADDR		0x13c
#define HGC_CQE_ECC_1B_ADDR_OFF		0
#define HGC_CQE_ECC_1B_ADDR_MSK		(0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
#define HGC_CQE_ECC_MB_ADDR_OFF		8
#define HGC_CQE_ECC_MB_ADDR_MSK		(0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
#define HGC_IOST_ECC_ADDR		0x140
#define HGC_IOST_ECC_1B_ADDR_OFF	0
#define HGC_IOST_ECC_1B_ADDR_MSK	(0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
#define HGC_IOST_ECC_MB_ADDR_OFF	16
#define HGC_IOST_ECC_MB_ADDR_MSK	(0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
#define HGC_DQE_ECC_ADDR		0x144
#define HGC_DQE_ECC_1B_ADDR_OFF		0
#define HGC_DQE_ECC_1B_ADDR_MSK		(0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
#define HGC_DQE_ECC_MB_ADDR_OFF		16
#define HGC_DQE_ECC_MB_ADDR_MSK		(0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define CHNL_INT_STATUS			0x148
#define HGC_ITCT_ECC_ADDR		0x150
#define HGC_ITCT_ECC_1B_ADDR_OFF	0
#define HGC_ITCT_ECC_1B_ADDR_MSK	(0x3ff << \
					 HGC_ITCT_ECC_1B_ADDR_OFF)
#define HGC_ITCT_ECC_MB_ADDR_OFF	16
#define HGC_ITCT_ECC_MB_ADDR_MSK	(0x3ff << \
					 HGC_ITCT_ECC_MB_ADDR_OFF)
#define HGC_AXI_FIFO_ERR_INFO		0x154
#define AXI_ERR_INFO_OFF		0
#define AXI_ERR_INFO_MSK		(0xff << AXI_ERR_INFO_OFF)
#define FIFO_ERR_INFO_OFF		8
#define FIFO_ERR_INFO_MSK		(0xff << FIFO_ERR_INFO_OFF)
#define TAB_RD_TYPE			0x15c
#define INT_COAL_EN			0x19c
#define OQ_INT_COAL_TIME		0x1a0
#define OQ_INT_COAL_CNT			0x1a4
#define ENT_INT_COAL_TIME		0x1a8
#define ENT_INT_COAL_CNT		0x1ac
#define OQ_INT_SRC			0x1b0
#define OQ_INT_SRC_MSK			0x1b4
#define ENT_INT_SRC1			0x1b8
#define ENT_INT_SRC1_D2H_FIS_CH0_OFF	0
#define ENT_INT_SRC1_D2H_FIS_CH0_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
#define ENT_INT_SRC1_D2H_FIS_CH1_OFF	8
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2			0x1bc
#define ENT_INT_SRC3			0x1c0
#define ENT_INT_SRC3_WP_DEPTH_OFF	8
#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF	9
#define ENT_INT_SRC3_RP_DEPTH_OFF	10
#define ENT_INT_SRC3_AXI_OFF		11
#define ENT_INT_SRC3_FIFO_OFF		12
#define ENT_INT_SRC3_LM_OFF		14
#define ENT_INT_SRC3_ITC_INT_OFF	15
#define ENT_INT_SRC3_ITC_INT_MSK	(0x1 << ENT_INT_SRC3_ITC_INT_OFF)
#define ENT_INT_SRC3_ABT_OFF		16
#define ENT_INT_SRC3_DQE_POISON_OFF	18
#define ENT_INT_SRC3_IOST_POISON_OFF	19
#define ENT_INT_SRC3_ITCT_POISON_OFF	20
#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF	21
#define ENT_INT_SRC_MSK1		0x1c4
#define ENT_INT_SRC_MSK2		0x1c8
#define ENT_INT_SRC_MSK3		0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF	31
#define CHNL_PHYUPDOWN_INT_MSK		0x1d0
#define CHNL_ENT_INT_MSK		0x1d4
#define HGC_COM_INT_MSK			0x1d8
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK	(0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
#define SAS_ECC_INTR			0x1e8
#define SAS_ECC_INTR_DQE_ECC_1B_OFF	0
#define SAS_ECC_INTR_DQE_ECC_MB_OFF	1
#define SAS_ECC_INTR_IOST_ECC_1B_OFF	2
#define SAS_ECC_INTR_IOST_ECC_MB_OFF	3
#define SAS_ECC_INTR_ITCT_ECC_1B_OFF	4
#define SAS_ECC_INTR_ITCT_ECC_MB_OFF	5
#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF	6
#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF	7
#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF	8
#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF	9
#define SAS_ECC_INTR_CQE_ECC_1B_OFF	10
#define SAS_ECC_INTR_CQE_ECC_MB_OFF	11
#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF	12
#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF	13
#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF	14
#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF	15
#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF	16
#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF	17
#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF	18
#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF	19
#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF	20
#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF	21
#define SAS_ECC_INTR_MSK		0x1ec
#define HGC_ERR_STAT_EN			0x238
#define CQE_SEND_CNT			0x248
#define DLVRY_Q_0_BASE_ADDR_LO		0x260
#define DLVRY_Q_0_BASE_ADDR_HI		0x264
#define DLVRY_Q_0_DEPTH			0x268
#define DLVRY_Q_0_WR_PTR		0x26c
#define DLVRY_Q_0_RD_PTR		0x270
#define HYPER_STREAM_ID_EN_CFG		0xc80
#define OQ0_INT_SRC_MSK			0xc90
#define COMPL_Q_0_BASE_ADDR_LO		0x4e0
#define COMPL_Q_0_BASE_ADDR_HI		0x4e4
#define COMPL_Q_0_DEPTH			0x4e8
#define COMPL_Q_0_WR_PTR		0x4ec
#define COMPL_Q_0_RD_PTR		0x4f0
#define HGC_RXM_DFX_STATUS14		0xae8
#define HGC_RXM_DFX_STATUS14_MEM0_OFF	0
#define HGC_RXM_DFX_STATUS14_MEM0_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM0_OFF)
#define HGC_RXM_DFX_STATUS14_MEM1_OFF	9
#define HGC_RXM_DFX_STATUS14_MEM1_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM1_OFF)
#define HGC_RXM_DFX_STATUS14_MEM2_OFF	18
#define HGC_RXM_DFX_STATUS14_MEM2_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM2_OFF)
#define HGC_RXM_DFX_STATUS15		0xaec
#define HGC_RXM_DFX_STATUS15_MEM3_OFF	0
#define HGC_RXM_DFX_STATUS15_MEM3_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS15_MEM3_OFF)
#define AWQOS_AWCACHE_CFG		0xc84
#define ARQOS_ARCACHE_CFG		0xc88
#define HILINK_ERR_DFX			0xe04
#define SAS_GPIO_CFG_0			0x1000
#define SAS_GPIO_CFG_1			0x1004
#define SAS_GPIO_TX_0_1			0x1040
#define SAS_CFG_DRIVE_VLD		0x1070
/* phy registers requiring init */
#define PORT_BASE			(0x2000)
#define PHY_CFG				(PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE		(PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF			0
#define PHY_CFG_ENA_MSK			(0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF		2
#define PHY_CFG_DC_OPT_MSK		(0x1 << PHY_CFG_DC_OPT_OFF)
#define PHY_CFG_PHY_RST_OFF		3
#define PHY_CFG_PHY_RST_MSK		(0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE		(PORT_BASE + 0x8)
#define CFG_PROG_PHY_LINK_RATE_OFF	0
#define CFG_PROG_PHY_LINK_RATE_MSK	(0xff << CFG_PROG_PHY_LINK_RATE_OFF)
#define CFG_PROG_OOB_PHY_LINK_RATE_OFF	8
#define CFG_PROG_OOB_PHY_LINK_RATE_MSK	(0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
#define PHY_CTRL			(PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF		0
#define PHY_CTRL_RESET_MSK		(0x1 << PHY_CTRL_RESET_OFF)
#define CMD_HDR_PIR_OFF			8
#define CMD_HDR_PIR_MSK			(0x1 << CMD_HDR_PIR_OFF)
#define SERDES_CFG			(PORT_BASE + 0x1c)
#define CFG_ALOS_CHK_DISABLE_OFF	9
#define CFG_ALOS_CHK_DISABLE_MSK	(0x1 << CFG_ALOS_CHK_DISABLE_OFF)
#define SAS_PHY_BIST_CTRL		(PORT_BASE + 0x2c)
#define CFG_BIST_MODE_SEL_OFF		0
#define CFG_BIST_MODE_SEL_MSK		(0xf << CFG_BIST_MODE_SEL_OFF)
#define CFG_LOOP_TEST_MODE_OFF		14
#define CFG_LOOP_TEST_MODE_MSK		(0x3 << CFG_LOOP_TEST_MODE_OFF)
#define CFG_RX_BIST_EN_OFF		16
#define CFG_RX_BIST_EN_MSK		(0x1 << CFG_RX_BIST_EN_OFF)
#define CFG_TX_BIST_EN_OFF		17
#define CFG_TX_BIST_EN_MSK		(0x1 << CFG_TX_BIST_EN_OFF)
#define CFG_BIST_TEST_OFF		18
#define CFG_BIST_TEST_MSK		(0x1 << CFG_BIST_TEST_OFF)
#define SAS_PHY_BIST_CODE		(PORT_BASE + 0x30)
#define SAS_PHY_BIST_CODE1		(PORT_BASE + 0x34)
#define SAS_BIST_ERR_CNT		(PORT_BASE + 0x38)
#define SL_CFG				(PORT_BASE + 0x84)
#define AIP_LIMIT			(PORT_BASE + 0x90)
#define SL_CONTROL			(PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF	0
#define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF			17
#define SL_CTA_MSK			(0x1 << SL_CTA_OFF)
#define RX_PRIMS_STATUS			(PORT_BASE + 0x98)
#define RX_BCAST_CHG_OFF		1
#define RX_BCAST_CHG_MSK		(0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0			(PORT_BASE + 0x9c)
#define TX_ID_DWORD1			(PORT_BASE + 0xa0)
#define TX_ID_DWORD2			(PORT_BASE + 0xa4)
#define TX_ID_DWORD3			(PORT_BASE + 0xa8)
#define TX_ID_DWORD4			(PORT_BASE + 0xaC)
#define TX_ID_DWORD5			(PORT_BASE + 0xb0)
#define TX_ID_DWORD6			(PORT_BASE + 0xb4)
#define TXID_AUTO			(PORT_BASE + 0xb8)
#define CT3_OFF				1
#define CT3_MSK				(0x1 << CT3_OFF)
#define TX_HARDRST_OFF			2
#define TX_HARDRST_MSK			(0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H		(PORT_BASE + 0xfc)
#define STP_LINK_TIMER			(PORT_BASE + 0x120)
#define STP_LINK_TIMEOUT_STATE		(PORT_BASE + 0x124)
#define CON_CFG_DRIVER			(PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG		(PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG		(PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG		(PORT_BASE + 0x13c)
#define CHL_INT0			(PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF	0
#define CHL_INT0_HOTPLUG_TOUT_MSK	(0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF	1
#define CHL_INT0_SL_RX_BCST_ACK_MSK	(0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF	2
#define CHL_INT0_SL_PHY_ENABLE_MSK	(0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF		4
#define CHL_INT0_NOT_RDY_MSK		(0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF		5
#define CHL_INT0_PHY_RDY_MSK		(0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1			(PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF	15
#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF	16
#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF	17
#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF	18
#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF	19
#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF	20
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF	21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF	22
#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF	23
#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF	24
#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF	26
#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF	27
#define CHL_INT2			(PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF	0
#define CHL_INT2_RX_DISP_ERR_OFF	28
#define CHL_INT2_RX_CODE_ERR_OFF	29
#define CHL_INT2_RX_INVLD_DW_OFF	30
#define CHL_INT2_STP_LINK_TIMEOUT_OFF	31
#define CHL_INT0_MSK			(PORT_BASE + 0x1c0)
#define CHL_INT1_MSK			(PORT_BASE + 0x1c4)
#define CHL_INT2_MSK			(PORT_BASE + 0x1c8)
#define SAS_EC_INT_COAL_TIME		(PORT_BASE + 0x1cc)
#define CHL_INT_COAL_EN			(PORT_BASE + 0x1d0)
#define SAS_RX_TRAIN_TIMER		(PORT_BASE + 0x2a4)
#define PHY_CTRL_RDY_MSK		(PORT_BASE + 0x2b0)
#define PHYCTRL_NOT_RDY_MSK		(PORT_BASE + 0x2b4)
#define PHYCTRL_DWS_RESET_MSK		(PORT_BASE + 0x2b8)
#define PHYCTRL_PHY_ENA_MSK		(PORT_BASE + 0x2bc)
#define SL_RX_BCAST_CHK_MSK		(PORT_BASE + 0x2c0)
#define PHYCTRL_OOB_RESTART_MSK		(PORT_BASE + 0x2c4)
#define DMA_TX_STATUS			(PORT_BASE + 0x2d0)
#define DMA_TX_STATUS_BUSY_OFF		0
#define DMA_TX_STATUS_BUSY_MSK		(0x1 << DMA_TX_STATUS_BUSY_OFF)
#define DMA_RX_STATUS			(PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF		0
#define DMA_RX_STATUS_BUSY_MSK		(0x1 << DMA_RX_STATUS_BUSY_OFF)

#define COARSETUNE_TIME			(PORT_BASE + 0x304)
#define TXDEEMPH_G1			(PORT_BASE + 0x350)
#define ERR_CNT_DWS_LOST		(PORT_BASE + 0x380)
#define ERR_CNT_RESET_PROB		(PORT_BASE + 0x384)
#define ERR_CNT_INVLD_DW		(PORT_BASE + 0x390)
#define ERR_CNT_CODE_ERR		(PORT_BASE + 0x394)
#define ERR_CNT_DISP_ERR		(PORT_BASE + 0x398)
#define DEFAULT_ITCT_HW		2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
#error Max ITCT exceeded
#endif
#define AXI_MASTER_CFG_BASE		(0x5000)
#define AM_CTRL_GLOBAL			(0x0)
#define AM_CTRL_SHUTDOWN_REQ_OFF	0
#define AM_CTRL_SHUTDOWN_REQ_MSK	(0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
#define AM_CURR_TRANS_RETURN		(0x150)

#define AM_CFG_MAX_TRANS		(0x5010)
#define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)
#define AXI_CFG				(0x5100)
#define AM_ROB_ECC_ERR_ADDR		(0x510c)
#define AM_ROB_ECC_ERR_ADDR_OFF		0
#define AM_ROB_ECC_ERR_ADDR_MSK		0xffffffff

/* RAS registers need init */
#define RAS_BASE			(0x6000)
#define SAS_RAS_INTR0			(RAS_BASE)
#define SAS_RAS_INTR1			(RAS_BASE + 0x04)
#define SAS_RAS_INTR0_MASK		(RAS_BASE + 0x08)
#define SAS_RAS_INTR1_MASK		(RAS_BASE + 0x0c)
#define CFG_SAS_RAS_INTR_MASK		(RAS_BASE + 0x1c)
#define SAS_RAS_INTR2			(RAS_BASE + 0x20)
#define SAS_RAS_INTR2_MASK		(RAS_BASE + 0x24)
/* HW dma structures */
/* Delivery queue header */
#define CMD_HDR_ABORT_FLAG_OFF		0
#define CMD_HDR_ABORT_FLAG_MSK		(0x3 << CMD_HDR_ABORT_FLAG_OFF)
#define CMD_HDR_ABORT_DEVICE_TYPE_OFF	2
#define CMD_HDR_ABORT_DEVICE_TYPE_MSK	(0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_RESP_REPORT_OFF		5
#define CMD_HDR_RESP_REPORT_MSK		(0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF		6
#define CMD_HDR_TLR_CTRL_MSK		(0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PORT_OFF		18
#define CMD_HDR_PORT_MSK		(0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF		27
#define CMD_HDR_PRIORITY_MSK		(0x1 << CMD_HDR_PRIORITY_OFF)
#define CMD_HDR_CMD_OFF			29
#define CMD_HDR_CMD_MSK			(0x7 << CMD_HDR_CMD_OFF)
#define CMD_HDR_UNCON_CMD_OFF		3
#define CMD_HDR_DIR_OFF			5
#define CMD_HDR_DIR_MSK			(0x3 << CMD_HDR_DIR_OFF)
#define CMD_HDR_RESET_OFF		7
#define CMD_HDR_RESET_MSK		(0x1 << CMD_HDR_RESET_OFF)
#define CMD_HDR_VDTL_OFF		10
#define CMD_HDR_VDTL_MSK		(0x1 << CMD_HDR_VDTL_OFF)
#define CMD_HDR_FRAME_TYPE_OFF		11
#define CMD_HDR_FRAME_TYPE_MSK		(0x1f << CMD_HDR_FRAME_TYPE_OFF)
#define CMD_HDR_DEV_ID_OFF		16
#define CMD_HDR_DEV_ID_MSK		(0xffff << CMD_HDR_DEV_ID_OFF)
#define CMD_HDR_CFL_OFF			0
#define CMD_HDR_CFL_MSK			(0x1ff << CMD_HDR_CFL_OFF)
#define CMD_HDR_NCQ_TAG_OFF		10
#define CMD_HDR_NCQ_TAG_MSK		(0x1f << CMD_HDR_NCQ_TAG_OFF)
#define CMD_HDR_MRFL_OFF		15
#define CMD_HDR_MRFL_MSK		(0x1ff << CMD_HDR_MRFL_OFF)
#define CMD_HDR_SG_MOD_OFF		24
#define CMD_HDR_SG_MOD_MSK		(0x3 << CMD_HDR_SG_MOD_OFF)
#define CMD_HDR_IPTT_OFF		0
#define CMD_HDR_IPTT_MSK		(0xffff << CMD_HDR_IPTT_OFF)
#define CMD_HDR_DIF_SGL_LEN_OFF		0
#define CMD_HDR_DIF_SGL_LEN_MSK		(0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
#define CMD_HDR_DATA_SGL_LEN_OFF	16
#define CMD_HDR_DATA_SGL_LEN_MSK	(0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
#define CMD_HDR_ADDR_MODE_SEL_OFF	15
#define CMD_HDR_ADDR_MODE_SEL_MSK	(1 << CMD_HDR_ADDR_MODE_SEL_OFF)
#define CMD_HDR_ABORT_IPTT_OFF		16
#define CMD_HDR_ABORT_IPTT_MSK		(0xffff << CMD_HDR_ABORT_IPTT_OFF)
/* Completion header */
#define CMPLT_HDR_CMPLT_OFF		0
#define CMPLT_HDR_CMPLT_MSK		(0x3 << CMPLT_HDR_CMPLT_OFF)
#define CMPLT_HDR_ERROR_PHASE_OFF	2
#define CMPLT_HDR_ERROR_PHASE_MSK	(0xff << CMPLT_HDR_ERROR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF	10
#define CMPLT_HDR_RSPNS_XFRD_MSK	(0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF		12
#define CMPLT_HDR_ERX_MSK		(0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_ABORT_STAT_OFF	13
#define CMPLT_HDR_ABORT_STAT_MSK	(0x7 << CMPLT_HDR_ABORT_STAT_OFF)
#define STAT_IO_NOT_VALID		0x1
#define STAT_IO_NO_DEVICE		0x2
#define STAT_IO_COMPLETE		0x3
#define STAT_IO_ABORTED			0x4
#define CMPLT_HDR_IPTT_OFF		0
#define CMPLT_HDR_IPTT_MSK		(0xffff << CMPLT_HDR_IPTT_OFF)
#define CMPLT_HDR_DEV_ID_OFF		16
#define CMPLT_HDR_DEV_ID_MSK		(0xffff << CMPLT_HDR_DEV_ID_OFF)
#define CMPLT_HDR_IO_IN_TARGET_OFF	17
#define CMPLT_HDR_IO_IN_TARGET_MSK	(0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
/* ITCT header */
#define ITCT_HDR_DEV_TYPE_OFF		0
#define ITCT_HDR_DEV_TYPE_MSK		(0x3 << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF		2
#define ITCT_HDR_VALID_MSK		(0x1 << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_MCR_OFF		5
#define ITCT_HDR_MCR_MSK		(0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF		9
#define ITCT_HDR_VLN_MSK		(0xf << ITCT_HDR_VLN_OFF)
#define ITCT_HDR_SMP_TIMEOUT_OFF	16
#define ITCT_HDR_AWT_CONTINUE_OFF	25
#define ITCT_HDR_PORT_ID_OFF		28
#define ITCT_HDR_PORT_ID_MSK		(0xf << ITCT_HDR_PORT_ID_OFF)
#define ITCT_HDR_INLT_OFF		0
#define ITCT_HDR_INLT_MSK		(0xffffULL << ITCT_HDR_INLT_OFF)
#define ITCT_HDR_RTOLT_OFF		48
#define ITCT_HDR_RTOLT_MSK		(0xffffULL << ITCT_HDR_RTOLT_OFF)
struct hisi_sas_protect_iu_v3_hw {
	u32 dw0;
	u32 lbrtcv;
	u32 lbrtgv;
	u32 dw3;
	u32 dw4;
	u32 dw5;
	u32 rsv;
};

struct hisi_sas_complete_v3_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;
	__le32 dw3;
};

struct hisi_sas_err_record_v3 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};
#define RX_DATA_LEN_UNDERFLOW_OFF	6
#define RX_DATA_LEN_UNDERFLOW_MSK	(1 << RX_DATA_LEN_UNDERFLOW_OFF)

#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32

#define DIR_NO_DATA 0
#define DIR_TO_INI 1
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3
#define FIS_CMD_IS_UNCONSTRAINED(fis) \
	((fis.command == ATA_CMD_READ_LOG_EXT) || \
	(fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
	((fis.command == ATA_CMD_DEV_RESET) && \
	((fis.control & ATA_SRST) != 0)))
#define T10_INSRT_EN_OFF    0
#define T10_INSRT_EN_MSK    (1 << T10_INSRT_EN_OFF)
#define T10_RMV_EN_OFF	    1
#define T10_RMV_EN_MSK	    (1 << T10_RMV_EN_OFF)
#define T10_RPLC_EN_OFF	    2
#define T10_RPLC_EN_MSK	    (1 << T10_RPLC_EN_OFF)
#define T10_CHK_EN_OFF	    3
#define T10_CHK_EN_MSK	    (1 << T10_CHK_EN_OFF)
#define INCR_LBRT_OFF	    5
#define INCR_LBRT_MSK	    (1 << INCR_LBRT_OFF)
#define USR_DATA_BLOCK_SZ_OFF	20
#define USR_DATA_BLOCK_SZ_MSK	(0x3 << USR_DATA_BLOCK_SZ_OFF)
#define T10_CHK_MSK_OFF	    16
#define T10_CHK_REF_TAG_MSK	(0xf0 << T10_CHK_MSK_OFF)
#define T10_CHK_APP_TAG_MSK	(0xc << T10_CHK_MSK_OFF)

#define BASE_VECTORS_V3_HW  16
#define MIN_AFFINE_VECTORS_V3_HW  (BASE_VECTORS_V3_HW + 1)
#define CHNL_INT_STS_MSK	0xeeeeeeee
#define CHNL_INT_STS_PHY_MSK	0xe
#define CHNL_INT_STS_INT0_MSK	BIT(1)
#define CHNL_INT_STS_INT1_MSK	BIT(2)
#define CHNL_INT_STS_INT2_MSK	BIT(3)
#define CHNL_WIDTH		4
enum {
	DSM_FUNC_ERR_HANDLE_MSI = 0,
};

static bool hisi_sas_intr_conv;
MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");

/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");

static bool auto_affine_msi_experimental;
module_param(auto_affine_msi_experimental, bool, 0444);
MODULE_PARM_DESC(auto_affine_msi_experimental,
		 "Enable auto-affinity of MSI IRQs as experimental:\n"
		 "default is off");

static void debugfs_work_handler_v3_hw(struct work_struct *work);
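
/*
 * Register accessors: global registers are read/written at hisi_hba->regs +
 * off, while each phy has its own register window selected by a 0x400 *
 * phy_no stride (the per-phy register map starts at PORT_BASE above).
 */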
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl(regs);
}

static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + off;

	writel(val, regs);
}

static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
				 u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	writel(val, regs);
}

static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
			       int phy_no, u32 off)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	return readl(regs);
}

#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us,		\
				     timeout_us)			\
({									\
	void __iomem *regs = hisi_hba->regs + off;			\
	readl_poll_timeout(regs, val, cond, delay_us, timeout_us);	\
})

#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us,	\
					    timeout_us)			\
({									\
	void __iomem *regs = hisi_hba->regs + off;			\
	readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us); \
})
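
/*
 * One-off controller initialisation: program the global interrupt and
 * coalescing registers, per-phy link rates and interrupt masks, the
 * delivery/completion queue base addresses and depths, the ITCT/IOST/
 * breakpoint DMA addresses, the RAS interrupt masks and the GPIO/LED
 * blink configuration.
 */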
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int i, j;

	/* Global registers init */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
			 hisi_sas_intr_conv);
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);

	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		enum sas_linkrate max;
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i,
							     PROG_PHY_LINK_RATE);

		prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
		if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
				      SAS_LINK_RATE_1_5_GBPS))
			max = SAS_LINK_RATE_12_0_GBPS;
		else
			max = sas_phy->phy->maximum_linkrate;
		prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
				     prog_phy_link_rate);
		hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
		hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
		hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
				     0x30f4240);
		/* used for 12G negotiate */
		hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
		hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);

		/* get default FFE configuration for BIST */
		for (j = 0; j < FFE_CFG_MAX; j++) {
			u32 val = hisi_sas_phy_read32(hisi_hba, i,
						      TXDEEMPH_G1 + (j * 0x4));
			hisi_hba->debugfs_bist_ffe[i][j] = val;
		}
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));

	/* RAS registers init */
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
	hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);

	/* LED registers init */
	hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
	/* Configure blink generator rate A to 1Hz and B to 4Hz */
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
}
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg &= ~PHY_CFG_DC_OPT_MSK;
	cfg |= 1 << PHY_CFG_DC_OPT_OFF;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}
static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct sas_identify_frame identify_frame;
	u32 *identify_buffer;

	memset(&identify_frame, 0, sizeof(identify_frame));
	identify_frame.dev_type = SAS_END_DEVICE;
	identify_frame.frame_type = 0;
	identify_frame._un1 = 1;
	identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
	identify_frame.target_bits = SAS_PROTOCOL_NONE;
	memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	identify_frame.phy_id = phy_no;
	identify_buffer = (u32 *)(&identify_frame);

	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
			__swab32(identify_buffer[0]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
			__swab32(identify_buffer[1]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
			__swab32(identify_buffer[2]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
			__swab32(identify_buffer[3]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
			__swab32(identify_buffer[4]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
			__swab32(identify_buffer[5]));
}
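
/*
 * Fill in the ITCT (device context) entry for a device: device type,
 * negotiated link rate, owning port, (byte-swapped) SAS address and, for
 * non-SATA devices, the I_T nexus loss and reject-to-open timeouts.
 */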
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = hisi_hba->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u64 sas_addr;

	memset(itct, 0, sizeof(*itct));

	/* qw0 */
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PENDING:
		if (parent_dev && dev_is_expander(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1 */
	memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = cpu_to_le64(__swab64(sas_addr));

	/* qw2 */
	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
			    struct hisi_sas_device *sas_dev)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	u64 dev_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	struct device *dev = hisi_hba->dev;

	sas_dev->completion = &completion;

	/* clear the itct interrupt state */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct table */
	reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

	if (!wait_for_completion_timeout(sas_dev->completion,
					 CLEAR_ITCT_TIMEOUT * HZ)) {
		dev_warn(dev, "failed to clear ITCT\n");
		return -ETIMEDOUT;
	}

	memset(itct, 0, sizeof(struct hisi_sas_itct));

	return 0;
}
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
			       struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	u32 cfg_abt_set_query_iptt;

	cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
		CFG_ABT_SET_QUERY_IPTT);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
		cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
		cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
			(slot->idx << CFG_SET_ABORTED_IPTT_OFF);
		hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
			cfg_abt_set_query_iptt);
	}
	cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
		cfg_abt_set_query_iptt);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
			 1 << CFG_ABT_SET_IPTT_DONE_OFF);
}
static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int ret;
	u32 val;

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	hisi_sas_stop_phys(hisi_hba);

	/* Ensure axi bus idle */
	ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
					   20000, 1000000);
	if (ret) {
		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
		return -EIO;
	}

	if (ACPI_HANDLE(dev)) {
		acpi_status s;

		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
		if (ACPI_FAILURE(s)) {
			dev_err(dev, "Reset failed\n");
			return -EIO;
		}
	} else {
		dev_err(dev, "no reset method!\n");
		return -EINVAL;
	}

	return 0;
}
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct acpi_device *acpi_dev;
	union acpi_object *obj;
	guid_t guid;
	int rc;

	rc = reset_hw_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
		return rc;
	}

	msleep(100);
	init_reg_v3_hw(hisi_hba);

	if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
		dev_err(dev, "Parse GUID failed\n");
		return -EINVAL;
	}

	/*
	 * This DSM handles some hardware-related configurations:
	 * 1. Switch over to MSI error handling in kernel
	 * 2. BIOS *may* reset some register values through this method
	 */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
				DSM_FUNC_ERR_HANDLE_MSI, NULL);
	if (!obj)
		dev_warn(dev, "can not find DSM method, ignore\n");
	else
		ACPI_FREE(obj);

	acpi_dev = ACPI_COMPANION(dev);
	if (!acpi_device_power_manageable(acpi_dev))
		dev_notice(dev, "neither _PS0 nor _PR0 is defined\n");

	return 0;
}
static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg |= PHY_CFG_ENA_MSK;
	cfg &= ~PHY_CFG_PHY_RST_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}
static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			       BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			       BIT(CHL_INT2_RX_INVLD_DW_OFF);
	u32 state;

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk);

	cfg &= ~PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);

	state = hisi_sas_read32(hisi_hba, PHY_STATE);
	if (state & BIT(phy_no)) {
		cfg |= PHY_CFG_PHY_RST_MSK;
		hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
	}

	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
}
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}
static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
					txid_auto | TX_HARDRST_MSK);
	}
	msleep(100);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}
static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (!sas_phy->phy->enabled)
			continue;

		hisi_sas_phy_enable(hisi_hba, i, 1);
	}
}
static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	for (i = 0; i < hisi_hba->n_phy; i++)
		if (phy_state & BIT(i))
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= BIT(i);

	return bitmap;
}
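
/*
 * Kick a delivery queue: walk the slots queued on this DQ, drop them from
 * the delivery list, and advance the hardware write pointer to one past the
 * last ready slot so the controller starts fetching the new command headers.
 */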
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct hisi_sas_slot *s, *s1, *s2 = NULL;
	int dlvry_queue = dq->id;
	int wp;

	list_for_each_entry_safe(s, s1, &dq->list, delivery) {
		if (!s->ready)
			break;
		s2 = s;
		list_del(&s->delivery);
	}

	if (!s2)
		return;

	/*
	 * Ensure that memories for slots built on other CPUs is observed.
	 */
	smp_rmb();
	wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;

	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
			       struct hisi_sas_slot *slot,
			       struct hisi_sas_cmd_hdr *hdr,
			       struct scatterlist *scatter,
			       int n_elem)
{
	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
	struct scatterlist *sg;
	int i;

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
}
static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot,
				   struct hisi_sas_cmd_hdr *hdr,
				   struct scatterlist *scatter,
				   int n_elem)
{
	struct hisi_sas_sge_dif_page *sge_dif_page;
	struct scatterlist *sg;
	int i;

	sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_dif_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = 0;
		entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->dif_prd_table_addr =
		cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
}
static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
{
	unsigned char prot_flags = scsi_cmnd->prot_flags;

	if (prot_flags & SCSI_PROT_REF_CHECK)
		return T10_CHK_APP_TAG_MSK;
	return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK;
}
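
/*
 * Build the T10 PI (protection information) IU for a command: select
 * insert/strip/check behaviour from the SCSI protection op, record the
 * reference tag, and encode the protection interval.
 */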
static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
			    struct hisi_sas_protect_iu_v3_hw *prot)
{
	unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
	unsigned int interval = scsi_prot_interval(scsi_cmnd);
	u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);

	switch (prot_op) {
	case SCSI_PROT_READ_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;
		break;
	case SCSI_PROT_READ_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_READ_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_WRITE_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	default:
		WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
		break;
	}

	switch (interval) {
	case 512:
		prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
		break;
	case 4096:
		prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
		break;
	default:
		WARN(1, "protection interval (0x%x) invalid\n",
		     interval);
		break;
	}

	prot->dw0 |= INCR_LBRT_MSK;
}
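
/*
 * Build the command header and command table for an SSP frame (command or
 * task management), including the data SGL, optional DIF SGL and the
 * protection IU when T10 PI is in use.
 */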
static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	struct hisi_sas_tmf_task *tmf = slot->tmf;
	int has_data = 0, priority = !!tmf;
	unsigned char prot_op;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0, len = 0;

	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	if (tmf) {
		dw1 = 1 << CMD_HDR_VDTL_OFF;
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		prot_op = scsi_get_prot_op(scsi_cmnd);
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
				   slot->n_elem);

		if (scsi_prot_sg_count(scsi_cmnd))
			prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
					       scsi_prot_sglist(scsi_cmnd),
					       slot->n_elem_dif);
	}

	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!tmf) {
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
		struct hisi_sas_protect_iu_v3_hw prot;
		u8 *buf_cmd_prot;

		hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
		dw1 |= CMD_HDR_PIR_MSK;
		buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
			       sizeof(struct ssp_frame_hdr) +
			       sizeof(struct ssp_command_iu);

		memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
		fill_prot_v3_hw(scsi_cmnd, &prot);
		memcpy(buf_cmd_prot, &prot,
		       sizeof(struct hisi_sas_protect_iu_v3_hw));
		/*
		 * For READ, we need length of info read to memory, while for
		 * WRITE we need length of data written to the disk.
		 */
		if (prot_op == SCSI_PROT_WRITE_INSERT ||
		    prot_op == SCSI_PROT_READ_INSERT ||
		    prot_op == SCSI_PROT_WRITE_PASS ||
		    prot_op == SCSI_PROT_READ_PASS) {
			unsigned int interval = scsi_prot_interval(scsi_cmnd);
			unsigned int ilog2_interval = ilog2(interval);

			len = (task->total_xfer_len >> ilog2_interval) * 8;
		}
	}

	hdr->dw1 = cpu_to_le32(dw1);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
}
static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len;

	/* req */
	sg_req = &task->smp_task.smp_req;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* create header, dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2 */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
}
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	if (parent_dev && dev_is_expander(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
	    (task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(
		&task->ata_task.fis, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq) {
		struct ata_queued_cmd *qc = task->uldd_task;

		hdr_tag = qc->tag;
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data)
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
				   slot->n_elem);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
}
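
/*
 * Build an internal abort command header: dw0 carries the abort command
 * code, port and device type, dw1 the device id and dw7 the IPTT of the
 * command to be aborted.
 */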
static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_slot *slot,
			     int device_id, int abort_flag, int tag_to_abort)
{
	struct sas_task *task = slot->task;
	struct domain_device *dev = task->dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = slot->port;

	/* dw0 */
	hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
			       (port->id << CMD_HDR_PORT_OFF) |
				   (dev_is_sata(dev)
					<< CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
					(abort_flag
					 << CMD_HDR_ABORT_FLAG_OFF));

	/* dw1 */
	hdr->dw1 = cpu_to_le32(device_id
			<< CMD_HDR_DEV_ID_OFF);

	/* dw7 */
	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot->idx);
}
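
/*
 * Phy-up interrupt handling: read the port id and negotiated link rate,
 * then use PHY_CONTEXT to tell SATA from SAS attachments - SATA phys take
 * their identity from the stored initial D2H FIS, SAS phys from the
 * received IDENTIFY address frame.
 */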
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i;
	irqreturn_t res = IRQ_NONE;
	u32 context, port_id, link_rate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	port_id = (port_id >> (4 * phy_no)) & 0xf;
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * 4)) & 0xf;

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		goto end;
	}

	sas_phy->linkrate = link_rate;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no)) {
		struct hisi_sas_initial_fis *initial_fis;
		struct dev_to_host_fis *fis;
		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
		struct Scsi_Host *shost = hisi_hba->shost;

		dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
		initial_fis = &hisi_hba->initial_fis[phy_no];
		fis = &initial_fis->fis;

		/* check ERR bit of Status Register */
		if (fis->status & ATA_ERR) {
			dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
				 phy_no, fis->status);
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
			goto end;
		}

		sas_phy->oob_mode = SATA_OOB_MODE;
		attached_sas_addr[0] = 0x50;
		attached_sas_addr[6] = shost->host_no;
		attached_sas_addr[7] = phy_no;
		memcpy(sas_phy->attached_sas_addr,
		       attached_sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(sas_phy->frame_rcvd, fis,
		       sizeof(struct dev_to_host_fis));
		phy->phy_type |= PORT_TYPE_SATA;
		phy->identify.device_type = SAS_SATA_DEV;
		phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
		phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	} else {
		u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
		struct sas_identify_frame *id =
			(struct sas_identify_frame *)frame_rcvd;

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		for (i = 0; i < 6; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
					       RX_IDAF_DWORD0 + (i * 4));
			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr,
		       SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;
	phy->phy_attached = 1;
	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
	res = IRQ_HANDLED;
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->reset_completion) {
		phy->in_reset = 0;
		complete(phy->reset_completion);
	}
	spin_unlock_irqrestore(&phy->lock, flags);

end:
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}
static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	atomic_inc(&phy->down_cnt);

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
			     sl_ctrl&(~SL_CTA_MSK));

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
			     txid_auto | CT3_MSK);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return IRQ_HANDLED;
}
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	u32 bcast_status;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
	if ((bcast_status & RX_BCAST_CHG_MSK) &&
	    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);

	return IRQ_HANDLED;
}
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
		  & 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					if (phy_bcast_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}
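
/* CHL_INT1 error bits handled by handle_chl_int1_v3_hw() below */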
static const struct hisi_sas_hw_error port_axi_error[] = {
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
		.msg = "dmac_tx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
		.msg = "dmac_rx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
		.msg = "dma_tx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
		.msg = "dma_tx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
		.msg = "dma_rx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
		.msg = "dma_rx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
		.msg = "dma_tx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
		.msg = "dma_rx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
		.msg = "dma_tx_axi_ruser_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
		.msg = "dma_rx_axi_ruser_err",
	},
};
static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
	struct device *dev = hisi_hba->dev;
	int i;

	irq_value &= ~irq_msk;
	if (!irq_value)
		return;

	for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &port_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		dev_err(dev, "%s error (phy%d 0x%x) found!\n",
			error->msg, phy_no, irq_value);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
}
static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;
	u32 reg_value;

	spin_lock_irqsave(&phy->lock, flags);

	/* loss dword sync */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
	sphy->loss_of_dword_sync_count += reg_value;

	/* phy reset problem */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
	sphy->phy_reset_problem_count += reg_value;

	/* invalid dword */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	sphy->invalid_dword_count += reg_value;

	/* disparity err */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	sphy->running_disparity_error_count += reg_value;

	/* code violation error */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
	phy->code_violation_err_count += reg_value;

	spin_unlock_irqrestore(&phy->lock, flags);
}
static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct pci_dev *pci_dev = hisi_hba->pci_dev;
	struct device *dev = hisi_hba->dev;
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			BIT(CHL_INT2_RX_INVLD_DW_OFF);

	irq_value &= ~irq_msk;
	if (!irq_value)
		return;

	if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
		dev_warn(dev, "phy%d identify timeout\n", phy_no);
		hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
		u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
				STP_LINK_TIMEOUT_STATE);

		dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
			 phy_no, reg_value);
		if (reg_value & BIT(4))
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (pci_dev->revision > 0x20 && (irq_value & msk)) {
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct sas_phy *sphy = sas_phy->phy;

		phy_get_events_v3_hw(hisi_hba, phy_no);

		if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
			dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no,
				 sphy->invalid_dword_count);

		if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
			dev_info(dev, "phy%d code violation cnt: %u\n", phy_no,
				 phy->code_violation_err_count);

		if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
			dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
				 sphy->running_disparity_error_count);
	}

	if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
	    (pci_dev->revision == 0x20)) {
		u32 reg_value;
		int rc;

		rc = hisi_sas_read32_poll_timeout_atomic(
			HILINK_ERR_DFX, reg_value,
			!((reg_value >> 8) & BIT(phy_no)),
			1000, 10000);
		if (rc)
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
}
static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);

	if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
		hisi_sas_phy_oob_ready(hisi_hba, phy_no);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
			     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
			     & (~CHL_INT0_NOT_RDY_MSK));
}
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
		  & CHNL_INT_STS_MSK;

	while (irq_msk) {
		if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int0_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int1_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int2_v3_hw(hisi_hba, phy_no);

		irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH));
		phy_no++;
	}

	return IRQ_HANDLED;
}
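/*
 * Multi-bit ECC error records: for each SAS_ECC_INTR bit, the register,
 * mask and shift used to extract the failing memory address for logging.
 */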
static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
	{
		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_dqe_eccbad_intr",
		.reg = HGC_DQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
		.msg = "hgc_iost_eccbad_intr",
		.reg = HGC_IOST_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
		.msg = "hgc_itct_eccbad_intr",
		.reg = HGC_ITCT_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
		.msg = "hgc_iostl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
		.msg = "hgc_itctl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_cqe_eccbad_intr",
		.reg = HGC_CQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
		.msg = "rxm_mem0_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
		.msg = "rxm_mem1_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
		.msg = "rxm_mem2_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
		.msg = "rxm_mem3_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS15,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
		.msk = AM_ROB_ECC_ERR_ADDR_MSK,
		.shift = AM_ROB_ECC_ERR_ADDR_OFF,
		.msg = "ooo_ram_eccbad_intr",
		.reg = AM_ROB_ECC_ERR_ADDR,
	},
};
static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
					      u32 irq_value)
{
	struct device *dev = hisi_hba->dev;
	const struct hisi_sas_hw_error *ecc_error;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
		ecc_error = &multi_bit_ecc_errors[i];
		if (irq_value & ecc_error->irq_msk) {
			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
			val &= ecc_error->msk;
			val >>= ecc_error->shift;
			dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
				ecc_error->msg, irq_value, val);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}
	}
}
static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 irq_value, irq_msk;

	irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);

	multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);

	hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
}
static const struct hisi_sas_hw_error axi_error[] = {
	{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
	{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
	{ .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
	{ .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
	{ .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
	{}
};
static const struct hisi_sas_hw_error fifo_error[] = {
	{ .msk = BIT(8),  .msg = "CQE_WINFO_FIFO" },
	{ .msk = BIT(9),  .msg = "CQE_MSG_FIFIO" },
	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
	{}
};
static const struct hisi_sas_hw_error fatal_axi_error[] = {
	{
		.irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
		.msg = "write pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
		.msg = "iptt no match slot",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
		.msg = "read pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = axi_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = fifo_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
		.msg = "LM add/fetch list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
		.msg = "SAS_HGC_ABT fetch LM list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
		.msg = "read dqe poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
		.msg = "read iost poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
		.msg = "read itct poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
		.msg = "read itct ncq poison",
	},
};
static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
{
	u32 irq_value, irq_msk;
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);

	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	irq_value &= ~irq_msk;

	for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &fatal_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		if (error->sub) {
			const struct hisi_sas_hw_error *sub = error->sub;
			u32 err_value = hisi_sas_read32(hisi_hba, error->reg);

			for (; sub->msk || sub->msg; sub++) {
				if (!(err_value & sub->msk))
					continue;

				dev_err(dev, "%s error (0x%x) found!\n",
					sub->msg, irq_value);
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			}
		} else {
			dev_err(dev, "%s error (0x%x) found!\n",
				error->msg, irq_value);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}

		if (pdev->revision < 0x21) {
			u32 reg_val;

			reg_val = hisi_sas_read32(hisi_hba,
						  AXI_MASTER_CFG_BASE +
						  AM_CTRL_GLOBAL);
			reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
			hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
					 AM_CTRL_GLOBAL, reg_val);
		}
	}

	fatal_ecc_int_v3_hw(hisi_hba);

	if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
		u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		u32 dev_id = reg_val & ITCT_DEV_MSK;
		struct hisi_sas_device *sas_dev =
				&hisi_hba->devices[dev_id];

		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
		dev_dbg(dev, "clear ITCT ok\n");
		complete(sas_dev->completion);
	}

	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);

	return IRQ_HANDLED;
}
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
	       struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	struct hisi_sas_err_record_v3 *record =
			hisi_sas_status_buf_addr_mem(slot);
	u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
	u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
	u32 dw3 = le32_to_cpu(complete_hdr->dw3);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_PHY_DOWN;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		hisi_sas_sata_done(task, slot);
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	default:
		break;
	}
}
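/*
 * Complete one I/O slot: translate the completion header (and, for
 * erroneous completions, the error record via slot_err_v3_hw()) into
 * libsas task status, free the slot and call the task's done callback.
 */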
static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
				struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	struct sas_ha_struct *ha;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	unsigned long flags;
	bool is_internal = slot->is_internal;
	u32 dw0, dw1, dw3;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return;

	ts = &task->task_status;
	device = task->dev;
	ha = device->port->ha;
	sas_dev = device->lldd_dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has not device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	dw0 = le32_to_cpu(complete_hdr->dw0);
	dw1 = le32_to_cpu(complete_hdr->dw1);
	dw3 = le32_to_cpu(complete_hdr->dw3);

	/*
	 * Use SAS+TMF status codes
	 */
	switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this IO has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/*
		 * abort single IO, the controller can't find the IO
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* check for erroneous completion */
	if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
		u32 *error_info = hisi_sas_status_buf_addr_mem(slot);

		slot_err_v3_hw(hisi_hba, task, slot);
		if (ts->stat != SAS_DATA_UNDERRUN)
			dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
				 slot->idx, task, sas_dev->device_id,
				 dw0, dw1, complete_hdr->act, dw3,
				 error_info[0], error_info[1],
				 error_info[2], error_info[3]);
		if (unlikely(slot->abort)) {
			sas_task_abort(task);
			return;
		}
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP: {
		struct ssp_response_iu *iu =
			hisi_sas_status_buf_addr_mem(slot) +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to = page_address(sg_page(sg_resp));

		ts->stat = SAM_STAT_GOOD;

		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		memcpy(to + sg_resp->offset,
		       hisi_sas_status_buf_addr_mem(slot) +
		       sizeof(struct hisi_sas_err_record),
		       sg_resp->length);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAM_STAT_GOOD;
		hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_warn(dev, "slot complete: port %d has removed\n",
			 slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
		return;
	}
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);

	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
		spin_lock_irqsave(&device->done_lock, flags);
		if (test_bit(SAS_HA_FROZEN, &ha->state)) {
			spin_unlock_irqrestore(&device->done_lock, flags);
			dev_info(dev, "slot complete: task(%pK) ignored\n ",
				 task);
			return;
		}
		spin_unlock_irqrestore(&device->done_lock, flags);
	}

	if (task->task_done)
		task->task_done(task);
}
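/*
 * Threaded handler for a completion queue: walk the CQ entries between the
 * stored read pointer and the hardware write pointer, complete each slot by
 * its IPTT, then publish the new read pointer back to the hardware.
 */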
static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_complete_v3_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point;
	int queue = cq->id;

	complete_queue = hisi_hba->complete_hdr[queue];

	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		struct device *dev = hisi_hba->dev;
		u32 dw1;
		int iptt;

		complete_hdr = &complete_queue[rd_point];
		dw1 = le32_to_cpu(complete_hdr->dw1);

		iptt = dw1 & CMPLT_HDR_IPTT_MSK;
		if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v3_hw(hisi_hba, slot);
		} else
			dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);

	return IRQ_HANDLED;
}
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	return IRQ_WAKE_THREAD;
}
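/*
 * MSI vector usage below: vector 1 = phy up/down/broadcast, vector 2 =
 * channel interrupts, vector 11 = fatal AXI/ECC, vectors 16 onwards (or a
 * single shared vector 16 when interrupt converge is enabled) = completion
 * queues.  BASE_VECTORS_V3_HW vectors are reserved as pre_vectors so that
 * only the CQ vectors get affinity spreading.
 */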
static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
{
	int vectors;
	int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
	struct Scsi_Host *shost = hisi_hba->shost;
	struct irq_affinity desc = {
		.pre_vectors = BASE_VECTORS_V3_HW,
	};

	min_msi = MIN_AFFINE_VECTORS_V3_HW;
	vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
						 min_msi, max_msi,
						 PCI_IRQ_MSI |
						 PCI_IRQ_AFFINITY,
						 &desc);
	if (vectors < 0)
		return -ENOENT;

	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
	shost->nr_hw_queues = hisi_hba->cq_nvecs;

	return 0;
}
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int rc, i;

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
			      int_phy_up_down_bcast_v3_hw, 0,
			      DRV_NAME " phy", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
			      int_chnl_int_v3_hw, 0,
			      DRV_NAME " channel", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
			      fatal_axi_int_v3_hw, 0,
			      DRV_NAME " fatal", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	if (hisi_sas_intr_conv)
		dev_info(dev, "Enable interrupt converge\n");

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
							      IRQF_ONESHOT;

		cq->irq_no = pci_irq_vector(pdev, nr);
		rc = devm_request_threaded_irq(dev, cq->irq_no,
					       cq_interrupt_v3_hw,
					       cq_thread_v3_hw,
					       irqflags,
					       DRV_NAME " cq", cq);
		if (rc) {
			dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
				i, rc);
			return -ENOENT;
		}
		cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW);
		if (!cq->irq_mask) {
			dev_err(dev, "could not get cq%d irq affinity!\n", i);
			return -ENOENT;
		}
	}

	return 0;
}
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	return 0;
}
static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
				   struct sas_phy_linkrates *r)
{
	enum sas_linkrate max = r->maximum_linkrate;
	u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no,
						     PROG_PHY_LINK_RATE);

	prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
	prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			     prog_phy_link_rate);
}
static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
{
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	synchronize_irq(pci_irq_vector(pdev, 1));
	synchronize_irq(pci_irq_vector(pdev, 2));
	synchronize_irq(pci_irq_vector(pdev, 11));
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);

	for (i = 0; i < hisi_hba->cq_nvecs; i++)
		synchronize_irq(pci_irq_vector(pdev, i + 16));

	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
	}
}
static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
{
	return hisi_sas_read32(hisi_hba, PHY_STATE);
}
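/*
 * Quiesce the controller: mask and synchronize all interrupts, disable the
 * delivery queues and phys, then request AXI master shutdown and poll
 * AM_CURR_TRANS_RETURN until the bus reports idle.
 */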
static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	u32 status, reg_val;
	int rc;

	interrupt_disable_v3_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);

	hisi_sas_stop_phys(hisi_hba);

	reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
				  AM_CTRL_GLOBAL);
	reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
			 AM_CTRL_GLOBAL, reg_val);

	/* wait until bus idle */
	rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
					  AM_CURR_TRANS_RETURN, status,
					  status == 0x3, 10, 100);
	if (rc) {
		dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
		return rc;
	}

	return 0;
}
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	return hw_init_v3_hw(hisi_hba);
}
static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
			    u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct device *dev = hisi_hba->dev;
	u32 *data = (u32 *)write_data;
	int i;

	switch (reg_type) {
	case SAS_GPIO_REG_TX:
		if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
			dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
				reg_index, reg_index + reg_count - 1);
			return -EINVAL;
		}

		for (i = 0; i < reg_count; i++)
			hisi_sas_write32(hisi_hba,
					 SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
					 data[i]);
		break;
	default:
		dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
			reg_type);
		return -EINVAL;
	}

	return 0;
}
static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
					     int delay_ms, int timeout_ms)
{
	struct device *dev = hisi_hba->dev;
	int entries, entries_old = 0, time;

	for (time = 0; time < timeout_ms; time += delay_ms) {
		entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
		if (entries == entries_old)
			break;

		entries_old = entries;
		msleep(delay_ms);
	}

	if (time >= timeout_ms) {
		dev_dbg(dev, "Wait commands complete timeout!\n");
		return;
	}

	dev_dbg(dev, "wait commands complete %dms\n", time);
}
static ssize_t intr_conv_v3_hw_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
}
static DEVICE_ATTR_RO(intr_conv_v3_hw);
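/*
 * Apply the interrupt coalescing settings exposed via sysfs.  A tick or
 * count of zero disables coalescing; the registers must be written between
 * stopping and re-initialising the phys, which is done here.
 */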
static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
{
	/* config those registers between enable and disable PHYs */
	hisi_sas_stop_phys(hisi_hba);

	if (hisi_hba->intr_coal_ticks == 0 ||
	    hisi_hba->intr_coal_count == 0) {
		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	} else {
		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
				 hisi_hba->intr_coal_ticks);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
				 hisi_hba->intr_coal_count);
	}
	phys_init_v3_hw(hisi_hba);
}
static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 hisi_hba->intr_coal_ticks);
}

static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);
	u32 intr_coal_ticks;
	int ret;

	ret = kstrtou32(buf, 10, &intr_coal_ticks);
	if (ret) {
		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
		return -EINVAL;
	}

	if (intr_coal_ticks >= BIT(24)) {
		dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
		return -EINVAL;
	}

	hisi_hba->intr_coal_ticks = intr_coal_ticks;

	config_intr_coal_v3_hw(hisi_hba);

	return count;
}
static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);
static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 hisi_hba->intr_coal_count);
}

static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);
	u32 intr_coal_count;
	int ret;

	ret = kstrtou32(buf, 10, &intr_coal_count);
	if (ret) {
		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
		return -EINVAL;
	}

	if (intr_coal_count >= BIT(8)) {
		dev_err(dev, "intr_coal_count must be less than 2^8!\n");
		return -EINVAL;
	}

	hisi_hba->intr_coal_count = intr_coal_count;

	config_intr_coal_v3_hw(hisi_hba);

	return count;
}
static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
static int slave_configure_v3_hw(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct device *dev = hisi_hba->dev;
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;

	if (!dev_is_sata(ddev))
		sas_change_queue_depth(sdev, 64);

	if (sdev->type == TYPE_ENCLOSURE)
		return 0;

	if (!device_link_add(&sdev->sdev_gendev, dev,
			     DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) {
		if (pm_runtime_enabled(dev)) {
			dev_info(dev, "add device link failed, disable runtime PM for the host\n");
			pm_runtime_disable(dev);
		}
	}

	return 0;
}
static struct device_attribute *host_attrs_v3_hw[] = {
	&dev_attr_phy_event_threshold,
	&dev_attr_intr_conv_v3_hw,
	&dev_attr_intr_coal_ticks_v3_hw,
	&dev_attr_intr_coal_count_v3_hw,
	NULL
};
#define HISI_SAS_DEBUGFS_REG(x) {#x, x}

struct hisi_sas_debugfs_reg_lu {
	char *name;
	int off;
};

struct hisi_sas_debugfs_reg {
	const struct hisi_sas_debugfs_reg_lu *lu;
	int count;
	int base_off;
};
static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(PHY_CFG),
	HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
	HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE),
	HISI_SAS_DEBUGFS_REG(PHY_CTRL),
	HISI_SAS_DEBUGFS_REG(SL_CFG),
	HISI_SAS_DEBUGFS_REG(AIP_LIMIT),
	HISI_SAS_DEBUGFS_REG(SL_CONTROL),
	HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6),
	HISI_SAS_DEBUGFS_REG(TXID_AUTO),
	HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0),
	HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H),
	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER),
	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE),
	HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER),
	HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(CHL_INT0),
	HISI_SAS_DEBUGFS_REG(CHL_INT1),
	HISI_SAS_DEBUGFS_REG(CHL_INT2),
	HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK),
	HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK),
	HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK),
	HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN),
	HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER),
	HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK),
	HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK),
	HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS),
	HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS),
	HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
	.lu = debugfs_port_reg_lu,
	.count = 0x100,
	.base_off = PORT_BASE,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE),
	HISI_SAS_DEBUGFS_REG(PHY_CONTEXT),
	HISI_SAS_DEBUGFS_REG(PHY_STATE),
	HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA),
	HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE),
	HISI_SAS_DEBUGFS_REG(ITCT_CLR),
	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO),
	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI),
	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
	HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
	HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
	HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
	HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
	HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE),
	HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME),
	HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME),
	HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN),
	HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME),
	HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2),
	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT),
	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE),
	HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS),
	HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS),
	HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO),
	HISI_SAS_DEBUGFS_REG(INT_COAL_EN),
	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT),
	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT),
	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC),
	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3),
	HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK),
	HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK),
	HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK),
	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR),
	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK),
	HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN),
	HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR),
	HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG),
	HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR),
	HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG),
	HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG),
	HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1),
	HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
	.lu = debugfs_global_reg_lu,
	.count = 0x800,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS),
	HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS),
	HISI_SAS_DEBUGFS_REG(AXI_CFG),
	HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
	.lu = debugfs_axi_reg_lu,
	.count = 0x61,
	.base_off = AXI_MASTER_CFG_BASE,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
	HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
	.lu = debugfs_ras_reg_lu,
	.count = 0x10,
	.base_off = RAS_BASE,
};
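/*
 * Debugfs snapshot fencing: new commands are rejected, the delivery queues
 * are disabled and in-flight commands are drained before registers and
 * queue memory are copied; debugfs_snapshot_restore_v3_hw() undoes this.
 */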
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);

	hisi_sas_sync_irqs(hisi_hba);
}

static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
{
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
				       enum hisi_sas_debugfs_cache_type type,
				       u32 *cache)
{
	u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ *
			    HISI_SAS_IOST_ITCT_CACHE_NUM;
	struct device *dev = hisi_hba->dev;
	u32 *buf = cache;
	u32 i, val;

	hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type);

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) {
		val = hisi_sas_read32(hisi_hba, TAB_DFX);
		if (val == 0xffffffff)
			break;
	}

	if (val != 0xffffffff) {
		dev_err(dev, "Issue occurred in reading IOST/ITCT cache!\n");
		return;
	}

	memset(buf, 0, cache_dw_size * 4);
	buf[0] = val;

	for (i = 1; i < cache_dw_size; i++)
		buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX);
}
static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 reg_val;
	int phy_no = hisi_hba->debugfs_bist_phy_no;
	int i;

	/* disable PHY */
	hisi_sas_phy_enable(hisi_hba, phy_no, 0);

	/* update FFE */
	for (i = 0; i < FFE_CFG_MAX; i++)
		hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4),
				     hisi_hba->debugfs_bist_ffe[phy_no][i]);

	/* disable ALOS */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
	reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
}

static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 reg_val;
	int phy_no = hisi_hba->debugfs_bist_phy_no;

	/* disable loopback */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL);
	reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
		     CFG_BIST_TEST_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val);

	/* enable ALOS */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
	reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);

	/* restore the linkrate */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
	/* init OOB link rate as 1.5 Gbits */
	reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
	reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);

	/* enable PHY */
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
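/*
 * BIST control: prepare the phy (disable it, program FFE and disable ALOS
 * checking), program the requested link rate, loopback path and code mode,
 * seed the pattern, then enable RX/TX BIST; on disable, accumulate the
 * error count and restore the phy to normal operation.
 */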
#define SAS_PHY_BIST_CODE_INIT	0x1
#define SAS_PHY_BIST_CODE1_INIT	0X80
static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
	u32 reg_val, mode_tmp;
	u32 linkrate = hisi_hba->debugfs_bist_linkrate;
	u32 phy_no = hisi_hba->debugfs_bist_phy_no;
	u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no];
	u32 code_mode = hisi_hba->debugfs_bist_code_mode;
	u32 path_mode = hisi_hba->debugfs_bist_mode;
	u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
		 phy_no, linkrate, code_mode, path_mode,
		 ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
		 ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
		 ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
		 ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
		 fix_code[FIXED_CODE_1]);
	mode_tmp = path_mode ? 2 : 1;
	if (enable) {
		/* some preparations before bist test */
		hisi_sas_bist_test_prep_v3_hw(hisi_hba);

		/* set linkrate of bit test*/
		reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
					      PROG_PHY_LINK_RATE);
		reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
		reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
		hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
				     reg_val);

		/* set code mode of bit test */
		reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
					      SAS_PHY_BIST_CTRL);
		reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK |
			     CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
			     CFG_BIST_TEST_MSK);
		reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
			    (mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
			    CFG_BIST_TEST_MSK);
		hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
				     reg_val);

		/* set the bist init value */
		if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) {
			reg_val = hisi_hba->debugfs_bist_fixed_code[0];
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE, reg_val);

			reg_val = hisi_hba->debugfs_bist_fixed_code[1];
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE1, reg_val);
		} else {
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE,
					     SAS_PHY_BIST_CODE_INIT);
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE1,
					     SAS_PHY_BIST_CODE1_INIT);
		}

		reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
		hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
				     reg_val);

		/* clear error bit */
		hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
	} else {
		/* disable bist test and recover it */
		hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
				phy_no, SAS_BIST_ERR_CNT);
		hisi_sas_bist_test_restore_v3_hw(hisi_hba);
	}

	return 0;
}
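/*
 * Map block-layer hardware queues onto the PCI MSI vectors, skipping the
 * BASE_VECTORS_V3_HW vectors that are not used for completion queues.
 */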
static int hisi_sas_map_queues(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

	return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
				     BASE_VECTORS_V3_HW);
}
static struct scsi_host_template sht_v3_hw = {
	.proc_name		= DRV_NAME,
	.module			= THIS_MODULE,
	.queuecommand		= sas_queuecommand,
	.dma_need_drain		= ata_scsi_dma_need_drain,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= slave_configure_v3_hw,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.map_queues		= hisi_sas_map_queues,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.sg_tablesize		= HISI_SAS_SGE_PAGE_CNT,
	.sg_prot_tablesize	= HISI_SAS_SGE_PAGE_CNT,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sas_ioctl,
#endif
	.shost_attrs		= host_attrs_v3_hw,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
	.host_reset		= hisi_sas_host_reset,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
	.setup_itct = setup_itct_v3_hw,
	.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
	.clear_itct = clear_itct_v3_hw,
	.sl_notify_ssp = sl_notify_ssp_v3_hw,
	.prep_ssp = prep_ssp_v3_hw,
	.prep_smp = prep_smp_v3_hw,
	.prep_stp = prep_ata_v3_hw,
	.prep_abort = prep_abort_v3_hw,
	.start_delivery = start_delivery_v3_hw,
	.phys_init = phys_init_v3_hw,
	.phy_start = start_phy_v3_hw,
	.phy_disable = disable_phy_v3_hw,
	.phy_hard_reset = phy_hard_reset_v3_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
	.phy_set_linkrate = phy_set_linkrate_v3_hw,
	.dereg_device = dereg_device_v3_hw,
	.soft_reset = soft_reset_v3_hw,
	.get_phys_state = get_phys_state_v3_hw,
	.get_events = phy_get_events_v3_hw,
	.write_gpio = write_gpio_v3_hw,
	.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
};
static struct Scsi_Host *
hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "shost alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw);
	hisi_hba->hw = &hisi_sas_v3_hw;
	hisi_hba->pci_dev = pdev;
	hisi_hba->dev = dev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	if (prot_mask & ~HISI_SAS_PROT_MASK)
		dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
			prot_mask);
	else
		hisi_hba->prot_mask = prot_mask;

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
static void debugfs_snapshot_cq_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = hisi_hba->hw->complete_hdr_size;
	int dump_index = hisi_hba->debugfs_dump_index;
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++)
		memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
		       hisi_hba->complete_hdr[i],
		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}
static void debugfs_snapshot_dq_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
	int dump_index = hisi_hba->debugfs_dump_index;
	int i, j;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;

		debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
		cmd_hdr = hisi_hba->cmd_hdr[i];

		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
			       queue_entry_size);
	}
}
static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	const struct hisi_sas_debugfs_reg *port = &debugfs_port_reg;
	int i, phy_cnt;
	u32 offset;
	u32 *databuf;

	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
		databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
		for (i = 0; i < port->count; i++, databuf++) {
			offset = port->base_off + 4 * i;
			*databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt,
						       offset);
		}
	}
}
static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
	int i;

	for (i = 0; i < debugfs_axi_reg.count; i++, databuf++)
		*databuf = hisi_sas_read32(hisi_hba, 4 * i);
}
static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
	const struct hisi_sas_debugfs_reg *axi = &debugfs_axi_reg;
	int i;

	for (i = 0; i < axi->count; i++, databuf++)
		*databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off);
}
static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
	const struct hisi_sas_debugfs_reg *ras = &debugfs_ras_reg;
	int i;

	for (i = 0; i < ras->count; i++, databuf++)
		*databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off);
}
static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
	void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
	struct hisi_sas_itct *itct;
	int i;

	read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_ITCT_CACHE, cachebuf);

	itct = hisi_hba->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
		databuf += sizeof(struct hisi_sas_itct);
	}
}
static void debugfs_snapshot_iost_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	int max_command_entries = HISI_SAS_MAX_COMMANDS;
	void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
	void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
	struct hisi_sas_iost *iost;
	int i;

	read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_IOST_CACHE, cachebuf);

	iost = hisi_hba->iost;

	for (i = 0; i < max_command_entries; i++, iost++) {
		memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
		databuf += sizeof(struct hisi_sas_iost);
	}
}
static const char *
debugfs_to_reg_name_v3_hw(int off, int base_off,
			  const struct hisi_sas_debugfs_reg_lu *lu)
{
	for (; lu->name; lu++) {
		if (off == lu->off - base_off)
			return lu->name;
	}

	return NULL;
}
static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
				    const struct hisi_sas_debugfs_reg *reg)
{
	int i;

	for (i = 0; i < reg->count; i++) {
		int off = i * 4;
		const char *name;

		name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
						 reg->lu);

		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[i], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[i]);
	}
}
static int debugfs_global_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *global = s->private;

	debugfs_print_reg_v3_hw(global->data, s,
				&debugfs_global_reg);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_global_v3_hw);

static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *axi = s->private;

	debugfs_print_reg_v3_hw(axi->data, s,
				&debugfs_axi_reg);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_axi_v3_hw);

static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *ras = s->private;

	debugfs_print_reg_v3_hw(ras->data, s,
				&debugfs_ras_reg);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_ras_v3_hw);

static int debugfs_port_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_port *port = s->private;
	const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg;

	debugfs_print_reg_v3_hw(port->data, s, reg_port);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_port_v3_hw);
static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index,
				      int sz, __le64 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 8; i++, ptr++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
		if (!(i % 2))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");
}

static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index,
				      int sz, __le32 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 4; i++, ptr++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
		if (!(i % 4))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");
}
static void debugfs_cq_show_slot_v3_hw(struct seq_file *s, int slot,
				       struct hisi_sas_debugfs_cq *debugfs_cq)
{
	struct hisi_sas_cq *cq = debugfs_cq->cq;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	__le32 *complete_hdr = debugfs_cq->complete_hdr +
			       (hisi_hba->hw->complete_hdr_size * slot);

	debugfs_show_row_32_v3_hw(s, slot,
				  hisi_hba->hw->complete_hdr_size,
				  complete_hdr);
}

static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
	int slot;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
		debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_cq_v3_hw);
static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot,
				       void *dq_ptr)
{
	struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
	void *cmd_queue = debugfs_dq->hdr;
	__le32 *cmd_hdr = cmd_queue +
		sizeof(struct hisi_sas_cmd_hdr) * slot;

	debugfs_show_row_32_v3_hw(s, slot, sizeof(struct hisi_sas_cmd_hdr),
				  cmd_hdr);
}

static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p)
{
	int slot;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
		debugfs_dq_show_slot_v3_hw(s, slot, s->private);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_dq_v3_hw);
static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
	struct hisi_sas_iost *iost = debugfs_iost->iost;
	int i, max_command_entries = HISI_SAS_MAX_COMMANDS;

	for (i = 0; i < max_command_entries; i++, iost++) {
		__le64 *data = &iost->qw0;

		debugfs_show_row_64_v3_hw(s, i, sizeof(*iost), data);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_iost_v3_hw);
static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
	struct hisi_sas_iost_itct_cache *iost_cache =
			debugfs_iost_cache->cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
	int i, tab_idx;
	__le64 *iost;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
		/*
		 * Data struct of IOST cache:
		 * Data[1]: BIT0~15: Table index
		 * Data[2]~[9]: IOST table
		 */
		tab_idx = (iost_cache->data[1] & 0xffff);
		iost = (__le64 *)iost_cache;

		debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, iost);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_iost_cache_v3_hw);
static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p)
{
	int i;
	struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
	struct hisi_sas_itct *itct = debugfs_itct->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		__le64 *data = &itct->qw0;

		debugfs_show_row_64_v3_hw(s, i, sizeof(*itct), data);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_itct_v3_hw);
static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
	struct hisi_sas_iost_itct_cache *itct_cache =
			debugfs_itct_cache->cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
	int i, tab_idx;
	__le64 *itct;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
		/*
		 * Data struct of ITCT cache:
		 * Data[1]: BIT0~15: Table index
		 * Data[2]~[9]: ITCT table
		 */
		tab_idx = itct_cache->data[1] & 0xffff;
		itct = (__le64 *)itct_cache;

		debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, itct);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw);
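/*
 * Create the debugfs files for one dump slot: a numbered directory holding
 * the timestamp, global/axi/ras register snapshots, per-port register
 * snapshots, and per-queue CQ/DQ plus IOST/ITCT (and cache) dumps.
 */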
static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
{
	u64 *debugfs_timestamp;
	int dump_index = hisi_hba->debugfs_dump_index;
	struct dentry *dump_dentry;
	struct dentry *dentry;
	char name[256];
	int p;
	int c;
	int d;

	snprintf(name, 256, "%d", dump_index);

	dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);

	debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];

	debugfs_create_u64("timestamp", 0400, dump_dentry,
			   debugfs_timestamp);

	debugfs_create_file("global", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
			    &debugfs_global_v3_hw_fops);

	/* Create port dir and files */
	dentry = debugfs_create_dir("port", dump_dentry);
	for (p = 0; p < hisi_hba->n_phy; p++) {
		snprintf(name, 256, "%d", p);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_port_reg[dump_index][p],
				    &debugfs_port_v3_hw_fops);
	}

	/* Create CQ dir and files */
	dentry = debugfs_create_dir("cq", dump_dentry);
	for (c = 0; c < hisi_hba->queue_count; c++) {
		snprintf(name, 256, "%d", c);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_cq[dump_index][c],
				    &debugfs_cq_v3_hw_fops);
	}

	/* Create DQ dir and files */
	dentry = debugfs_create_dir("dq", dump_dentry);
	for (d = 0; d < hisi_hba->queue_count; d++) {
		snprintf(name, 256, "%d", d);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_dq[dump_index][d],
				    &debugfs_dq_v3_hw_fops);
	}

	debugfs_create_file("iost", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost[dump_index],
			    &debugfs_iost_v3_hw_fops);

	debugfs_create_file("iost_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost_cache[dump_index],
			    &debugfs_iost_cache_v3_hw_fops);

	debugfs_create_file("itct", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct[dump_index],
			    &debugfs_itct_v3_hw_fops);

	debugfs_create_file("itct_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct_cache[dump_index],
			    &debugfs_itct_cache_v3_hw_fops);

	debugfs_create_file("axi", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
			    &debugfs_axi_v3_hw_fops);

	debugfs_create_file("ras", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
			    &debugfs_ras_v3_hw_fops);
}
static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
{
	debugfs_snapshot_prepare_v3_hw(hisi_hba);

	debugfs_snapshot_global_reg_v3_hw(hisi_hba);
	debugfs_snapshot_port_reg_v3_hw(hisi_hba);
	debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
	debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
	debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
	debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
	debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
	debugfs_snapshot_iost_reg_v3_hw(hisi_hba);

	debugfs_create_files_v3_hw(hisi_hba);

	debugfs_snapshot_restore_v3_hw(hisi_hba);
}
static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
						const char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct hisi_hba *hisi_hba = file->f_inode->i_private;
	char buf[8];

	if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
		return -EFAULT;

	if (count > 8)
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	if (buf[0] != '1')
		return -EFAULT;

	queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	return count;
}

static const struct file_operations debugfs_trigger_dump_v3_hw_fops = {
	.write = &debugfs_trigger_dump_v3_hw_write,
	.owner = THIS_MODULE,
};
enum {
	HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
	HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
	HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
};
static const struct {
	int		value;
	char		*name;
} debugfs_loop_linkrate_v3_hw[] = {
	{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
	{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
	{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
	{ SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
};
static int debugfs_bist_linkrate_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
		int match = (hisi_hba->debugfs_bist_linkrate ==
			     debugfs_loop_linkrate_v3_hw[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   debugfs_loop_linkrate_v3_hw[i].name,
			   match ? "]" : "");
	}
	seq_puts(s, "\n");

	return 0;
}
static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
						 const char __user *buf,
						 size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	char kbuf[16] = {}, *pkbuf;
	bool found = false;
	int i;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	if (count >= sizeof(kbuf))
		return -EOVERFLOW;

	if (copy_from_user(kbuf, buf, count))
		return -EINVAL;

	pkbuf = strstrip(kbuf);

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
		if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name,
			     pkbuf, 16)) {
			hisi_hba->debugfs_bist_linkrate =
				debugfs_loop_linkrate_v3_hw[i].value;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return count;
}
static int debugfs_bist_linkrate_v3_hw_open(struct inode *inode,
					    struct file *filp)
{
	return single_open(filp, debugfs_bist_linkrate_v3_hw_show,
			   inode->i_private);
}

static const struct file_operations debugfs_bist_linkrate_v3_hw_fops = {
	.open = debugfs_bist_linkrate_v3_hw_open,
	.read = seq_read,
	.write = debugfs_bist_linkrate_v3_hw_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static const struct {
	int		value;
	char		*name;
} debugfs_loop_code_mode_v3_hw[] = {
	{ HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
	{ HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
	{ HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
	{ HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
	{ HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
	{ HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
};
3809 static int debugfs_bist_code_mode_v3_hw_show(struct seq_file
*s
, void *p
)
3811 struct hisi_hba
*hisi_hba
= s
->private;
3814 for (i
= 0; i
< ARRAY_SIZE(debugfs_loop_code_mode_v3_hw
); i
++) {
3815 int match
= (hisi_hba
->debugfs_bist_code_mode
==
3816 debugfs_loop_code_mode_v3_hw
[i
].value
);
3818 seq_printf(s
, "%s%s%s ", match
? "[" : "",
3819 debugfs_loop_code_mode_v3_hw
[i
].name
,
3827 static ssize_t
debugfs_bist_code_mode_v3_hw_write(struct file
*filp
,
3828 const char __user
*buf
,
3832 struct seq_file
*m
= filp
->private_data
;
3833 struct hisi_hba
*hisi_hba
= m
->private;
3834 char kbuf
[16] = {}, *pkbuf
;
3838 if (hisi_hba
->debugfs_bist_enable
)
3841 if (count
>= sizeof(kbuf
))
3844 if (copy_from_user(kbuf
, buf
, count
))
3847 pkbuf
= strstrip(kbuf
);
3849 for (i
= 0; i
< ARRAY_SIZE(debugfs_loop_code_mode_v3_hw
); i
++) {
3850 if (!strncmp(debugfs_loop_code_mode_v3_hw
[i
].name
,
3852 hisi_hba
->debugfs_bist_code_mode
=
3853 debugfs_loop_code_mode_v3_hw
[i
].value
;
3865 static int debugfs_bist_code_mode_v3_hw_open(struct inode
*inode
,
3868 return single_open(filp
, debugfs_bist_code_mode_v3_hw_show
,
3872 static const struct file_operations debugfs_bist_code_mode_v3_hw_fops
= {
3873 .open
= debugfs_bist_code_mode_v3_hw_open
,
3875 .write
= debugfs_bist_code_mode_v3_hw_write
,
3876 .llseek
= seq_lseek
,
3877 .release
= single_release
,
3878 .owner
= THIS_MODULE
,
static ssize_t debugfs_bist_phy_v3_hw_write(struct file *filp,
					    const char __user *buf,
					    size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	unsigned int phy_no;
	int val;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	val = kstrtouint_from_user(buf, count, 0, &phy_no);
	if (val)
		return val;

	if (phy_no >= hisi_hba->n_phy)
		return -EINVAL;

	hisi_hba->debugfs_bist_phy_no = phy_no;

	return count;
}

static int debugfs_bist_phy_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;

	seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);

	return 0;
}

static int debugfs_bist_phy_v3_hw_open(struct inode *inode,
				       struct file *filp)
{
	return single_open(filp, debugfs_bist_phy_v3_hw_show,
			   inode->i_private);
}

static const struct file_operations debugfs_bist_phy_v3_hw_fops = {
	.open = debugfs_bist_phy_v3_hw_open,
	.read = seq_read,
	.write = debugfs_bist_phy_v3_hw_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static const struct {
	int value;
	char *name;
} debugfs_loop_modes_v3_hw[] = {
	{ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
	{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
	{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
};

static int debugfs_bist_mode_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
		int match = (hisi_hba->debugfs_bist_mode ==
			     debugfs_loop_modes_v3_hw[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   debugfs_loop_modes_v3_hw[i].name,
			   match ? "]" : "");
	}
	seq_puts(s, "\n");

	return 0;
}

static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	char kbuf[16] = {}, *pkbuf;
	bool found = false;
	int i;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	if (count >= sizeof(kbuf))
		return -EOVERFLOW;

	if (copy_from_user(kbuf, buf, count))
		return -EINVAL;

	pkbuf = strstrip(kbuf);

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
		if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) {
			hisi_hba->debugfs_bist_mode =
				debugfs_loop_modes_v3_hw[i].value;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return count;
}

static int debugfs_bist_mode_v3_hw_open(struct inode *inode,
					struct file *filp)
{
	return single_open(filp, debugfs_bist_mode_v3_hw_show,
			   inode->i_private);
}

static const struct file_operations debugfs_bist_mode_v3_hw_fops = {
	.open = debugfs_bist_mode_v3_hw_open,
	.read = seq_read,
	.write = debugfs_bist_mode_v3_hw_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static ssize_t debugfs_bist_enable_v3_hw_write(struct file *filp,
					       const char __user *buf,
					       size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	unsigned int enable;
	int val;

	val = kstrtouint_from_user(buf, count, 0, &enable);
	if (val)
		return val;

	if (enable == hisi_hba->debugfs_bist_enable)
		return count;

	val = debugfs_set_bist_v3_hw(hisi_hba, enable);
	if (val < 0)
		return val;

	hisi_hba->debugfs_bist_enable = enable;

	return count;
}

static int debugfs_bist_enable_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;

	seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);

	return 0;
}

static int debugfs_bist_enable_v3_hw_open(struct inode *inode,
					  struct file *filp)
{
	return single_open(filp, debugfs_bist_enable_v3_hw_show,
			   inode->i_private);
}

static const struct file_operations debugfs_bist_enable_v3_hw_fops = {
	.open = debugfs_bist_enable_v3_hw_open,
	.read = seq_read,
	.write = debugfs_bist_enable_v3_hw_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
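
/*
 * Example BIST session via the files created in debugfs_bist_init_v3_hw()
 * below (a sketch; paths assume the default debugfs mount point):
 *
 *   cd /sys/kernel/debug/hisi_sas/<pci-dev-name>/bist
 *   echo 0          > phy_id
 *   echo "serdes"   > loopback_mode
 *   echo "PRBS7"    > code_mode
 *   echo "1.5 Gbit" > link_rate
 *   echo 1 > enable      # calls debugfs_set_bist_v3_hw(hisi_hba, 1)
 *   cat cnt              # debugfs_bist_cnt, exposed as a raw u32
 *   echo 0 > enable
 *
 * The selector files must be written before enabling, since their write
 * handlers reject changes while debugfs_bist_enable is set.
 */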
static const struct {
	char *name;
} debugfs_ffe_name_v3_hw[FFE_CFG_MAX] = {
	{ "SAS_1_5_GBPS" },
	{ "SAS_3_0_GBPS" },
	{ "SAS_6_0_GBPS" },
	{ "SAS_12_0_GBPS" },
	{ "FFE_RESV" },
	{ "SATA_1_5_GBPS" },
	{ "SATA_3_0_GBPS" },
	{ "SATA_6_0_GBPS" },
};
static ssize_t debugfs_v3_hw_write(struct file *filp,
				   const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	u32 *val = m->private;
	int res;

	res = kstrtouint_from_user(buf, count, 0, val);
	if (res)
		return res;

	return count;
}

static int debugfs_v3_hw_show(struct seq_file *s, void *p)
{
	u32 *val = s->private;

	seq_printf(s, "0x%x\n", *val);

	return 0;
}

static int debugfs_v3_hw_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debugfs_v3_hw_show,
			   inode->i_private);
}

static const struct file_operations debugfs_v3_hw_fops = {
	.open = debugfs_v3_hw_open,
	.read = seq_read,
	.write = debugfs_v3_hw_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
static ssize_t debugfs_phy_down_cnt_v3_hw_write(struct file *filp,
						const char __user *buf,
						size_t count, loff_t *ppos)
{
	struct seq_file *s = filp->private_data;
	struct hisi_sas_phy *phy = s->private;
	unsigned int set_val;
	int res;

	res = kstrtouint_from_user(buf, count, 0, &set_val);
	if (res)
		return res;

	if (set_val > 0)
		return -EINVAL;

	atomic_set(&phy->down_cnt, 0);

	return count;
}

static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_sas_phy *phy = s->private;

	seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));

	return 0;
}

static int debugfs_phy_down_cnt_v3_hw_open(struct inode *inode,
					   struct file *filp)
{
	return single_open(filp, debugfs_phy_down_cnt_v3_hw_show,
			   inode->i_private);
}

static const struct file_operations debugfs_phy_down_cnt_v3_hw_fops = {
	.open = debugfs_phy_down_cnt_v3_hw_open,
	.read = seq_read,
	.write = debugfs_phy_down_cnt_v3_hw_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
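
/*
 * Per-phy files under phy_down_cnt/: reading returns the accumulated phy-down
 * count, and writing 0 clears it (any other value is rejected).
 */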
static void debugfs_work_handler_v3_hw(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, debugfs_work);
	int debugfs_dump_index = hisi_hba->debugfs_dump_index;
	struct device *dev = hisi_hba->dev;
	u64 timestamp = local_clock();

	if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
		dev_warn(dev, "dump count exceeded!\n");
		return;
	}

	do_div(timestamp, NSEC_PER_MSEC);
	hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;

	debugfs_snapshot_regs_v3_hw(hisi_hba);
	hisi_hba->debugfs_dump_index++;
}
static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
{
	struct device *dev = hisi_hba->dev;
	int i;

	devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
	devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
	devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
	devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);

	for (i = 0; i < hisi_hba->queue_count; i++)
		devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);

	for (i = 0; i < hisi_hba->queue_count; i++)
		devm_kfree(dev,
			   hisi_hba->debugfs_cq[dump_index][i].complete_hdr);

	for (i = 0; i < DEBUGFS_REGS_NUM; i++)
		devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);

	for (i = 0; i < hisi_hba->n_phy; i++)
		devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
}

static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = {
	[DEBUGFS_GLOBAL] = &debugfs_global_reg,
	[DEBUGFS_AXI] = &debugfs_axi_reg,
	[DEBUGFS_RAS] = &debugfs_ras_reg,
};
static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
{
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	struct device *dev = hisi_hba->dev;
	int p, c, d, r, i;
	size_t sz;

	for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
		struct hisi_sas_debugfs_regs *regs =
				&hisi_hba->debugfs_regs[dump_index][r];

		sz = debugfs_reg_array_v3_hw[r]->count * 4;
		regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!regs->data)
			goto fail;
		regs->hisi_hba = hisi_hba;
	}

	sz = debugfs_port_reg.count * 4;
	for (p = 0; p < hisi_hba->n_phy; p++) {
		struct hisi_sas_debugfs_port *port =
				&hisi_hba->debugfs_port_reg[dump_index][p];

		port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!port->data)
			goto fail;
		port->phy = &hisi_hba->phy[p];
	}

	sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
	for (c = 0; c < hisi_hba->queue_count; c++) {
		struct hisi_sas_debugfs_cq *cq =
				&hisi_hba->debugfs_cq[dump_index][c];

		cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!cq->complete_hdr)
			goto fail;
		cq->cq = &hisi_hba->cq[c];
	}

	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
	for (d = 0; d < hisi_hba->queue_count; d++) {
		struct hisi_sas_debugfs_dq *dq =
				&hisi_hba->debugfs_dq[dump_index][d];

		dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!dq->hdr)
			goto fail;
		dq->dq = &hisi_hba->dq[d];
	}

	sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);

	hisi_hba->debugfs_iost[dump_index].iost =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost[dump_index].iost)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_iost_cache[dump_index].cache =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_itct_cache[dump_index].cache =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
		goto fail;

	/* New memory allocations must be placed before the itct allocation */
	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);

	hisi_hba->debugfs_itct[dump_index].itct =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct[dump_index].itct)
		goto fail;

	return 0;
fail:
	for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
		debugfs_release_v3_hw(hisi_hba, i);
	return -ENOMEM;
}
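
/*
 * All dump buffers above are devm-allocated, so they are released
 * automatically when the device is unbound; debugfs_release_v3_hw() only
 * needs to run early when one of the allocations fails.
 */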
static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct dentry *dir = debugfs_create_dir("phy_down_cnt",
						hisi_hba->debugfs_dir);
	char name[16];
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		snprintf(name, 16, "%d", phy_no);
		debugfs_create_file(name, 0600, dir,
				    &hisi_hba->phy[phy_no],
				    &debugfs_phy_down_cnt_v3_hw_fops);
	}
}
static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct dentry *ports_dentry;
	int phy_no;

	hisi_hba->debugfs_bist_dentry =
		debugfs_create_dir("bist", hisi_hba->debugfs_dir);
	debugfs_create_file("link_rate", 0600,
			    hisi_hba->debugfs_bist_dentry, hisi_hba,
			    &debugfs_bist_linkrate_v3_hw_fops);

	debugfs_create_file("code_mode", 0600,
			    hisi_hba->debugfs_bist_dentry, hisi_hba,
			    &debugfs_bist_code_mode_v3_hw_fops);

	debugfs_create_file("fixed_code", 0600,
			    hisi_hba->debugfs_bist_dentry,
			    &hisi_hba->debugfs_bist_fixed_code[0],
			    &debugfs_v3_hw_fops);

	debugfs_create_file("fixed_code_1", 0600,
			    hisi_hba->debugfs_bist_dentry,
			    &hisi_hba->debugfs_bist_fixed_code[1],
			    &debugfs_v3_hw_fops);

	debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &debugfs_bist_phy_v3_hw_fops);

	debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
			   &hisi_hba->debugfs_bist_cnt);

	debugfs_create_file("loopback_mode", 0600,
			    hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &debugfs_bist_mode_v3_hw_fops);

	debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &debugfs_bist_enable_v3_hw_fops);

	ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry);

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct dentry *port_dentry;
		struct dentry *ffe_dentry;
		char name[256];
		int i;

		snprintf(name, 256, "%d", phy_no);
		port_dentry = debugfs_create_dir(name, ports_dentry);
		ffe_dentry = debugfs_create_dir("ffe", port_dentry);
		for (i = 0; i < FFE_CFG_MAX; i++) {
			debugfs_create_file(debugfs_ffe_name_v3_hw[i].name,
					    0600, ffe_dentry,
					    &hisi_hba->debugfs_bist_ffe[phy_no][i],
					    &debugfs_v3_hw_fops);
		}
	}

	hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
}
static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i;

	hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
						   hisi_sas_debugfs_dir);
	debugfs_create_file("trigger_dump", 0200,
			    hisi_hba->debugfs_dir,
			    hisi_hba,
			    &debugfs_trigger_dump_v3_hw_fops);

	/* create bist structures */
	debugfs_bist_init_v3_hw(hisi_hba);

	hisi_hba->debugfs_dump_dentry =
		debugfs_create_dir("dump", hisi_hba->debugfs_dir);

	debugfs_phy_down_cnt_init_v3_hw(hisi_hba);

	for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
		if (debugfs_alloc_v3_hw(hisi_hba, i)) {
			debugfs_remove_recursive(hisi_hba->debugfs_dir);
			dev_dbg(dev, "failed to init debugfs!\n");
			break;
		}
	}
}

static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
{
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
}
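
/*
 * Resulting debugfs layout (roughly), rooted at hisi_hba->debugfs_dir, i.e.
 * dev_name(dev) under hisi_sas_debugfs_dir (normally
 * <debugfs>/hisi_sas/<pci-dev-name>/):
 *
 *   trigger_dump          write-only; queues a register/queue snapshot
 *   dump/                 per-dump register and queue files
 *   phy_down_cnt/<phy>    phy down counters
 *   bist/                 link_rate, code_mode, loopback_mode, phy_id, cnt,
 *                         fixed_code, fixed_code_1, enable, port/<phy>/ffe/*
 */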
static int
hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable_device;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "No usable DMA addressing method\n");
		rc = -ENODEV;
		goto err_out_regions;
	}

	shost = hisi_sas_shost_alloc_pci(pdev);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	dev_set_drvdata(dev, sha);

	hisi_hba->regs = pcim_iomap(pdev, 5, 0);
	if (!hisi_hba->regs) {
		dev_err(dev, "cannot map register\n");
		rc = -ENOMEM;
		goto err_out_free_host;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_free_host;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
	shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	if (hisi_hba->prot_mask) {
		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
			 prot_mask);
		scsi_host_set_prot(hisi_hba->shost, prot_mask);
		if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
			scsi_host_set_guard(hisi_hba->shost,
					    SHOST_DIX_GUARD_CRC);
	}

	if (hisi_sas_debugfs_enable)
		debugfs_init_v3_hw(hisi_hba);

	rc = interrupt_preinit_v3_hw(hisi_hba);
	if (rc)
		goto err_out_debugfs;
	dev_err(dev, "%d hw queues\n", shost->nr_hw_queues);
	rc = scsi_add_host(shost, dev);
	if (rc)
		goto err_out_free_irq_vectors;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_sas_v3_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	/*
	 * For the case of ATA disks attached to the SAS controller, libata
	 * additionally creates ata_port devices, which affects the
	 * child_count of hisi_hba->dev. Even after all disks are suspended,
	 * the ata_ports remain and the child_count of hisi_hba->dev is not 0.
	 * So use pm_suspend_ignore_children() to ignore that effect on
	 * hisi_hba->dev.
	 */
	pm_suspend_ignore_children(dev, true);
	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_out_debugfs:
	debugfs_exit_v3_hw(hisi_hba);
err_out_free_host:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
	return rc;
}
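
/*
 * Note the runtime PM pairing: probe drops the initial usage count with
 * pm_runtime_put_noidle() so the controller is allowed to runtime suspend,
 * and hisi_sas_v3_remove() takes it back with pm_runtime_get_noresume().
 */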
static void
hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
	int i;

	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		int nr = hisi_sas_intr_conv ? 16 : 16 + i;

		free_irq(pci_irq_vector(pdev, nr), cq);
	}
	pci_free_irq_vectors(pdev);
}
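
/*
 * Vector layout assumed by the teardown above: vectors 1, 2 and 11 carry the
 * non-CQ interrupts (registered with hisi_hba as their cookie), while
 * completion queues start at vector 16.  With hisi_sas_intr_conv set, all CQs
 * converge on vector 16; otherwise CQ i uses vector 16 + i.
 */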
static void hisi_sas_v3_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sas_ha_struct *sha = dev_get_drvdata(dev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	pm_runtime_get_noresume(dev);
	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	hisi_sas_free(hisi_hba);
	debugfs_exit_v3_hw(hisi_hba);
	scsi_host_put(shost);
}
static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	dev_info(dev, "FLR prepare\n");
	set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc)
		dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
}

static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	hisi_sas_init_mem(hisi_hba);

	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
		return;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "FLR done\n");
}
enum {
	/* instances of the controller */
	hip08,
};
static int _suspend_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (!pdev->pm_cap) {
		dev_err(dev, "PCI PM not supported\n");
		return -ENODEV;
	}

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -EPERM;

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	flush_workqueue(hisi_hba->wq);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	dev_warn(dev, "entering suspend state\n");

	hisi_sas_release_tasks(hisi_hba);

	sas_suspend_ha(sha);

	return 0;
}

static int _resume_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = hisi_hba->shost;
	struct device *dev = hisi_hba->dev;
	pci_power_t device_state = pdev->current_state;
	int rc;

	dev_warn(dev, "resuming from operating state [D%d]\n",
		 device_state);

	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	sas_prep_resume_ha(sha);
	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		scsi_remove_host(shost);
		return rc;
	}
	phys_init_v3_hw(hisi_hba);
	sas_resume_ha(sha);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return 0;
}
static int __maybe_unused suspend_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	int rc;

	set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	rc = _suspend_v3_hw(device);
	if (rc)
		clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	return rc;
}

static int __maybe_unused resume_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	int rc = _resume_v3_hw(device);

	clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	return rc;
}
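
/*
 * suspend_v3_hw()/resume_v3_hw() only add HISI_SAS_PM_BIT bookkeeping around
 * the _suspend/_resume helpers above; they are the callbacks plugged into
 * hisi_sas_v3_pm_ops below.
 */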
static const struct pci_device_id sas_v3_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
	{}
};
MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);

static const struct pci_error_handlers hisi_sas_err_handler = {
	.reset_prepare = hisi_sas_reset_prepare_v3_hw,
	.reset_done = hisi_sas_reset_done_v3_hw,
};
static UNIVERSAL_DEV_PM_OPS(hisi_sas_v3_pm_ops,
			    suspend_v3_hw,
			    resume_v3_hw,
			    NULL);
static struct pci_driver sas_v3_pci_driver = {
	.name = DRV_NAME,
	.id_table = sas_v3_pci_table,
	.probe = hisi_sas_v3_probe,
	.remove = hisi_sas_v3_remove,
	.err_handler = &hisi_sas_err_handler,
	.driver.pm = &hisi_sas_v3_pm_ops,
};

module_pci_driver(sas_v3_pci_driver);
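
/*
 * Example (sketch): interrupt convergence can only be chosen at load time,
 * e.g. "modprobe hisi_sas_v3_hw intr_conv=1"; the 0444 permission makes the
 * parameter read-only via /sys/module/hisi_sas_v3_hw/parameters/intr_conv.
 */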
module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
MODULE_ALIAS("pci:" DRV_NAME);