// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017 Hisilicon Limited.
 */

#define DRV_NAME "hisi_sas_v3_hw"

/* global registers need init */
10 #define DLVRY_QUEUE_ENABLE 0x0
11 #define IOST_BASE_ADDR_LO 0x8
12 #define IOST_BASE_ADDR_HI 0xc
13 #define ITCT_BASE_ADDR_LO 0x10
14 #define ITCT_BASE_ADDR_HI 0x14
15 #define IO_BROKEN_MSG_ADDR_LO 0x18
16 #define IO_BROKEN_MSG_ADDR_HI 0x1c
17 #define PHY_CONTEXT 0x20
18 #define PHY_STATE 0x24
19 #define PHY_PORT_NUM_MA 0x28
20 #define PHY_CONN_RATE 0x30
22 #define ITCT_CLR_EN_OFF 16
23 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
24 #define ITCT_DEV_OFF 0
25 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
26 #define SAS_AXI_USER3 0x50
27 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
28 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
29 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60
30 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64
31 #define CFG_MAX_TAG 0x68
32 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
33 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
34 #define HGC_GET_ITV_TIME 0x90
35 #define DEVICE_MSG_WORK_MODE 0x94
36 #define OPENA_WT_CONTI_TIME 0x9c
37 #define I_T_NEXUS_LOSS_TIME 0xa0
38 #define MAX_CON_TIME_LIMIT_TIME 0xa4
39 #define BUS_INACTIVE_LIMIT_TIME 0xa8
40 #define REJECT_TO_OPEN_LIMIT_TIME 0xac
41 #define CQ_INT_CONVERGE_EN 0xb0
42 #define CFG_AGING_TIME 0xbc
43 #define HGC_DFX_CFG2 0xc0
44 #define CFG_ABT_SET_QUERY_IPTT 0xd4
45 #define CFG_SET_ABORTED_IPTT_OFF 0
46 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF)
47 #define CFG_SET_ABORTED_EN_OFF 12
48 #define CFG_ABT_SET_IPTT_DONE 0xd8
49 #define CFG_ABT_SET_IPTT_DONE_OFF 0
50 #define HGC_IOMB_PROC1_STATUS 0x104
51 #define HGC_LM_DFX_STATUS2 0x128
52 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0
53 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
54 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
55 #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12
56 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
57 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
58 #define HGC_CQE_ECC_ADDR 0x13c
59 #define HGC_CQE_ECC_1B_ADDR_OFF 0
60 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
61 #define HGC_CQE_ECC_MB_ADDR_OFF 8
62 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
63 #define HGC_IOST_ECC_ADDR 0x140
64 #define HGC_IOST_ECC_1B_ADDR_OFF 0
65 #define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
66 #define HGC_IOST_ECC_MB_ADDR_OFF 16
67 #define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
68 #define HGC_DQE_ECC_ADDR 0x144
69 #define HGC_DQE_ECC_1B_ADDR_OFF 0
70 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
71 #define HGC_DQE_ECC_MB_ADDR_OFF 16
72 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
73 #define CHNL_INT_STATUS 0x148
75 #define HGC_ITCT_ECC_ADDR 0x150
76 #define HGC_ITCT_ECC_1B_ADDR_OFF 0
77 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
78 HGC_ITCT_ECC_1B_ADDR_OFF)
79 #define HGC_ITCT_ECC_MB_ADDR_OFF 16
80 #define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \
81 HGC_ITCT_ECC_MB_ADDR_OFF)
82 #define HGC_AXI_FIFO_ERR_INFO 0x154
83 #define AXI_ERR_INFO_OFF 0
84 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
85 #define FIFO_ERR_INFO_OFF 8
86 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
87 #define TAB_RD_TYPE 0x15c
88 #define INT_COAL_EN 0x19c
89 #define OQ_INT_COAL_TIME 0x1a0
90 #define OQ_INT_COAL_CNT 0x1a4
91 #define ENT_INT_COAL_TIME 0x1a8
92 #define ENT_INT_COAL_CNT 0x1ac
93 #define OQ_INT_SRC 0x1b0
94 #define OQ_INT_SRC_MSK 0x1b4
95 #define ENT_INT_SRC1 0x1b8
96 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
97 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
98 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
99 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
100 #define ENT_INT_SRC2 0x1bc
101 #define ENT_INT_SRC3 0x1c0
102 #define ENT_INT_SRC3_WP_DEPTH_OFF 8
103 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
104 #define ENT_INT_SRC3_RP_DEPTH_OFF 10
105 #define ENT_INT_SRC3_AXI_OFF 11
106 #define ENT_INT_SRC3_FIFO_OFF 12
107 #define ENT_INT_SRC3_LM_OFF 14
108 #define ENT_INT_SRC3_ITC_INT_OFF 15
109 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
110 #define ENT_INT_SRC3_ABT_OFF 16
111 #define ENT_INT_SRC3_DQE_POISON_OFF 18
112 #define ENT_INT_SRC3_IOST_POISON_OFF 19
113 #define ENT_INT_SRC3_ITCT_POISON_OFF 20
114 #define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF 21
115 #define ENT_INT_SRC_MSK1 0x1c4
116 #define ENT_INT_SRC_MSK2 0x1c8
117 #define ENT_INT_SRC_MSK3 0x1cc
118 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
119 #define CHNL_PHYUPDOWN_INT_MSK 0x1d0
120 #define CHNL_ENT_INT_MSK 0x1d4
121 #define HGC_COM_INT_MSK 0x1d8
122 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
123 #define SAS_ECC_INTR 0x1e8
124 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0
125 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1
126 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2
127 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3
128 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 4
129 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 5
130 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 6
131 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 7
132 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 8
133 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 9
134 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10
135 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11
136 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 12
137 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 13
138 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 14
139 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 15
140 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 16
141 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 17
142 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 18
143 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 19
144 #define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF 20
145 #define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF 21
146 #define SAS_ECC_INTR_MSK 0x1ec
147 #define HGC_ERR_STAT_EN 0x238
148 #define CQE_SEND_CNT 0x248
149 #define DLVRY_Q_0_BASE_ADDR_LO 0x260
150 #define DLVRY_Q_0_BASE_ADDR_HI 0x264
151 #define DLVRY_Q_0_DEPTH 0x268
152 #define DLVRY_Q_0_WR_PTR 0x26c
153 #define DLVRY_Q_0_RD_PTR 0x270
154 #define HYPER_STREAM_ID_EN_CFG 0xc80
155 #define OQ0_INT_SRC_MSK 0xc90
156 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0
157 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4
158 #define COMPL_Q_0_DEPTH 0x4e8
159 #define COMPL_Q_0_WR_PTR 0x4ec
160 #define COMPL_Q_0_RD_PTR 0x4f0
161 #define HGC_RXM_DFX_STATUS14 0xae8
162 #define HGC_RXM_DFX_STATUS14_MEM0_OFF 0
163 #define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \
164 HGC_RXM_DFX_STATUS14_MEM0_OFF)
165 #define HGC_RXM_DFX_STATUS14_MEM1_OFF 9
166 #define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \
167 HGC_RXM_DFX_STATUS14_MEM1_OFF)
168 #define HGC_RXM_DFX_STATUS14_MEM2_OFF 18
169 #define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \
170 HGC_RXM_DFX_STATUS14_MEM2_OFF)
171 #define HGC_RXM_DFX_STATUS15 0xaec
172 #define HGC_RXM_DFX_STATUS15_MEM3_OFF 0
173 #define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \
174 HGC_RXM_DFX_STATUS15_MEM3_OFF)
175 #define AWQOS_AWCACHE_CFG 0xc84
176 #define ARQOS_ARCACHE_CFG 0xc88
177 #define HILINK_ERR_DFX 0xe04
178 #define SAS_GPIO_CFG_0 0x1000
179 #define SAS_GPIO_CFG_1 0x1004
180 #define SAS_GPIO_TX_0_1 0x1040
181 #define SAS_CFG_DRIVE_VLD 0x1070
183 /* phy registers requiring init */
184 #define PORT_BASE (0x2000)
185 #define PHY_CFG (PORT_BASE + 0x0)
186 #define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
187 #define PHY_CFG_ENA_OFF 0
188 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
189 #define PHY_CFG_DC_OPT_OFF 2
190 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
191 #define PHY_CFG_PHY_RST_OFF 3
192 #define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
193 #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
194 #define CFG_PROG_PHY_LINK_RATE_OFF 8
195 #define CFG_PROG_PHY_LINK_RATE_MSK (0xf << CFG_PROG_PHY_LINK_RATE_OFF)
196 #define PHY_CTRL (PORT_BASE + 0x14)
197 #define PHY_CTRL_RESET_OFF 0
198 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
199 #define CMD_HDR_PIR_OFF 8
200 #define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
201 #define SERDES_CFG (PORT_BASE + 0x1c)
202 #define CFG_ALOS_CHK_DISABLE_OFF 9
203 #define CFG_ALOS_CHK_DISABLE_MSK (0x1 << CFG_ALOS_CHK_DISABLE_OFF)
204 #define SAS_PHY_BIST_CTRL (PORT_BASE + 0x2c)
205 #define CFG_BIST_MODE_SEL_OFF 0
206 #define CFG_BIST_MODE_SEL_MSK (0xf << CFG_BIST_MODE_SEL_OFF)
207 #define CFG_LOOP_TEST_MODE_OFF 14
208 #define CFG_LOOP_TEST_MODE_MSK (0x3 << CFG_LOOP_TEST_MODE_OFF)
209 #define CFG_RX_BIST_EN_OFF 16
210 #define CFG_RX_BIST_EN_MSK (0x1 << CFG_RX_BIST_EN_OFF)
211 #define CFG_TX_BIST_EN_OFF 17
212 #define CFG_TX_BIST_EN_MSK (0x1 << CFG_TX_BIST_EN_OFF)
213 #define CFG_BIST_TEST_OFF 18
214 #define CFG_BIST_TEST_MSK (0x1 << CFG_BIST_TEST_OFF)
215 #define SAS_PHY_BIST_CODE (PORT_BASE + 0x30)
216 #define SAS_PHY_BIST_CODE1 (PORT_BASE + 0x34)
217 #define SAS_BIST_ERR_CNT (PORT_BASE + 0x38)
218 #define SL_CFG (PORT_BASE + 0x84)
219 #define AIP_LIMIT (PORT_BASE + 0x90)
220 #define SL_CONTROL (PORT_BASE + 0x94)
221 #define SL_CONTROL_NOTIFY_EN_OFF 0
222 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
223 #define SL_CTA_OFF 17
224 #define SL_CTA_MSK (0x1 << SL_CTA_OFF)
225 #define RX_PRIMS_STATUS (PORT_BASE + 0x98)
226 #define RX_BCAST_CHG_OFF 1
227 #define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
228 #define TX_ID_DWORD0 (PORT_BASE + 0x9c)
229 #define TX_ID_DWORD1 (PORT_BASE + 0xa0)
230 #define TX_ID_DWORD2 (PORT_BASE + 0xa4)
231 #define TX_ID_DWORD3 (PORT_BASE + 0xa8)
232 #define TX_ID_DWORD4 (PORT_BASE + 0xaC)
233 #define TX_ID_DWORD5 (PORT_BASE + 0xb0)
234 #define TX_ID_DWORD6 (PORT_BASE + 0xb4)
235 #define TXID_AUTO (PORT_BASE + 0xb8)
237 #define CT3_MSK (0x1 << CT3_OFF)
238 #define TX_HARDRST_OFF 2
239 #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
240 #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
241 #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
242 #define STP_LINK_TIMER (PORT_BASE + 0x120)
243 #define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
244 #define CON_CFG_DRIVER (PORT_BASE + 0x130)
245 #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
246 #define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
247 #define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
248 #define CHL_INT0 (PORT_BASE + 0x1b4)
249 #define CHL_INT0_HOTPLUG_TOUT_OFF 0
250 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
251 #define CHL_INT0_SL_RX_BCST_ACK_OFF 1
252 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
253 #define CHL_INT0_SL_PHY_ENABLE_OFF 2
254 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
255 #define CHL_INT0_NOT_RDY_OFF 4
256 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
257 #define CHL_INT0_PHY_RDY_OFF 5
258 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
259 #define CHL_INT1 (PORT_BASE + 0x1b8)
260 #define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15
261 #define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16
262 #define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17
263 #define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18
264 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
265 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
266 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
267 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
268 #define CHL_INT1_DMAC_TX_FIFO_ERR_OFF 23
269 #define CHL_INT1_DMAC_RX_FIFO_ERR_OFF 24
270 #define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF 26
271 #define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF 27
272 #define CHL_INT2 (PORT_BASE + 0x1bc)
273 #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
274 #define CHL_INT2_RX_DISP_ERR_OFF 28
275 #define CHL_INT2_RX_CODE_ERR_OFF 29
276 #define CHL_INT2_RX_INVLD_DW_OFF 30
277 #define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
278 #define CHL_INT0_MSK (PORT_BASE + 0x1c0)
279 #define CHL_INT1_MSK (PORT_BASE + 0x1c4)
280 #define CHL_INT2_MSK (PORT_BASE + 0x1c8)
281 #define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc)
282 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
283 #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4)
284 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
285 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
286 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
287 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
288 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
289 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
290 #define DMA_TX_STATUS (PORT_BASE + 0x2d0)
291 #define DMA_TX_STATUS_BUSY_OFF 0
292 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF)
293 #define DMA_RX_STATUS (PORT_BASE + 0x2e8)
294 #define DMA_RX_STATUS_BUSY_OFF 0
295 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
297 #define COARSETUNE_TIME (PORT_BASE + 0x304)
298 #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
299 #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
300 #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
301 #define ERR_CNT_CODE_ERR (PORT_BASE + 0x394)
302 #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
#error Max ITCT exceeded
#endif
309 #define AXI_MASTER_CFG_BASE (0x5000)
310 #define AM_CTRL_GLOBAL (0x0)
311 #define AM_CTRL_SHUTDOWN_REQ_OFF 0
312 #define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
313 #define AM_CURR_TRANS_RETURN (0x150)
315 #define AM_CFG_MAX_TRANS (0x5010)
316 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
317 #define AXI_CFG (0x5100)
318 #define AM_ROB_ECC_ERR_ADDR (0x510c)
319 #define AM_ROB_ECC_ERR_ADDR_OFF 0
320 #define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff
322 /* RAS registers need init */
323 #define RAS_BASE (0x6000)
324 #define SAS_RAS_INTR0 (RAS_BASE)
325 #define SAS_RAS_INTR1 (RAS_BASE + 0x04)
326 #define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
327 #define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
328 #define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c)
329 #define SAS_RAS_INTR2 (RAS_BASE + 0x20)
330 #define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24)
332 /* HW dma structures */
333 /* Delivery queue header */
335 #define CMD_HDR_ABORT_FLAG_OFF 0
336 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF)
337 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2
338 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
339 #define CMD_HDR_RESP_REPORT_OFF 5
340 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
341 #define CMD_HDR_TLR_CTRL_OFF 6
342 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
343 #define CMD_HDR_PORT_OFF 18
344 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
345 #define CMD_HDR_PRIORITY_OFF 27
346 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
347 #define CMD_HDR_CMD_OFF 29
348 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
350 #define CMD_HDR_UNCON_CMD_OFF 3
351 #define CMD_HDR_DIR_OFF 5
352 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
353 #define CMD_HDR_RESET_OFF 7
354 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
355 #define CMD_HDR_VDTL_OFF 10
356 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
357 #define CMD_HDR_FRAME_TYPE_OFF 11
358 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
359 #define CMD_HDR_DEV_ID_OFF 16
360 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
362 #define CMD_HDR_CFL_OFF 0
363 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
364 #define CMD_HDR_NCQ_TAG_OFF 10
365 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
366 #define CMD_HDR_MRFL_OFF 15
367 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
368 #define CMD_HDR_SG_MOD_OFF 24
369 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
371 #define CMD_HDR_IPTT_OFF 0
372 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
374 #define CMD_HDR_DIF_SGL_LEN_OFF 0
375 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
376 #define CMD_HDR_DATA_SGL_LEN_OFF 16
377 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
379 #define CMD_HDR_ADDR_MODE_SEL_OFF 15
380 #define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF)
381 #define CMD_HDR_ABORT_IPTT_OFF 16
382 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF)
384 /* Completion header */
386 #define CMPLT_HDR_CMPLT_OFF 0
387 #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF)
388 #define CMPLT_HDR_ERROR_PHASE_OFF 2
389 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
390 #define CMPLT_HDR_RSPNS_XFRD_OFF 10
391 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
392 #define CMPLT_HDR_ERX_OFF 12
393 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
394 #define CMPLT_HDR_ABORT_STAT_OFF 13
395 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
397 #define STAT_IO_NOT_VALID 0x1
398 #define STAT_IO_NO_DEVICE 0x2
399 #define STAT_IO_COMPLETE 0x3
400 #define STAT_IO_ABORTED 0x4
402 #define CMPLT_HDR_IPTT_OFF 0
403 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
404 #define CMPLT_HDR_DEV_ID_OFF 16
405 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
407 #define CMPLT_HDR_IO_IN_TARGET_OFF 17
408 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
412 #define ITCT_HDR_DEV_TYPE_OFF 0
413 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
414 #define ITCT_HDR_VALID_OFF 2
415 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
416 #define ITCT_HDR_MCR_OFF 5
417 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
418 #define ITCT_HDR_VLN_OFF 9
419 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
420 #define ITCT_HDR_SMP_TIMEOUT_OFF 16
421 #define ITCT_HDR_AWT_CONTINUE_OFF 25
422 #define ITCT_HDR_PORT_ID_OFF 28
423 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
425 #define ITCT_HDR_INLT_OFF 0
426 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
427 #define ITCT_HDR_RTOLT_OFF 48
428 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
struct hisi_sas_protect_iu_v3_hw {

struct hisi_sas_complete_v3_hdr {

struct hisi_sas_err_record_v3 {
	__le32 trans_tx_fail_type;
	__le32 trans_rx_fail_type;
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;
	__le32 dma_rx_err_type;
};
462 #define RX_DATA_LEN_UNDERFLOW_OFF 6
463 #define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
465 #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
466 #define HISI_SAS_MSI_COUNT_V3_HW 32
#define DIR_NO_DATA 0
#define DIR_TO_INI 1
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3
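/*
 * ATA commands for which prep_ata_v3_hw() sets the "unconstrained command"
 * bit (CMD_HDR_UNCON_CMD) in the command header: READ LOG (DMA) EXT and a
 * soft-reset DEV RESET FIS (control field has SRST set).
 */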
473 #define FIS_CMD_IS_UNCONSTRAINED(fis) \
474 ((fis.command == ATA_CMD_READ_LOG_EXT) || \
475 (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
476 ((fis.command == ATA_CMD_DEV_RESET) && \
477 ((fis.control & ATA_SRST) != 0)))
479 #define T10_INSRT_EN_OFF 0
480 #define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF)
481 #define T10_RMV_EN_OFF 1
482 #define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF)
483 #define T10_RPLC_EN_OFF 2
484 #define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF)
485 #define T10_CHK_EN_OFF 3
486 #define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF)
487 #define INCR_LBRT_OFF 5
488 #define INCR_LBRT_MSK (1 << INCR_LBRT_OFF)
489 #define USR_DATA_BLOCK_SZ_OFF 20
490 #define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF)
491 #define T10_CHK_MSK_OFF 16
492 #define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF)
493 #define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF)
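/*
 * T10 protection information (DIF) controls used by fill_prot_v3_hw():
 * insert/remove/replace/check enables, the incrementing-LBRT flag, the user
 * data block size field, and the reference/application tag check masks.
 */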
495 #define BASE_VECTORS_V3_HW 16
496 #define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
498 #define CHNL_INT_STS_MSK 0xeeeeeeee
499 #define CHNL_INT_STS_PHY_MSK 0xe
500 #define CHNL_INT_STS_INT0_MSK BIT(1)
501 #define CHNL_INT_STS_INT1_MSK BIT(2)
502 #define CHNL_INT_STS_INT2_MSK BIT(3)
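/*
 * CHNL_INT_STATUS packs a 4-bit field per phy; within each field, bits 1..3
 * flag a pending CHL_INT0/CHL_INT1/CHL_INT2 for that phy. The interrupt
 * handlers below shift these masks by phy_no * CHNL_WIDTH (assumed to be 4,
 * defined elsewhere in the driver) to decode them.
 */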
enum {
	DSM_FUNC_ERR_HANDLE_MSI = 0,
};

static bool hisi_sas_intr_conv;
MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");

/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");

static bool auto_affine_msi_experimental;
module_param(auto_affine_msi_experimental, bool, 0444);
MODULE_PARM_DESC(auto_affine_msi_experimental,
		 "Enable auto-affinity of MSI IRQs as experimental:\n"
		 "default is off");
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl(regs);
}

static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + off;

	writel(val, regs);
}

static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
				 u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	writel(val, regs);
}

static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
			       int phy_no, u32 off)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	return readl(regs);
}
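/*
 * Per-phy registers sit in 0x400-byte windows: the phy accessors above add
 * 0x400 * phy_no to the register offset (which already includes PORT_BASE).
 */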
#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us,		\
				     timeout_us)			\
({									\
	void __iomem *regs = hisi_hba->regs + off;			\
	readl_poll_timeout(regs, val, cond, delay_us, timeout_us);	\
})

#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us,	\
					    timeout_us)			\
({									\
	void __iomem *regs = hisi_hba->regs + off;			\
	readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
})
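/*
 * Both polling macros expand in place and rely on a "hisi_hba" pointer being
 * visible in the calling scope; they wrap readl_poll_timeout() and
 * readl_poll_timeout_atomic() respectively.
 */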
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	/* Global registers init */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
			 hisi_sas_intr_conv);
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);

	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		u32 prog_phy_link_rate = 0x800;

		if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
				      SAS_LINK_RATE_1_5_GBPS)) {
			prog_phy_link_rate = 0x855;
		} else {
			enum sas_linkrate max = sas_phy->phy->maximum_linkrate;

			prog_phy_link_rate =
				hisi_sas_get_prog_phy_linkrate_mask(max) |
				0x800;
		}
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
				     prog_phy_link_rate);
		hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
		hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
		hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
				     0x30f4240);
		/* used for 12G negotiate */
		hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
		hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));

	/* RAS registers init */
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
	hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);

	/* LED registers init */
	hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
	/* Configure blink generator rate A to 1Hz and B to 4Hz */
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
}
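/* Set the phy's DC optimization bit (PHY_CFG_DC_OPT) in PHY_CFG. */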
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg &= ~PHY_CFG_DC_OPT_MSK;
	cfg |= 1 << PHY_CFG_DC_OPT_OFF;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}
static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct sas_identify_frame identify_frame;
	u32 *identify_buffer;

	memset(&identify_frame, 0, sizeof(identify_frame));
	identify_frame.dev_type = SAS_END_DEVICE;
	identify_frame.frame_type = 0;
	identify_frame._un1 = 1;
	identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
	identify_frame.target_bits = SAS_PROTOCOL_NONE;
	memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	identify_frame.phy_id = phy_no;
	identify_buffer = (u32 *)(&identify_frame);

	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
			     __swab32(identify_buffer[0]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
			     __swab32(identify_buffer[1]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
			     __swab32(identify_buffer[2]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
			     __swab32(identify_buffer[3]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
			     __swab32(identify_buffer[4]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
			     __swab32(identify_buffer[5]));
}
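/*
 * Program the ITCT (I-T context table) entry for a device: device type,
 * link rate, port id and, for non-SATA devices, the I_T nexus loss and
 * reject-to-open timeouts. The SAS address is stored byte-swapped.
 */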
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = hisi_hba->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u64 sas_addr;

	memset(itct, 0, sizeof(*itct));

	switch (sas_dev->dev_type) {
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_PENDING:
		if (parent_dev && dev_is_expander(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = cpu_to_le64(__swab64(sas_addr));

	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
			    struct hisi_sas_device *sas_dev)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	u64 dev_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	struct device *dev = hisi_hba->dev;

	sas_dev->completion = &completion;

	/* clear the itct interrupt state */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct table */
	reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

	if (!wait_for_completion_timeout(sas_dev->completion,
					 CLEAR_ITCT_TIMEOUT * HZ)) {
		dev_warn(dev, "failed to clear ITCT\n");
		return -ETIMEDOUT;
	}

	memset(itct, 0, sizeof(struct hisi_sas_itct));

	return 0;
}
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
			       struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	u32 cfg_abt_set_query_iptt;

	cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
						 CFG_ABT_SET_QUERY_IPTT);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
		cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
		cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
			(slot->idx << CFG_SET_ABORTED_IPTT_OFF);
		hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
				 cfg_abt_set_query_iptt);
	}
	cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
			 cfg_abt_set_query_iptt);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
			 1 << CFG_ABT_SET_IPTT_DONE_OFF);
}
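/*
 * Controller reset helper: stop the delivery queues and all phys, wait for
 * the AXI bus to go idle, then hand reset off to the ACPI _RST method.
 */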
static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int ret;
	u32 val;

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	hisi_sas_stop_phys(hisi_hba);

	/* Ensure axi bus idle */
	ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
					   20000, 1000000);
	if (ret) {
		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
		return -EIO;
	}

	if (ACPI_HANDLE(dev)) {
		acpi_status s;

		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
		if (ACPI_FAILURE(s)) {
			dev_err(dev, "Reset failed\n");
			return -EIO;
		}
	} else {
		dev_err(dev, "no reset method!\n");
		return -EINVAL;
	}

	return 0;
}
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	union acpi_object *obj;
	guid_t guid;
	int rc;

	rc = reset_hw_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
		return rc;
	}

	init_reg_v3_hw(hisi_hba);

	if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
		dev_err(dev, "Parse GUID failed\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling, from PCI AER default */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
				DSM_FUNC_ERR_HANDLE_MSI, NULL);
	if (!obj)
		dev_warn(dev, "Switch over to MSI handling failed\n");
	else
		ACPI_FREE(obj);

	return 0;
}
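/*
 * Phy enable/disable helpers. Disabling first masks the RX disparity, code
 * and invalid-dword interrupts, drops PHY_CFG_ENA, asserts PHY_CFG_PHY_RST if
 * the phy is still up, then reads back the error counters and re-arms
 * CHL_INT2 before restoring the original interrupt mask.
 */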
static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg |= PHY_CFG_ENA_MSK;
	cfg &= ~PHY_CFG_PHY_RST_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			       BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			       BIT(CHL_INT2_RX_INVLD_DW_OFF);
	u32 state;

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk);

	cfg &= ~PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);

	state = hisi_sas_read32(hisi_hba, PHY_STATE);
	if (state & BIT(phy_no)) {
		cfg |= PHY_CFG_PHY_RST_MSK;
		hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
	}

	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
}
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}

static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
				     txid_auto | TX_HARDRST_MSK);
	}

	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}

static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}
static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (!sas_phy->phy->enabled)
			continue;

		hisi_sas_phy_enable(hisi_hba, i, 1);
	}
}
static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}
static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	for (i = 0; i < hisi_hba->n_phy; i++)
		if (phy_state & BIT(i))
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= BIT(i);

	return bitmap;
}
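/*
 * Delivery queue doorbell: start_delivery_v3_hw() below walks the slots
 * queued on dq->list and advances the queue's write pointer register
 * (DLVRY_Q_0_WR_PTR, strided by 0x14 per queue) past the last one.
 */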
1036 static void start_delivery_v3_hw(struct hisi_sas_dq
*dq
)
1038 struct hisi_hba
*hisi_hba
= dq
->hisi_hba
;
1039 struct hisi_sas_slot
*s
, *s1
, *s2
= NULL
;
1040 int dlvry_queue
= dq
->id
;
1043 list_for_each_entry_safe(s
, s1
, &dq
->list
, delivery
) {
1047 list_del(&s
->delivery
);
1054 * Ensure that memories for slots built on other CPUs is observed.
1057 wp
= (s2
->dlvry_queue_slot
+ 1) % HISI_SAS_QUEUE_SLOTS
;
1059 hisi_sas_write32(hisi_hba
, DLVRY_Q_0_WR_PTR
+ (dlvry_queue
* 0x14), wp
);
1062 static void prep_prd_sge_v3_hw(struct hisi_hba
*hisi_hba
,
1063 struct hisi_sas_slot
*slot
,
1064 struct hisi_sas_cmd_hdr
*hdr
,
1065 struct scatterlist
*scatter
,
1068 struct hisi_sas_sge_page
*sge_page
= hisi_sas_sge_addr_mem(slot
);
1069 struct scatterlist
*sg
;
1072 for_each_sg(scatter
, sg
, n_elem
, i
) {
1073 struct hisi_sas_sge
*entry
= &sge_page
->sge
[i
];
1075 entry
->addr
= cpu_to_le64(sg_dma_address(sg
));
1076 entry
->page_ctrl_0
= entry
->page_ctrl_1
= 0;
1077 entry
->data_len
= cpu_to_le32(sg_dma_len(sg
));
1078 entry
->data_off
= 0;
1081 hdr
->prd_table_addr
= cpu_to_le64(hisi_sas_sge_addr_dma(slot
));
1083 hdr
->sg_len
|= cpu_to_le32(n_elem
<< CMD_HDR_DATA_SGL_LEN_OFF
);
1086 static void prep_prd_sge_dif_v3_hw(struct hisi_hba
*hisi_hba
,
1087 struct hisi_sas_slot
*slot
,
1088 struct hisi_sas_cmd_hdr
*hdr
,
1089 struct scatterlist
*scatter
,
1092 struct hisi_sas_sge_dif_page
*sge_dif_page
;
1093 struct scatterlist
*sg
;
1096 sge_dif_page
= hisi_sas_sge_dif_addr_mem(slot
);
1098 for_each_sg(scatter
, sg
, n_elem
, i
) {
1099 struct hisi_sas_sge
*entry
= &sge_dif_page
->sge
[i
];
1101 entry
->addr
= cpu_to_le64(sg_dma_address(sg
));
1102 entry
->page_ctrl_0
= 0;
1103 entry
->page_ctrl_1
= 0;
1104 entry
->data_len
= cpu_to_le32(sg_dma_len(sg
));
1105 entry
->data_off
= 0;
1108 hdr
->dif_prd_table_addr
=
1109 cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot
));
1111 hdr
->sg_len
|= cpu_to_le32(n_elem
<< CMD_HDR_DIF_SGL_LEN_OFF
);
1114 static u32
get_prot_chk_msk_v3_hw(struct scsi_cmnd
*scsi_cmnd
)
1116 unsigned char prot_flags
= scsi_cmnd
->prot_flags
;
1118 if (prot_flags
& SCSI_PROT_REF_CHECK
)
1119 return T10_CHK_APP_TAG_MSK
;
1120 return T10_CHK_REF_TAG_MSK
| T10_CHK_APP_TAG_MSK
;
1123 static void fill_prot_v3_hw(struct scsi_cmnd
*scsi_cmnd
,
1124 struct hisi_sas_protect_iu_v3_hw
*prot
)
1126 unsigned char prot_op
= scsi_get_prot_op(scsi_cmnd
);
1127 unsigned int interval
= scsi_prot_interval(scsi_cmnd
);
1128 u32 lbrt_chk_val
= t10_pi_ref_tag(scsi_cmnd
->request
);
1131 case SCSI_PROT_READ_INSERT
:
1132 prot
->dw0
|= T10_INSRT_EN_MSK
;
1133 prot
->lbrtgv
= lbrt_chk_val
;
1135 case SCSI_PROT_READ_STRIP
:
1136 prot
->dw0
|= (T10_RMV_EN_MSK
| T10_CHK_EN_MSK
);
1137 prot
->lbrtcv
= lbrt_chk_val
;
1138 prot
->dw4
|= get_prot_chk_msk_v3_hw(scsi_cmnd
);
1140 case SCSI_PROT_READ_PASS
:
1141 prot
->dw0
|= T10_CHK_EN_MSK
;
1142 prot
->lbrtcv
= lbrt_chk_val
;
1143 prot
->dw4
|= get_prot_chk_msk_v3_hw(scsi_cmnd
);
1145 case SCSI_PROT_WRITE_INSERT
:
1146 prot
->dw0
|= T10_INSRT_EN_MSK
;
1147 prot
->lbrtgv
= lbrt_chk_val
;
1149 case SCSI_PROT_WRITE_STRIP
:
1150 prot
->dw0
|= (T10_RMV_EN_MSK
| T10_CHK_EN_MSK
);
1151 prot
->lbrtcv
= lbrt_chk_val
;
1153 case SCSI_PROT_WRITE_PASS
:
1154 prot
->dw0
|= T10_CHK_EN_MSK
;
1155 prot
->lbrtcv
= lbrt_chk_val
;
1156 prot
->dw4
|= get_prot_chk_msk_v3_hw(scsi_cmnd
);
1159 WARN(1, "prot_op(0x%x) is not valid\n", prot_op
);
1167 prot
->dw0
|= (0x1 << USR_DATA_BLOCK_SZ_OFF
);
1170 prot
->dw0
|= (0x2 << USR_DATA_BLOCK_SZ_OFF
);
1173 WARN(1, "protection interval (0x%x) invalid\n",
1178 prot
->dw0
|= INCR_LBRT_MSK
;
1181 static void prep_ssp_v3_hw(struct hisi_hba
*hisi_hba
,
1182 struct hisi_sas_slot
*slot
)
1184 struct sas_task
*task
= slot
->task
;
1185 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1186 struct domain_device
*device
= task
->dev
;
1187 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1188 struct hisi_sas_port
*port
= slot
->port
;
1189 struct sas_ssp_task
*ssp_task
= &task
->ssp_task
;
1190 struct scsi_cmnd
*scsi_cmnd
= ssp_task
->cmd
;
1191 struct hisi_sas_tmf_task
*tmf
= slot
->tmf
;
1192 int has_data
= 0, priority
= !!tmf
;
1193 unsigned char prot_op
;
1195 u32 dw1
= 0, dw2
= 0, len
= 0;
1197 hdr
->dw0
= cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF
) |
1198 (2 << CMD_HDR_TLR_CTRL_OFF
) |
1199 (port
->id
<< CMD_HDR_PORT_OFF
) |
1200 (priority
<< CMD_HDR_PRIORITY_OFF
) |
1201 (1 << CMD_HDR_CMD_OFF
)); /* ssp */
1203 dw1
= 1 << CMD_HDR_VDTL_OFF
;
1205 dw1
|= 2 << CMD_HDR_FRAME_TYPE_OFF
;
1206 dw1
|= DIR_NO_DATA
<< CMD_HDR_DIR_OFF
;
1208 prot_op
= scsi_get_prot_op(scsi_cmnd
);
1209 dw1
|= 1 << CMD_HDR_FRAME_TYPE_OFF
;
1210 switch (scsi_cmnd
->sc_data_direction
) {
1213 dw1
|= DIR_TO_DEVICE
<< CMD_HDR_DIR_OFF
;
1215 case DMA_FROM_DEVICE
:
1217 dw1
|= DIR_TO_INI
<< CMD_HDR_DIR_OFF
;
1220 dw1
&= ~CMD_HDR_DIR_MSK
;
1224 /* map itct entry */
1225 dw1
|= sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
;
1227 dw2
= (((sizeof(struct ssp_command_iu
) + sizeof(struct ssp_frame_hdr
)
1228 + 3) / 4) << CMD_HDR_CFL_OFF
) |
1229 ((HISI_SAS_MAX_SSP_RESP_SZ
/ 4) << CMD_HDR_MRFL_OFF
) |
1230 (2 << CMD_HDR_SG_MOD_OFF
);
1231 hdr
->dw2
= cpu_to_le32(dw2
);
1232 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
);
1235 prep_prd_sge_v3_hw(hisi_hba
, slot
, hdr
, task
->scatter
,
1238 if (scsi_prot_sg_count(scsi_cmnd
))
1239 prep_prd_sge_dif_v3_hw(hisi_hba
, slot
, hdr
,
1240 scsi_prot_sglist(scsi_cmnd
),
1244 hdr
->cmd_table_addr
= cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot
));
1245 hdr
->sts_buffer_addr
= cpu_to_le64(hisi_sas_status_buf_addr_dma(slot
));
1247 buf_cmd
= hisi_sas_cmd_hdr_addr_mem(slot
) +
1248 sizeof(struct ssp_frame_hdr
);
1250 memcpy(buf_cmd
, &task
->ssp_task
.LUN
, 8);
1252 buf_cmd
[9] = ssp_task
->task_attr
| (ssp_task
->task_prio
<< 3);
1253 memcpy(buf_cmd
+ 12, scsi_cmnd
->cmnd
, scsi_cmnd
->cmd_len
);
1255 buf_cmd
[10] = tmf
->tmf
;
1257 case TMF_ABORT_TASK
:
1258 case TMF_QUERY_TASK
:
1260 (tmf
->tag_of_task_to_be_managed
>> 8) & 0xff;
1262 tmf
->tag_of_task_to_be_managed
& 0xff;
1269 if (has_data
&& (prot_op
!= SCSI_PROT_NORMAL
)) {
1270 struct hisi_sas_protect_iu_v3_hw prot
;
1273 hdr
->dw7
|= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF
);
1274 dw1
|= CMD_HDR_PIR_MSK
;
1275 buf_cmd_prot
= hisi_sas_cmd_hdr_addr_mem(slot
) +
1276 sizeof(struct ssp_frame_hdr
) +
1277 sizeof(struct ssp_command_iu
);
1279 memset(&prot
, 0, sizeof(struct hisi_sas_protect_iu_v3_hw
));
1280 fill_prot_v3_hw(scsi_cmnd
, &prot
);
1281 memcpy(buf_cmd_prot
, &prot
,
1282 sizeof(struct hisi_sas_protect_iu_v3_hw
));
1284 * For READ, we need length of info read to memory, while for
1285 * WRITE we need length of data written to the disk.
1287 if (prot_op
== SCSI_PROT_WRITE_INSERT
||
1288 prot_op
== SCSI_PROT_READ_INSERT
||
1289 prot_op
== SCSI_PROT_WRITE_PASS
||
1290 prot_op
== SCSI_PROT_READ_PASS
) {
1291 unsigned int interval
= scsi_prot_interval(scsi_cmnd
);
1292 unsigned int ilog2_interval
= ilog2(interval
);
1294 len
= (task
->total_xfer_len
>> ilog2_interval
) * 8;
1298 hdr
->dw1
= cpu_to_le32(dw1
);
1300 hdr
->data_transfer_len
= cpu_to_le32(task
->total_xfer_len
+ len
);
1303 static void prep_smp_v3_hw(struct hisi_hba
*hisi_hba
,
1304 struct hisi_sas_slot
*slot
)
1306 struct sas_task
*task
= slot
->task
;
1307 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1308 struct domain_device
*device
= task
->dev
;
1309 struct hisi_sas_port
*port
= slot
->port
;
1310 struct scatterlist
*sg_req
;
1311 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1312 dma_addr_t req_dma_addr
;
1313 unsigned int req_len
;
1316 sg_req
= &task
->smp_task
.smp_req
;
1317 req_len
= sg_dma_len(sg_req
);
1318 req_dma_addr
= sg_dma_address(sg_req
);
1322 hdr
->dw0
= cpu_to_le32((port
->id
<< CMD_HDR_PORT_OFF
) |
1323 (1 << CMD_HDR_PRIORITY_OFF
) | /* high pri */
1324 (2 << CMD_HDR_CMD_OFF
)); /* smp */
1326 /* map itct entry */
1327 hdr
->dw1
= cpu_to_le32((sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
) |
1328 (1 << CMD_HDR_FRAME_TYPE_OFF
) |
1329 (DIR_NO_DATA
<< CMD_HDR_DIR_OFF
));
1332 hdr
->dw2
= cpu_to_le32((((req_len
- 4) / 4) << CMD_HDR_CFL_OFF
) |
1333 (HISI_SAS_MAX_SMP_RESP_SZ
/ 4 <<
1336 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
<< CMD_HDR_IPTT_OFF
);
1338 hdr
->cmd_table_addr
= cpu_to_le64(req_dma_addr
);
1339 hdr
->sts_buffer_addr
= cpu_to_le64(hisi_sas_status_buf_addr_dma(slot
));
1343 static void prep_ata_v3_hw(struct hisi_hba
*hisi_hba
,
1344 struct hisi_sas_slot
*slot
)
1346 struct sas_task
*task
= slot
->task
;
1347 struct domain_device
*device
= task
->dev
;
1348 struct domain_device
*parent_dev
= device
->parent
;
1349 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1350 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1351 struct asd_sas_port
*sas_port
= device
->port
;
1352 struct hisi_sas_port
*port
= to_hisi_sas_port(sas_port
);
1354 int has_data
= 0, hdr_tag
= 0;
1355 u32 dw1
= 0, dw2
= 0;
1357 hdr
->dw0
= cpu_to_le32(port
->id
<< CMD_HDR_PORT_OFF
);
1358 if (parent_dev
&& dev_is_expander(parent_dev
->dev_type
))
1359 hdr
->dw0
|= cpu_to_le32(3 << CMD_HDR_CMD_OFF
);
1361 hdr
->dw0
|= cpu_to_le32(4U << CMD_HDR_CMD_OFF
);
1363 switch (task
->data_dir
) {
1366 dw1
|= DIR_TO_DEVICE
<< CMD_HDR_DIR_OFF
;
1368 case DMA_FROM_DEVICE
:
1370 dw1
|= DIR_TO_INI
<< CMD_HDR_DIR_OFF
;
1373 dw1
&= ~CMD_HDR_DIR_MSK
;
1376 if ((task
->ata_task
.fis
.command
== ATA_CMD_DEV_RESET
) &&
1377 (task
->ata_task
.fis
.control
& ATA_SRST
))
1378 dw1
|= 1 << CMD_HDR_RESET_OFF
;
1380 dw1
|= (hisi_sas_get_ata_protocol(
1381 &task
->ata_task
.fis
, task
->data_dir
))
1382 << CMD_HDR_FRAME_TYPE_OFF
;
1383 dw1
|= sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
;
1385 if (FIS_CMD_IS_UNCONSTRAINED(task
->ata_task
.fis
))
1386 dw1
|= 1 << CMD_HDR_UNCON_CMD_OFF
;
1388 hdr
->dw1
= cpu_to_le32(dw1
);
1391 if (task
->ata_task
.use_ncq
) {
1392 struct ata_queued_cmd
*qc
= task
->uldd_task
;
1395 task
->ata_task
.fis
.sector_count
|= (u8
) (hdr_tag
<< 3);
1396 dw2
|= hdr_tag
<< CMD_HDR_NCQ_TAG_OFF
;
1399 dw2
|= (HISI_SAS_MAX_STP_RESP_SZ
/ 4) << CMD_HDR_CFL_OFF
|
1400 2 << CMD_HDR_SG_MOD_OFF
;
1401 hdr
->dw2
= cpu_to_le32(dw2
);
1404 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
);
1407 prep_prd_sge_v3_hw(hisi_hba
, slot
, hdr
, task
->scatter
,
1410 hdr
->data_transfer_len
= cpu_to_le32(task
->total_xfer_len
);
1411 hdr
->cmd_table_addr
= cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot
));
1412 hdr
->sts_buffer_addr
= cpu_to_le64(hisi_sas_status_buf_addr_dma(slot
));
1414 buf_cmd
= hisi_sas_cmd_hdr_addr_mem(slot
);
1416 if (likely(!task
->ata_task
.device_control_reg_update
))
1417 task
->ata_task
.fis
.flags
|= 0x80; /* C=1: update ATA cmd reg */
1418 /* fill in command FIS */
1419 memcpy(buf_cmd
, &task
->ata_task
.fis
, sizeof(struct host_to_dev_fis
));
1422 static void prep_abort_v3_hw(struct hisi_hba
*hisi_hba
,
1423 struct hisi_sas_slot
*slot
,
1424 int device_id
, int abort_flag
, int tag_to_abort
)
1426 struct sas_task
*task
= slot
->task
;
1427 struct domain_device
*dev
= task
->dev
;
1428 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1429 struct hisi_sas_port
*port
= slot
->port
;
1432 hdr
->dw0
= cpu_to_le32((5U << CMD_HDR_CMD_OFF
) | /*abort*/
1433 (port
->id
<< CMD_HDR_PORT_OFF
) |
1435 << CMD_HDR_ABORT_DEVICE_TYPE_OFF
) |
1437 << CMD_HDR_ABORT_FLAG_OFF
));
1440 hdr
->dw1
= cpu_to_le32(device_id
1441 << CMD_HDR_DEV_ID_OFF
);
1444 hdr
->dw7
= cpu_to_le32(tag_to_abort
<< CMD_HDR_ABORT_IPTT_OFF
);
1445 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
);
1449 static irqreturn_t
phy_up_v3_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1453 u32 context
, port_id
, link_rate
;
1454 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1455 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1456 struct device
*dev
= hisi_hba
->dev
;
1457 unsigned long flags
;
1459 del_timer(&phy
->timer
);
1460 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_PHY_ENA_MSK
, 1);
1462 port_id
= hisi_sas_read32(hisi_hba
, PHY_PORT_NUM_MA
);
1463 port_id
= (port_id
>> (4 * phy_no
)) & 0xf;
1464 link_rate
= hisi_sas_read32(hisi_hba
, PHY_CONN_RATE
);
1465 link_rate
= (link_rate
>> (phy_no
* 4)) & 0xf;
1467 if (port_id
== 0xf) {
1468 dev_err(dev
, "phyup: phy%d invalid portid\n", phy_no
);
1472 sas_phy
->linkrate
= link_rate
;
1473 phy
->phy_type
&= ~(PORT_TYPE_SAS
| PORT_TYPE_SATA
);
1475 /* Check for SATA dev */
1476 context
= hisi_sas_read32(hisi_hba
, PHY_CONTEXT
);
1477 if (context
& (1 << phy_no
)) {
1478 struct hisi_sas_initial_fis
*initial_fis
;
1479 struct dev_to_host_fis
*fis
;
1480 u8 attached_sas_addr
[SAS_ADDR_SIZE
] = {0};
1481 struct Scsi_Host
*shost
= hisi_hba
->shost
;
1483 dev_info(dev
, "phyup: phy%d link_rate=%d(sata)\n", phy_no
, link_rate
);
1484 initial_fis
= &hisi_hba
->initial_fis
[phy_no
];
1485 fis
= &initial_fis
->fis
;
1487 /* check ERR bit of Status Register */
1488 if (fis
->status
& ATA_ERR
) {
1489 dev_warn(dev
, "sata int: phy%d FIS status: 0x%x\n",
1490 phy_no
, fis
->status
);
1491 hisi_sas_notify_phy_event(phy
, HISI_PHYE_LINK_RESET
);
1496 sas_phy
->oob_mode
= SATA_OOB_MODE
;
1497 attached_sas_addr
[0] = 0x50;
1498 attached_sas_addr
[6] = shost
->host_no
;
1499 attached_sas_addr
[7] = phy_no
;
1500 memcpy(sas_phy
->attached_sas_addr
,
1503 memcpy(sas_phy
->frame_rcvd
, fis
,
1504 sizeof(struct dev_to_host_fis
));
1505 phy
->phy_type
|= PORT_TYPE_SATA
;
1506 phy
->identify
.device_type
= SAS_SATA_DEV
;
1507 phy
->frame_rcvd_size
= sizeof(struct dev_to_host_fis
);
1508 phy
->identify
.target_port_protocols
= SAS_PROTOCOL_SATA
;
1510 u32
*frame_rcvd
= (u32
*)sas_phy
->frame_rcvd
;
1511 struct sas_identify_frame
*id
=
1512 (struct sas_identify_frame
*)frame_rcvd
;
1514 dev_info(dev
, "phyup: phy%d link_rate=%d\n", phy_no
, link_rate
);
1515 for (i
= 0; i
< 6; i
++) {
1516 u32 idaf
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1517 RX_IDAF_DWORD0
+ (i
* 4));
1518 frame_rcvd
[i
] = __swab32(idaf
);
1520 sas_phy
->oob_mode
= SAS_OOB_MODE
;
1521 memcpy(sas_phy
->attached_sas_addr
,
1524 phy
->phy_type
|= PORT_TYPE_SAS
;
1525 phy
->identify
.device_type
= id
->dev_type
;
1526 phy
->frame_rcvd_size
= sizeof(struct sas_identify_frame
);
1527 if (phy
->identify
.device_type
== SAS_END_DEVICE
)
1528 phy
->identify
.target_port_protocols
=
1530 else if (phy
->identify
.device_type
!= SAS_PHY_UNUSED
)
1531 phy
->identify
.target_port_protocols
=
1535 phy
->port_id
= port_id
;
1536 phy
->phy_attached
= 1;
1537 hisi_sas_notify_phy_event(phy
, HISI_PHYE_PHY_UP
);
1539 spin_lock_irqsave(&phy
->lock
, flags
);
1540 if (phy
->reset_completion
) {
1542 complete(phy
->reset_completion
);
1544 spin_unlock_irqrestore(&phy
->lock
, flags
);
1546 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
,
1547 CHL_INT0_SL_PHY_ENABLE_MSK
);
1548 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_PHY_ENA_MSK
, 0);
1553 static irqreturn_t
phy_down_v3_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1555 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1556 u32 phy_state
, sl_ctrl
, txid_auto
;
1557 struct device
*dev
= hisi_hba
->dev
;
1559 atomic_inc(&phy
->down_cnt
);
1561 del_timer(&phy
->timer
);
1562 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_NOT_RDY_MSK
, 1);
1564 phy_state
= hisi_sas_read32(hisi_hba
, PHY_STATE
);
1565 dev_info(dev
, "phydown: phy%d phy_state=0x%x\n", phy_no
, phy_state
);
1566 hisi_sas_phy_down(hisi_hba
, phy_no
, (phy_state
& 1 << phy_no
) ? 1 : 0);
1568 sl_ctrl
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SL_CONTROL
);
1569 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_CONTROL
,
1570 sl_ctrl
&(~SL_CTA_MSK
));
1572 txid_auto
= hisi_sas_phy_read32(hisi_hba
, phy_no
, TXID_AUTO
);
1573 hisi_sas_phy_write32(hisi_hba
, phy_no
, TXID_AUTO
,
1574 txid_auto
| CT3_MSK
);
1576 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
, CHL_INT0_NOT_RDY_MSK
);
1577 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_NOT_RDY_MSK
, 0);
1582 static irqreturn_t
phy_bcast_v3_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1584 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1585 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1586 struct sas_ha_struct
*sas_ha
= &hisi_hba
->sha
;
1589 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_RX_BCAST_CHK_MSK
, 1);
1590 bcast_status
= hisi_sas_phy_read32(hisi_hba
, phy_no
, RX_PRIMS_STATUS
);
1591 if ((bcast_status
& RX_BCAST_CHG_MSK
) &&
1592 !test_bit(HISI_SAS_RESET_BIT
, &hisi_hba
->flags
))
1593 sas_ha
->notify_port_event(sas_phy
, PORTE_BROADCAST_RCVD
);
1594 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
,
1595 CHL_INT0_SL_RX_BCST_ACK_MSK
);
1596 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_RX_BCAST_CHK_MSK
, 0);
1601 static irqreturn_t
int_phy_up_down_bcast_v3_hw(int irq_no
, void *p
)
1603 struct hisi_hba
*hisi_hba
= p
;
1606 irqreturn_t res
= IRQ_NONE
;
1608 irq_msk
= hisi_sas_read32(hisi_hba
, CHNL_INT_STATUS
)
1612 u32 irq_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1614 u32 phy_state
= hisi_sas_read32(hisi_hba
, PHY_STATE
);
1615 int rdy
= phy_state
& (1 << phy_no
);
1618 if (irq_value
& CHL_INT0_SL_PHY_ENABLE_MSK
)
1620 if (phy_up_v3_hw(phy_no
, hisi_hba
)
1623 if (irq_value
& CHL_INT0_SL_RX_BCST_ACK_MSK
)
1625 if (phy_bcast_v3_hw(phy_no
, hisi_hba
)
1629 if (irq_value
& CHL_INT0_NOT_RDY_MSK
)
1631 if (phy_down_v3_hw(phy_no
, hisi_hba
)
1643 static const struct hisi_sas_hw_error port_axi_error
[] = {
1645 .irq_msk
= BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF
),
1646 .msg
= "dmac_tx_ecc_bad_err",
1649 .irq_msk
= BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF
),
1650 .msg
= "dmac_rx_ecc_bad_err",
1653 .irq_msk
= BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF
),
1654 .msg
= "dma_tx_axi_wr_err",
1657 .irq_msk
= BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF
),
1658 .msg
= "dma_tx_axi_rd_err",
1661 .irq_msk
= BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF
),
1662 .msg
= "dma_rx_axi_wr_err",
1665 .irq_msk
= BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF
),
1666 .msg
= "dma_rx_axi_rd_err",
1669 .irq_msk
= BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF
),
1670 .msg
= "dma_tx_fifo_err",
1673 .irq_msk
= BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF
),
1674 .msg
= "dma_rx_fifo_err",
1677 .irq_msk
= BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF
),
1678 .msg
= "dma_tx_axi_ruser_err",
1681 .irq_msk
= BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF
),
1682 .msg
= "dma_rx_axi_ruser_err",
1686 static void handle_chl_int1_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1688 u32 irq_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, CHL_INT1
);
1689 u32 irq_msk
= hisi_sas_phy_read32(hisi_hba
, phy_no
, CHL_INT1_MSK
);
1690 struct device
*dev
= hisi_hba
->dev
;
1693 irq_value
&= ~irq_msk
;
1697 for (i
= 0; i
< ARRAY_SIZE(port_axi_error
); i
++) {
1698 const struct hisi_sas_hw_error
*error
= &port_axi_error
[i
];
1700 if (!(irq_value
& error
->irq_msk
))
1703 dev_err(dev
, "%s error (phy%d 0x%x) found!\n",
1704 error
->msg
, phy_no
, irq_value
);
1705 queue_work(hisi_hba
->wq
, &hisi_hba
->rst_work
);
1708 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT1
, irq_value
);
1711 static void phy_get_events_v3_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
1713 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1714 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1715 struct sas_phy
*sphy
= sas_phy
->phy
;
1716 unsigned long flags
;
1719 spin_lock_irqsave(&phy
->lock
, flags
);
1721 /* loss dword sync */
1722 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_DWS_LOST
);
1723 sphy
->loss_of_dword_sync_count
+= reg_value
;
1725 /* phy reset problem */
1726 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_RESET_PROB
);
1727 sphy
->phy_reset_problem_count
+= reg_value
;
1730 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_INVLD_DW
);
1731 sphy
->invalid_dword_count
+= reg_value
;
1734 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_DISP_ERR
);
1735 sphy
->running_disparity_error_count
+= reg_value
;
1737 /* code violation error */
1738 reg_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
, ERR_CNT_CODE_ERR
);
1739 phy
->code_violation_err_count
+= reg_value
;
1741 spin_unlock_irqrestore(&phy
->lock
, flags
);
static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct pci_dev *pci_dev = hisi_hba->pci_dev;
	struct device *dev = hisi_hba->dev;
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			BIT(CHL_INT2_RX_INVLD_DW_OFF);

	irq_value &= ~irq_msk;
	if (!irq_value)
		return;

	if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
		dev_warn(dev, "phy%d identify timeout\n", phy_no);
		hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
		u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
				STP_LINK_TIMEOUT_STATE);

		dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
			 phy_no, reg_value);
		if (reg_value & BIT(4))
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (pci_dev->revision > 0x20 && (irq_value & msk)) {
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct sas_phy *sphy = sas_phy->phy;

		phy_get_events_v3_hw(hisi_hba, phy_no);

		if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
			dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no,
				 sphy->invalid_dword_count);

		if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
			dev_info(dev, "phy%d code violation cnt: %u\n", phy_no,
				 phy->code_violation_err_count);

		if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
			dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
				 sphy->running_disparity_error_count);
	}

	if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
	    (pci_dev->revision == 0x20)) {
		u32 reg_value;
		int rc;

		rc = hisi_sas_read32_poll_timeout_atomic(
				HILINK_ERR_DFX, reg_value,
				!((reg_value >> 8) & BIT(phy_no)),
				1000, 10000);
		if (rc)
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
}

static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);

	if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
		hisi_sas_phy_oob_ready(hisi_hba, phy_no);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
			     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
			     & (~CHL_INT0_NOT_RDY_MSK));
}

static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
		  & CHNL_INT_STS_MSK;

	while (irq_msk) {
		if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int0_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int1_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int2_v3_hw(hisi_hba, phy_no);

		irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH));
		phy_no++;
	}

	return IRQ_HANDLED;
}

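/*
 * Multi-bit ECC errors are unrecoverable. Each table entry below maps an
 * interrupt status bit to the register and field that latch the failing
 * memory address; any hit is logged and a controller reset is scheduled
 * through rst_work.
 */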
static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
	{
		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_dqe_eccbad_intr",
		.reg = HGC_DQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
		.msg = "hgc_iost_eccbad_intr",
		.reg = HGC_IOST_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
		.msg = "hgc_itct_eccbad_intr",
		.reg = HGC_ITCT_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
		.msg = "hgc_iostl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
		.msg = "hgc_itctl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_cqe_eccbad_intr",
		.reg = HGC_CQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
		.msg = "rxm_mem0_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
		.msg = "rxm_mem1_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
		.msg = "rxm_mem2_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
		.msg = "rxm_mem3_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS15,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
		.msk = AM_ROB_ECC_ERR_ADDR_MSK,
		.shift = AM_ROB_ECC_ERR_ADDR_OFF,
		.msg = "ooo_ram_eccbad_intr",
		.reg = AM_ROB_ECC_ERR_ADDR,
	},
};

static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
					      u32 irq_value)
{
	struct device *dev = hisi_hba->dev;
	const struct hisi_sas_hw_error *ecc_error;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
		ecc_error = &multi_bit_ecc_errors[i];
		if (irq_value & ecc_error->irq_msk) {
			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
			val &= ecc_error->msk;
			val >>= ecc_error->shift;
			dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
				ecc_error->msg, irq_value, val);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}
	}
}

static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 irq_value, irq_msk;

	irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);

	multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);

	hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
}

static const struct hisi_sas_hw_error axi_error[] = {
	{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
	{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
	{ .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
	{ .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
	{ .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
	{}
};

static const struct hisi_sas_hw_error fifo_error[] = {
	{ .msk = BIT(8),  .msg = "CQE_WINFO_FIFO" },
	{ .msk = BIT(9),  .msg = "CQE_MSG_FIFIO" },
	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
	{}
};

static const struct hisi_sas_hw_error fatal_axi_error[] = {
	{
		.irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
		.msg = "write pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
		.msg = "iptt no match slot",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
		.msg = "read pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.msk = 0xff,
		.msg = "AXI error",
		.sub = axi_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.msk = 0xff00,
		.msg = "FIFO error",
		.sub = fifo_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
		.msg = "LM add/fetch list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
		.msg = "SAS_HGC_ABT fetch LM list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
		.msg = "read dqe poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
		.msg = "read iost poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
		.msg = "read itct poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
		.msg = "read itct ncq poison",
	},
};

static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
{
	u32 irq_value, irq_msk;
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);

	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	irq_value &= ~irq_msk;

	for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &fatal_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		if (error->sub) {
			const struct hisi_sas_hw_error *sub = error->sub;
			u32 err_value = hisi_sas_read32(hisi_hba, error->reg);

			for (; sub->msk || sub->msg; sub++) {
				if (!(err_value & sub->msk))
					continue;

				dev_err(dev, "%s error (0x%x) found!\n",
					sub->msg, irq_value);
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			}
		} else {
			dev_err(dev, "%s error (0x%x) found!\n",
				error->msg, irq_value);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}

		if (pdev->revision < 0x21) {
			u32 reg_val;

			reg_val = hisi_sas_read32(hisi_hba,
						  AXI_MASTER_CFG_BASE +
						  AM_CTRL_GLOBAL);
			reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
			hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
					 AM_CTRL_GLOBAL, reg_val);
		}
	}

	fatal_ecc_int_v3_hw(hisi_hba);

	if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
		u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		u32 dev_id = reg_val & ITCT_DEV_MSK;
		struct hisi_sas_device *sas_dev =
				&hisi_hba->devices[dev_id];

		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
		dev_dbg(dev, "clear ITCT ok\n");
		complete(sas_dev->completion);
	}

	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);

	return IRQ_HANDLED;
}

static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
	       struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	struct hisi_sas_err_record_v3 *record =
			hisi_sas_status_buf_addr_mem(slot);
	u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
	u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
	u32 dw3 = le32_to_cpu(complete_hdr->dw3);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_PHY_DOWN;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		hisi_sas_sata_done(task, slot);
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	default:
		break;
	}
}

static int
slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	struct sas_ha_struct *ha;
	enum exec_status sts;
	struct hisi_sas_complete_v3_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];
	unsigned long flags;
	bool is_internal = slot->is_internal;
	u32 dw0, dw1, dw3;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -EINVAL;

	ts = &task->task_status;
	device = task->dev;
	ha = device->port->ha;
	sas_dev = device->lldd_dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has not device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	dw0 = le32_to_cpu(complete_hdr->dw0);
	dw1 = le32_to_cpu(complete_hdr->dw1);
	dw3 = le32_to_cpu(complete_hdr->dw3);

	/*
	 * Use SAS+TMF status codes
	 */
	switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this IO has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/*
		 * abort single IO, the controller can't find the IO
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* check for erroneous completion */
	if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
		u32 *error_info = hisi_sas_status_buf_addr_mem(slot);

		slot_err_v3_hw(hisi_hba, task, slot);
		if (ts->stat != SAS_DATA_UNDERRUN)
			dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
				 slot->idx, task, sas_dev->device_id,
				 dw0, dw1, complete_hdr->act, dw3,
				 error_info[0], error_info[1],
				 error_info[2], error_info[3]);
		if (unlikely(slot->abort))
			return ts->stat;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP: {
		struct ssp_response_iu *iu =
			hisi_sas_status_buf_addr_mem(slot) +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to = page_address(sg_page(sg_resp));

		ts->stat = SAM_STAT_GOOD;

		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		memcpy(to + sg_resp->offset,
		       hisi_sas_status_buf_addr_mem(slot) +
		       sizeof(struct hisi_sas_err_record),
		       sg_resp->length);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAM_STAT_GOOD;
		hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_warn(dev, "slot complete: port %d has removed\n",
			 slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	sts = ts->stat;
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
		return SAS_ABORTED_TASK;
	}
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);

	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
		spin_lock_irqsave(&device->done_lock, flags);
		if (test_bit(SAS_HA_FROZEN, &ha->state)) {
			spin_unlock_irqrestore(&device->done_lock, flags);
			dev_info(dev, "slot complete: task(%pK) ignored\n ",
				 task);
			return sts;
		}
		spin_unlock_irqrestore(&device->done_lock, flags);
	}

	if (task->task_done)
		task->task_done(task);

	return sts;
}

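/*
 * Threaded half of the completion queue interrupt: walk the queue from the
 * cached read pointer to the hardware write pointer, complete the slot
 * referenced by the IPTT in dw1 of each entry, then publish the new read
 * pointer back to the hardware.
 */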
static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_complete_v3_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point;
	int queue = cq->id;

	complete_queue = hisi_hba->complete_hdr[queue];

	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		struct device *dev = hisi_hba->dev;
		u32 dw1;
		int iptt;

		complete_hdr = &complete_queue[rd_point];
		dw1 = le32_to_cpu(complete_hdr->dw1);

		iptt = dw1 & CMPLT_HDR_IPTT_MSK;
		if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v3_hw(hisi_hba, slot);
		} else
			dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);

	return IRQ_HANDLED;
}

static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	return IRQ_WAKE_THREAD;
}

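/*
 * Build the CPU -> completion-queue reply map from the MSI affinity masks.
 * If an affinity mask cannot be obtained for a queue, fall back to a simple
 * round-robin spread of all possible CPUs over the allocated queues.
 */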
static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
{
	const struct cpumask *mask;
	int queue, cpu;

	for (queue = 0; queue < nvecs; queue++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[queue];

		mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
					    BASE_VECTORS_V3_HW);
		if (!mask)
			goto fallback;
		cq->irq_mask = mask;
		for_each_cpu(cpu, mask)
			hisi_hba->reply_map[cpu] = queue;
	}
	return;

fallback:
	for_each_possible_cpu(cpu)
		hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
	/* Don't clean all CQ masks */
}

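/*
 * MSI vector layout used below: vector 1 handles phy up/down/broadcast,
 * vector 2 the per-channel interrupts, vector 11 the fatal AXI/ECC
 * interrupts, and vectors from 16 upwards the completion queues (all CQs
 * share vector 16 when interrupt converge is enabled).
 */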
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int vectors, rc, i;
	int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;

	if (auto_affine_msi_experimental) {
		struct irq_affinity desc = {
			.pre_vectors = BASE_VECTORS_V3_HW,
		};

		dev_info(dev, "Enable MSI auto-affinity\n");

		min_msi = MIN_AFFINE_VECTORS_V3_HW;

		hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
						   sizeof(unsigned int),
						   GFP_KERNEL);
		if (!hisi_hba->reply_map)
			return -ENOMEM;
		vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
							 min_msi, max_msi,
							 PCI_IRQ_MSI |
							 PCI_IRQ_AFFINITY,
							 &desc);
		if (vectors < 0)
			return -ENOENT;
		setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW);
	} else {
		min_msi = max_msi;
		vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi,
						max_msi, PCI_IRQ_MSI);
		if (vectors < 0)
			return -ENOENT;
	}

	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
			      int_phy_up_down_bcast_v3_hw, 0,
			      DRV_NAME " phy", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_irq_vectors;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
			      int_chnl_int_v3_hw, 0,
			      DRV_NAME " channel", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_irq_vectors;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
			      fatal_axi_int_v3_hw, 0,
			      DRV_NAME " fatal", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_irq_vectors;
	}

	if (hisi_sas_intr_conv)
		dev_info(dev, "Enable interrupt converge\n");

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
							      IRQF_ONESHOT;

		cq->irq_no = pci_irq_vector(pdev, nr);
		rc = devm_request_threaded_irq(dev, cq->irq_no,
					       cq_interrupt_v3_hw,
					       cq_thread_v3_hw,
					       irqflags,
					       DRV_NAME " cq", cq);
		if (rc) {
			dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
				i, rc);
			rc = -ENOENT;
			goto free_irq_vectors;
		}
	}

	return 0;

free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}

static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	return 0;
}

static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
				   struct sas_phy_linkrates *r)
{
	enum sas_linkrate max = r->maximum_linkrate;
	u32 prog_phy_link_rate = 0x800;

	prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			     prog_phy_link_rate);
}

static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
{
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	synchronize_irq(pci_irq_vector(pdev, 1));
	synchronize_irq(pci_irq_vector(pdev, 2));
	synchronize_irq(pci_irq_vector(pdev, 11));
	for (i = 0; i < hisi_hba->queue_count; i++) {
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
		synchronize_irq(pci_irq_vector(pdev, i + 16));
	}

	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
	}
}

static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
{
	return hisi_sas_read32(hisi_hba, PHY_STATE);
}

static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	u32 status, reg_val;
	int rc;

	interrupt_disable_v3_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);

	hisi_sas_stop_phys(hisi_hba);

	mdelay(10);

	reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
				  AM_CTRL_GLOBAL);
	reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
			 AM_CTRL_GLOBAL, reg_val);

	/* wait until bus idle */
	rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
					  AM_CURR_TRANS_RETURN, status,
					  status == 0x3, 10, 100);
	if (rc) {
		dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
		return rc;
	}

	return 0;
}

static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	return hw_init_v3_hw(hisi_hba);
}

static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
			    u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct device *dev = hisi_hba->dev;
	u32 *data = (u32 *)write_data;
	int i;

	switch (reg_type) {
	case SAS_GPIO_REG_TX:
		if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
			dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
				reg_index, reg_index + reg_count - 1);
			return -EINVAL;
		}

		for (i = 0; i < reg_count; i++)
			hisi_sas_write32(hisi_hba,
					 SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
					 data[i]);
		break;
	default:
		dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
			reg_type);
		return -EINVAL;
	}

	return 0;
}

static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
					     int delay_ms, int timeout_ms)
{
	struct device *dev = hisi_hba->dev;
	int entries, entries_old = 0, time;

	for (time = 0; time < timeout_ms; time += delay_ms) {
		entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
		if (entries == entries_old)
			break;

		entries_old = entries;
		msleep(delay_ms);
	}

	if (time >= timeout_ms) {
		dev_dbg(dev, "Wait commands complete timeout!\n");
		return;
	}

	dev_dbg(dev, "wait commands complete %dms\n", time);
}

static ssize_t intr_conv_v3_hw_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
}
static DEVICE_ATTR_RO(intr_conv_v3_hw);

static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
{
	/* config those registers between enable and disable PHYs */
	hisi_sas_stop_phys(hisi_hba);

	if (hisi_hba->intr_coal_ticks == 0 ||
	    hisi_hba->intr_coal_count == 0) {
		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	} else {
		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
				 hisi_hba->intr_coal_ticks);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
				 hisi_hba->intr_coal_count);
	}
	phys_init_v3_hw(hisi_hba);
}

static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 hisi_hba->intr_coal_ticks);
}

intr_coal_ticks_v3_hw_store(struct device
*dev
,
2679 struct device_attribute
*attr
,
2680 const char *buf
, size_t count
)
2682 struct Scsi_Host
*shost
= class_to_shost(dev
);
2683 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2684 u32 intr_coal_ticks
;
2687 ret
= kstrtou32(buf
, 10, &intr_coal_ticks
);
2689 dev_err(dev
, "Input data of interrupt coalesce unmatch\n");
2693 if (intr_coal_ticks
>= BIT(24)) {
2694 dev_err(dev
, "intr_coal_ticks must be less than 2^24!\n");
2698 hisi_hba
->intr_coal_ticks
= intr_coal_ticks
;
2700 config_intr_coal_v3_hw(hisi_hba
);
2704 static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw
);
static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 hisi_hba->intr_coal_count);
}

static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);
	u32 intr_coal_count;
	int ret;

	ret = kstrtou32(buf, 10, &intr_coal_count);
	if (ret) {
		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
		return -EINVAL;
	}

	if (intr_coal_count >= BIT(8)) {
		dev_err(dev, "intr_coal_count must be less than 2^8!\n");
		return -EINVAL;
	}

	hisi_hba->intr_coal_count = intr_coal_count;

	config_intr_coal_v3_hw(hisi_hba);

	return count;
}
static DEVICE_ATTR_RW(intr_coal_count_v3_hw);

static struct device_attribute *host_attrs_v3_hw[] = {
	&dev_attr_phy_event_threshold,
	&dev_attr_intr_conv_v3_hw,
	&dev_attr_intr_coal_ticks_v3_hw,
	&dev_attr_intr_coal_count_v3_hw,
	NULL
};

static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(PHY_CFG),
	HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
	HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE),
	HISI_SAS_DEBUGFS_REG(PHY_CTRL),
	HISI_SAS_DEBUGFS_REG(SL_CFG),
	HISI_SAS_DEBUGFS_REG(AIP_LIMIT),
	HISI_SAS_DEBUGFS_REG(SL_CONTROL),
	HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6),
	HISI_SAS_DEBUGFS_REG(TXID_AUTO),
	HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0),
	HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H),
	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER),
	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE),
	HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER),
	HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(CHL_INT0),
	HISI_SAS_DEBUGFS_REG(CHL_INT1),
	HISI_SAS_DEBUGFS_REG(CHL_INT2),
	HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK),
	HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK),
	HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK),
	HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN),
	HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER),
	HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK),
	HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK),
	HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS),
	HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS),
	HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
	.lu = debugfs_port_reg_lu,
	.count = 0x100,
	.base_off = PORT_BASE,
	.read_port_reg = hisi_sas_phy_read32,
};

static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE),
	HISI_SAS_DEBUGFS_REG(PHY_CONTEXT),
	HISI_SAS_DEBUGFS_REG(PHY_STATE),
	HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA),
	HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE),
	HISI_SAS_DEBUGFS_REG(ITCT_CLR),
	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO),
	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI),
	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
	HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
	HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
	HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
	HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
	HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE),
	HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME),
	HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME),
	HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN),
	HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME),
	HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2),
	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT),
	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE),
	HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS),
	HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS),
	HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO),
	HISI_SAS_DEBUGFS_REG(INT_COAL_EN),
	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT),
	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT),
	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC),
	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3),
	HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK),
	HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK),
	HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK),
	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR),
	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK),
	HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN),
	HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR),
	HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG),
	HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR),
	HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG),
	HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG),
	HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1),
	HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
	.lu = debugfs_global_reg_lu,
	.count = 0x800,
	.read_global_reg = hisi_sas_read32,
};

static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS),
	HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS),
	HISI_SAS_DEBUGFS_REG(AXI_CFG),
	HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
	.lu = debugfs_axi_reg_lu,
	.count = 0x61,
	.base_off = AXI_MASTER_CFG_BASE,
	.read_global_reg = hisi_sas_read32,
};

static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
	HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
	.lu = debugfs_ras_reg_lu,
	.count = 0x10,
	.base_off = RAS_BASE,
	.read_global_reg = hisi_sas_read32,
};

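/*
 * debugfs snapshot helpers: reject new commands and disable the delivery
 * queues, give in-flight commands time to drain and sync the irqs before
 * registers are dumped, then re-enable the delivery queues afterwards.
 */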
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);

	hisi_sas_sync_irqs(hisi_hba);
}

static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
{
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}

static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
				       enum hisi_sas_debugfs_cache_type type,
				       u32 *cache)
{
	u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ *
			    HISI_SAS_IOST_ITCT_CACHE_NUM;
	u32 *buf = cache;
	u32 i, val;

	hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type);

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) {
		val = hisi_sas_read32(hisi_hba, TAB_DFX);
		if (val == 0xffffffff)
			break;
	}

	if (val != 0xffffffff) {
		pr_err("Issue occur when reading IOST/ITCT cache!\n");
		return;
	}

	memset(buf, 0, cache_dw_size * 4);
	buf[0] = val;

	for (i = 1; i < cache_dw_size; i++)
		buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX);
}

static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 reg_val;
	int phy_id = hisi_hba->debugfs_bist_phy_no;

	/* disable PHY */
	hisi_sas_phy_enable(hisi_hba, phy_id, 0);

	/* disable ALOS */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
	reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
}

static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 reg_val;
	int phy_id = hisi_hba->debugfs_bist_phy_no;

	/* disable loopback */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL);
	reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
		     CFG_BIST_TEST_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val);

	/* enable ALOS */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
	reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);

	/* restore the linkrate */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE);
	/* init OOB link rate as 1.5 Gbits */
	reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
	reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF);
	hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val);

	/* enable PHY */
	hisi_sas_phy_enable(hisi_hba, phy_id, 1);
}

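/*
 * PHY BIST (loopback) support: the PHY and ALOS checking are disabled, the
 * requested link rate, code mode and loopback path are programmed, the test
 * pattern is seeded, and then the RX/TX BIST engines are enabled. The error
 * counter is read back and accumulated when the test is stopped.
 */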
#define SAS_PHY_BIST_CODE_INIT	0x1
#define SAS_PHY_BIST_CODE1_INIT	0X80
static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
	u32 reg_val, mode_tmp;
	u32 linkrate = hisi_hba->debugfs_bist_linkrate;
	u32 phy_id = hisi_hba->debugfs_bist_phy_no;
	u32 code_mode = hisi_hba->debugfs_bist_code_mode;
	u32 path_mode = hisi_hba->debugfs_bist_mode;
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n",
		 linkrate, phy_id, code_mode, path_mode);
	mode_tmp = path_mode ? 2 : 1;
	if (enable) {
		/* some preparations before bist test */
		hisi_sas_bist_test_prep_v3_hw(hisi_hba);

		/* set linkrate of bit test*/
		reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
					      PROG_PHY_LINK_RATE);
		reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
		reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF);
		hisi_sas_phy_write32(hisi_hba, phy_id,
				     PROG_PHY_LINK_RATE, reg_val);

		/* set code mode of bit test */
		reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
					      SAS_PHY_BIST_CTRL);
		reg_val &= ~(CFG_BIST_MODE_SEL_MSK |
			     CFG_LOOP_TEST_MODE_MSK |
			     CFG_RX_BIST_EN_MSK |
			     CFG_TX_BIST_EN_MSK |
			     CFG_BIST_TEST_MSK);
		reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
			    (mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
			    CFG_BIST_TEST_MSK);
		hisi_sas_phy_write32(hisi_hba, phy_id,
				     SAS_PHY_BIST_CTRL, reg_val);

		/* set the bist init value */
		hisi_sas_phy_write32(hisi_hba, phy_id,
				     SAS_PHY_BIST_CODE,
				     SAS_PHY_BIST_CODE_INIT);
		hisi_sas_phy_write32(hisi_hba, phy_id,
				     SAS_PHY_BIST_CODE1,
				     SAS_PHY_BIST_CODE1_INIT);

		reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
		hisi_sas_phy_write32(hisi_hba, phy_id,
				     SAS_PHY_BIST_CTRL, reg_val);

		/* clear error bit */
		hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
	} else {
		/* disable bist test and recover it */
		hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
				phy_id, SAS_BIST_ERR_CNT);
		hisi_sas_bist_test_restore_v3_hw(hisi_hba);
	}

	return 0;
}

static struct scsi_host_template sht_v3_hw = {
	.name			= DRV_NAME,
	.module			= THIS_MODULE,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.this_id		= -1,
	.sg_tablesize		= HISI_SAS_SGE_PAGE_CNT,
	.sg_prot_tablesize	= HISI_SAS_SGE_PAGE_CNT,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sas_ioctl,
#endif
	.shost_attrs		= host_attrs_v3_hw,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
	.host_reset		= hisi_sas_host_reset,
};

static const struct hisi_sas_hw hisi_sas_v3_hw = {
	.hw_init = hisi_sas_v3_init,
	.setup_itct = setup_itct_v3_hw,
	.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
	.clear_itct = clear_itct_v3_hw,
	.sl_notify_ssp = sl_notify_ssp_v3_hw,
	.prep_ssp = prep_ssp_v3_hw,
	.prep_smp = prep_smp_v3_hw,
	.prep_stp = prep_ata_v3_hw,
	.prep_abort = prep_abort_v3_hw,
	.start_delivery = start_delivery_v3_hw,
	.phys_init = phys_init_v3_hw,
	.phy_start = start_phy_v3_hw,
	.phy_disable = disable_phy_v3_hw,
	.phy_hard_reset = phy_hard_reset_v3_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
	.phy_set_linkrate = phy_set_linkrate_v3_hw,
	.dereg_device = dereg_device_v3_hw,
	.soft_reset = soft_reset_v3_hw,
	.get_phys_state = get_phys_state_v3_hw,
	.get_events = phy_get_events_v3_hw,
	.write_gpio = write_gpio_v3_hw,
	.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
	.debugfs_reg_array[DEBUGFS_GLOBAL] = &debugfs_global_reg,
	.debugfs_reg_array[DEBUGFS_AXI] = &debugfs_axi_reg,
	.debugfs_reg_array[DEBUGFS_RAS] = &debugfs_ras_reg,
	.debugfs_reg_port = &debugfs_port_reg,
	.snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
	.snapshot_restore = debugfs_snapshot_restore_v3_hw,
	.read_iost_itct_cache = read_iost_itct_cache_v3_hw,
	.set_bist = debugfs_set_bist_v3_hw,
};

static struct Scsi_Host *
hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "shost alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler);
	hisi_hba->hw = &hisi_sas_v3_hw;
	hisi_hba->pci_dev = pdev;
	hisi_hba->dev = dev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	if (prot_mask & ~HISI_SAS_PROT_MASK)
		dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
			prot_mask);
	else
		hisi_hba->prot_mask = prot_mask;

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static int
hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable_device;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "No usable DMA addressing method\n");
		rc = -ENODEV;
		goto err_out_regions;
	}

	shost = hisi_sas_shost_alloc_pci(pdev);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	dev_set_drvdata(dev, sha);

	hisi_hba->regs = pcim_iomap(pdev, 5, 0);
	if (!hisi_hba->regs) {
		dev_err(dev, "cannot map register\n");
		rc = -ENOMEM;
		goto err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
	shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	if (hisi_hba->prot_mask) {
		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
			 prot_mask);
		scsi_host_set_prot(hisi_hba->shost, prot_mask);
		if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
			scsi_host_set_guard(hisi_hba->shost,
					    SHOST_DIX_GUARD_CRC);
	}

	if (hisi_sas_debugfs_enable)
		hisi_sas_debugfs_init(hisi_hba);

	rc = scsi_add_host(shost, dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_debugfs_exit(hisi_hba);
	scsi_host_put(shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static void
hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
	int i;

	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		int nr = hisi_sas_intr_conv ? 16 : 16 + i;

		free_irq(pci_irq_vector(pdev, nr), cq);
	}
	pci_free_irq_vectors(pdev);
}

static void hisi_sas_v3_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sas_ha_struct *sha = dev_get_drvdata(dev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	hisi_sas_free(hisi_hba);
	hisi_sas_debugfs_exit(hisi_hba);
	scsi_host_put(shost);
}

static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	dev_info(dev, "FLR prepare\n");
	set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc)
		dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
}

static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	hisi_sas_init_mem(hisi_hba);

	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
		return;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "FLR done\n");
}

enum {
	/* instances of the controller */
	hip08,
};

static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	pci_power_t device_state;
	int rc;

	if (!pdev->pm_cap) {
		dev_err(dev, "PCI PM not supported\n");
		return -ENODEV;
	}

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	flush_workqueue(hisi_hba->wq);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	device_state = pci_choose_state(pdev, state);
	dev_warn(dev, "entering operating state [D%d]\n",
		 device_state);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, device_state);

	hisi_sas_release_tasks(hisi_hba);

	sas_suspend_ha(sha);
	return 0;
}

static int hisi_sas_v3_resume(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = hisi_hba->shost;
	struct device *dev = hisi_hba->dev;
	unsigned int rc;
	pci_power_t device_state = pdev->current_state;

	dev_warn(dev, "resuming from operating state [D%d]\n",
		 device_state);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(dev, "enable device failed during resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	sas_prep_resume_ha(sha);
	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		scsi_remove_host(shost);
		pci_disable_device(pdev);
		return rc;
	}
	hisi_hba->hw->phys_init(hisi_hba);
	sas_resume_ha(sha);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return 0;
}

static const struct pci_device_id sas_v3_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
	{}
};
MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);

static const struct pci_error_handlers hisi_sas_err_handler = {
	.reset_prepare	= hisi_sas_reset_prepare_v3_hw,
	.reset_done	= hisi_sas_reset_done_v3_hw,
};

static struct pci_driver sas_v3_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sas_v3_pci_table,
	.probe		= hisi_sas_v3_probe,
	.remove		= hisi_sas_v3_remove,
	.suspend	= hisi_sas_v3_suspend,
	.resume		= hisi_sas_v3_resume,
	.err_handler	= &hisi_sas_err_handler,
};

module_pci_driver(sas_v3_pci_driver);
module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
MODULE_ALIAS("pci:" DRV_NAME);