/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
struct qed_pi_info {
	qed_int_comp_cb_t	comp_cb;
	void			*cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};
enum qed_attention_type {
	QED_ATTN_TYPE_ATTN,
	QED_ATTN_TYPE_PARITY,
};
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0x00700000)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		BIT(23)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS           (8)
#define NUM_ATTN_REGS           (9)
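/* The AEU exposes its state through NUM_ATTN_REGS (9) 32-bit "after invert"
 * registers and routes the sources into MAX_ATTN_GRPS (8) output groups,
 * each group owning its own copy of the enable registers (see the address
 * arithmetic in qed_int_deassertion() below).
 */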
/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	/* This might occur on certain instances; Log it once then mask it */
	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
	       0xffffffff);

	return 0;
}
#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}
#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)
static const char *attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}
static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
	/* Regardless of anything else, clean the validity bit */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
	return 0;
}
#define PGLUE_ATTENTION_VALID			(1 << 29)
#define PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
#define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define PGLUE_ATTENTION_ILT_VALID		(1 << 23)
static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp;

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_DETAILS);

		DP_INFO(p_hwfn,
			"Illegal write by chip to [%08x:%08x] blocked.\n"
			"Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			"Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			addr_hi, addr_lo, details,
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			GET_FIELD(details,
				  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			tmp,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_INFO(p_hwfn,
			"Illegal read by chip from [%08x:%08x] blocked.\n"
			" Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			" Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			addr_hi, addr_lo, details,
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			GET_FIELD(details,
				  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			tmp,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
									 : 0,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
									: 0);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID)
		DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			tmp, addr_hi, addr_lo);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_INFO(p_hwfn,
			"ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

	return 0;
}
#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 reason;

	reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
			QED_DORQ_ATTENTION_REASON_MASK;
	if (reason) {
		u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				     DORQ_REG_DB_DROP_DETAILS);

		DP_INFO(p_hwfn->cdev,
			"DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
			qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			       DORQ_REG_DB_DROP_DETAILS_ADDRESS),
			(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
			GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			reason);
	}

	return -EINVAL;
}
/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};
static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{       /* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
					  (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{       /* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{       /* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{       /* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{       /* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{       /* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"SRIOV disabled", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},
};
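/* The bits[] entries above are walked with a running bit index that advances
 * by ATTENTION_LENGTH(flags), so a single descriptor may stand for up to 32
 * consecutive HW bits; names containing %d are printf templates that get the
 * bit number within the entry filled in at print time.
 */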
static struct aeu_invert_reg_bit *
qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
		      struct aeu_invert_reg_bit *p_bit)
{
	if (!QED_IS_BB(p_hwfn->cdev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
				   struct aeu_invert_reg_bit *p_bit)
{
	return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}
#define ATTN_STATE_BITS         (0xfff)
#define ATTN_BITS_MASKABLE      (0x3ff)
struct qed_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block       *sb_attn;
	dma_addr_t                      sb_phys;

	/* Last seen running index */
	u16                             index;

	/* A mask of the AEU bits resulting in a parity error */
	u32                             parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg           *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16                             known_attn;

	/* Cleanup address for the link's general hw attention */
	u32                             mfw_attn_addr;
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
				      struct qed_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	/* Make certain HW write took effect */
	mmiowb();

	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = QED_SB_ATT_IDX;
	}

	/* Make certain we got a consistent view with HW */
	mmiowb();

	return rc;
}
/**
 * @brief qed_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}
static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
			       enum block_id id,
			       enum dbg_attn_type type, bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	memset(&attn_results, 0, sizeof(attn_results));

	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				   b_clear, &attn_results);
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn,
			  "Failed to parse attention information [status: %s]\n",
			  qed_dbg_get_status_str(status));
	else
		qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *		       this bit to this group.
 * @param bit_index - index of this bit in the aeu_en_reg
 *
 * @return int
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
			    struct aeu_invert_reg_bit *p_aeu,
			    u32 aeu_en_reg,
			    const char *p_bit_name, u32 bitmask)
{
	bool b_fatal = false;
	int rc = -EINVAL;
	u32 val;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		qed_int_attn_print(p_hwfn, p_aeu->block_index,
				   ATTN_TYPE_INTERRUPT, !b_fatal);

	/* If the attention is benign, no need to prevent it */
	if (!rc)
		goto out;

	/* Prevent this Attention from being asserted in the future */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
		p_bit_name);

out:
	return rc;
}
/**
 * @brief qed_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 *
 * @return int
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	int rc = 0;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					MISC_REG_AEU_AFTER_INVERT_1_IGU +
					i * 0x4);
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Find parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

		/* Skip register in which no parity bit is currently set */
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & BIT(bit_idx)))
				qed_int_deassertion_parity(p_hwfn, p_bit,
							   aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;

			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long bitmask;
				u8 bit, bit_len;

				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)find_first_bit(&bitmask,
								 bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						snprintf(bit_name, 30,
							 p_aeu->bit_name, num);
					else
						strlcpy(bit_name,
							p_aeu->bit_name, 30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					qed_int_deassertion_aeu_bit(p_hwfn,
								    p_aeu,
								    aeu_en,
								    bit_name,
								    bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		/* finish reading index before the loop condition */
		dma_rmb();
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
		~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
		p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_ATTN <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	mmiowb();
	barrier();
}
void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix +
	 * inta in non-mask mode, in inta does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Call the registered per-protocol-index callbacks */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_attn, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}

static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}
static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}
/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48

void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!cdev->rx_coalesce_usecs)
			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
		if (!cdev->tx_coalesce_usecs)
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u16 igu_sb_id,
				u32 pi_index,
				enum qed_coalescing_fsm coalescing_fsm,
				u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->cdev))
		return;

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
		       *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
				  CAU_REG_SB_ADDR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, 0);
		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->cdev))
		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				    sb_info->igu_sb_id, 0, 0);
}
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !(p_block->status & QED_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}

static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return QED_SB_INVALID_IDX;
}
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->cdev))
		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == QED_SP_SB_ID)
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		if (IS_PF(p_hwfn->cdev)) {
			struct qed_igu_info *p_info;
			struct qed_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~QED_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->cdev)) {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
		       struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_igu_block *p_block;
	struct qed_igu_info *p_info;

	if (!sb_info)
		return 0;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->cdev)) {
		qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
		return 0;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (!p_block->vector_number) {
		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
		return -EINVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = NULL;
	p_block->status |= QED_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return 0;
}
static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_info.sb_virt,
				  p_sb->sb_info.sb_phys);
	kfree(p_sb);
	p_hwfn->p_sp_sb = NULL;
}

static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
			qed_int_comp_cb_t comp_cb,
			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int rc = -ENOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = 0;
		break;
	}

	return rc;
}
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}

u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the writes to IGU */
	mmiowb();

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}
	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}

void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	mmiowb();

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, QED_SB)) {
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW have provided -
		 * don't forget MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
				RESC_NUM(p_hwfn, QED_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
		}

		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn,
					   NETIF_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have totally
			 * in IGU + number of PF SBs. So we can validate that
			 * we'd have sufficient for VF.
			 */
			if (vfs > p_info->usage.free_cnt +
			    p_info->usage.free_cnt_iov - p_info->usage.cnt) {
				DP_NOTICE(p_hwfn,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return -EINVAL;
			}

			/* Currently cap the number of VF SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & QED_IGU_STATUS_VALID))
			continue;

		if (p_block->status & QED_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
			    p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = qed_rd(p_hwfn, p_ptt,
			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

		if (rval != val) {
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY +
			       sizeof(u32) * igu_sb_id, val);

			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id,
				   p_block->function_id,
				   p_block->is_pf,
				   p_block->vector_number, rval, val);
		}
	}

	return 0;
}
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SB belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the First entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* limit number of prints by having each PF print only its
		 * entries with the exception of PF0 which would print
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non default SB are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}
/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_init(p_hwfn->sp_dpc,
		     qed_int_sp_dpc, (unsigned long)p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
	if (!p_hwfn->sp_dpc)
		return -ENOMEM;

	return 0;
}

static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = NULL;
}

int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_dpc_alloc(p_hwfn);
	if (rc)
		return rc;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}

void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
	qed_int_sp_dpc_free(p_hwfn);
}
void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}

void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}

void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}
2202 int qed_int_set_timer_res(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
,
2203 u8 timer_res
, u16 sb_id
, bool tx
)
2205 struct cau_sb_entry sb_entry
;
2208 if (!p_hwfn
->hw_init_done
) {
2209 DP_ERR(p_hwfn
, "hardware not initialized yet\n");
2213 rc
= qed_dmae_grc2host(p_hwfn
, p_ptt
, CAU_REG_SB_VAR_MEMORY
+
2214 sb_id
* sizeof(u64
),
2215 (u64
)(uintptr_t)&sb_entry
, 2, 0);
2217 DP_ERR(p_hwfn
, "dmae_grc2host failed %d\n", rc
);
2222 SET_FIELD(sb_entry
.params
, CAU_SB_ENTRY_TIMER_RES1
, timer_res
);
2224 SET_FIELD(sb_entry
.params
, CAU_SB_ENTRY_TIMER_RES0
, timer_res
);
2226 rc
= qed_dmae_host2grc(p_hwfn
, p_ptt
,
2227 (u64
)(uintptr_t)&sb_entry
,
2228 CAU_REG_SB_VAR_MEMORY
+
2229 sb_id
* sizeof(u64
), 2, 0);
2231 DP_ERR(p_hwfn
, "dmae_host2grc failed %d\n", rc
);