/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
struct qed_pi_info {
	qed_int_comp_cb_t	comp_cb;
	void			*cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};
enum qed_attention_type {
	QED_ATTN_TYPE_BB,
	QED_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY                (1 << 0)

#define ATTENTION_LENGTH_MASK           (0x00000ff0)
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK           (0x000ff000)
#define ATTENTION_OFFSET_SHIFT          (12)

#define ATTENTION_BB_MASK               (0x00700000)
#define ATTENTION_BB_SHIFT              (20)
#define ATTENTION_BB(value)             (value << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT          BIT(23)

	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};
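
/* Illustration of how the flag macros above compose (not from the original
 * source): ATTENTION_SINGLE is 1 << 4, i.e. a length of one AEU bit and no
 * parity indication, so ATTENTION_LENGTH(ATTENTION_SINGLE) == 1.
 * ATTENTION_PAR_INT is (2 << 4) | 1, i.e. two consecutive AEU bits where the
 * first is a parity indication - ATTENTION_LENGTH() == 2 with
 * ATTENTION_PARITY set. The walkers below advance their bit index by
 * ATTENTION_LENGTH(flags) for each descriptor.
 */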
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};
#define MAX_ATTN_GRPS           (8)
#define NUM_ATTN_REGS           (9)
/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	/* This might occur on certain instances; Log it once then mask it */
	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
	       0xffffffff);

	return 0;
}
#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u16) GET_FIELD(data,
					ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}
#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)
static const char *attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}
static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
	/* Regardless of anything else, clean the validity bit */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);

	return 0;
}
#define PGLUE_ATTENTION_VALID			(1 << 29)
#define PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
#define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define PGLUE_ATTENTION_ILT_VALID		(1 << 23)
static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp;

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_WR_DETAILS);

		DP_INFO(p_hwfn,
			"Illegal write by chip to [%08x:%08x] blocked.\n"
			"Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			"Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			addr_hi, addr_lo, details,
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			GET_FIELD(details,
				  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			tmp,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			GET_FIELD(tmp,
				  PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_INFO(p_hwfn,
			"Illegal read by chip from [%08x:%08x] blocked.\n"
			" Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			" Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			addr_hi, addr_lo, details,
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			GET_FIELD(details,
				  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			tmp,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
									 : 0,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
									: 0);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID)
		DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			tmp, addr_hi, addr_lo);
	}

	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_INFO(p_hwfn,
			"ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

	return 0;
}
#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 reason;

	reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
			QED_DORQ_ATTENTION_REASON_MASK;
	if (reason) {
		u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				     DORQ_REG_DB_DROP_DETAILS);

		DP_INFO(p_hwfn->cdev,
			"DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
			qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			       DORQ_REG_DB_DROP_DETAILS_ADDRESS),
			(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
			GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			reason);
	}

	return -EINVAL;
}
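
/* Illustration (derived from the masks above): DORQ_REG_DB_DROP_DETAILS
 * packs the opaque FID in bits [15:0] and the doorbell size in bits
 * [22:16]; the size field counts 4-byte units, hence the "* 4" when the
 * byte count is printed.
 */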
/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};
static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};
/* Notice aeu_invert_reg must be defined in the same order of bits as HW;  */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{       /* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
					  (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{       /* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{       /* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{       /* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{       /* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{       /* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},

	{
		{       /* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},
};
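
/* Note (added for clarity): within each "After Invert" group the
 * ATTENTION_LENGTH() values of the entries are expected to cover all 32
 * bits of the register, since the walkers below advance bit_idx by each
 * descriptor's length until the whole register has been traversed.
 */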
static struct aeu_invert_reg_bit *
qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
		      struct aeu_invert_reg_bit *p_bit)
{
	if (!QED_IS_BB(p_hwfn->cdev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
				   struct aeu_invert_reg_bit *p_bit)
{
	return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}
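
/* Illustration (assuming the NWS/NWM naming in the table above): on a BB
 * adapter a descriptor flagged ATTENTION_BB_DIFFERENT | ATTENTION_BB(n) is
 * re-routed to aeu_descs_special[n], so e.g. the AH "NWM Interrupt" bit
 * carrying AEU_INVERT_REG_SPECIAL_CNIG_3 resolves to "CNIG port 3" on BB.
 */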
#define ATTN_STATE_BITS         (0xfff)
#define ATTN_BITS_MASKABLE      (0x3ff)
struct qed_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block       *sb_attn;
	dma_addr_t                      sb_phys;

	/* Last seen running index */
	u16                             index;

	/* A mask of the AEU bits resulting in a parity error */
	u32                             parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg           *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16                             known_attn;

	/* Cleanup address for the link's general hw attention */
	u32                             mfw_attn_addr;
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
				      struct qed_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	/* Make certain HW write took effect */
	mmiowb();

	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = QED_SB_ATT_IDX;
	}

	/* Make certain we got a consistent view with HW */
	mmiowb();

	return rc;
}
/**
 * @brief qed_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}
static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
			       enum block_id id,
			       enum dbg_attn_type type, bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	memset(&attn_results, 0, sizeof(attn_results));

	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				   b_clear, &attn_results);
	if (status != DBG_STATUS_OK)
		DP_INFO(p_hwfn,
			"Failed to parse attention information [status: %s]\n",
			qed_dbg_get_status_str(status));
	else
		qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *		       this bit to this group.
 * @param bit_index - index of this bit in the aeu_en_reg
 *
 * @return int
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
			    struct aeu_invert_reg_bit *p_aeu,
			    u32 aeu_en_reg,
			    const char *p_bit_name, u32 bitmask)
{
	bool b_fatal = false;
	int rc = -EINVAL;
	u32 val;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		qed_int_attn_print(p_hwfn, p_aeu->block_index,
				   ATTN_TYPE_INTERRUPT, !b_fatal);

	/* If the attention is benign, no need to prevent it */
	if (!rc)
		goto out;

	/* Prevent this Attention from being asserted in the future */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
		p_bit_name);

out:
	return rc;
}
/**
 * @brief qed_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return int
 *
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	int rc = 0;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					MISC_REG_AEU_AFTER_INVERT_1_IGU +
					i * 0x4);
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Find parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

		/* Skip register in which no parity bit is currently set */
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & BIT(bit_idx)))
				qed_int_deassertion_parity(p_hwfn, p_bit,
							   aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;

			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				long unsigned int bitmask;
				u8 bit, bit_len;

				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)find_first_bit(&bitmask,
								 bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						snprintf(bit_name, 30,
							 p_aeu->bit_name, num);
					else
						strlcpy(bit_name,
							p_aeu->bit_name, 30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					qed_int_deassertion_aeu_bit(p_hwfn,
								    p_aeu,
								    aeu_en,
								    bit_name,
								    bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}
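
/* Worked example of the AEU enable addressing used above (illustrative):
 * the nine per-group enable registers are consecutive and the eight groups
 * are strided by NUM_ATTN_REGS registers, so group k = 1, register i = 2
 * lives at MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (2 + 1 * 9) * sizeof(u32),
 * i.e. 44 bytes past the base.
 */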
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	u16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
		~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
		p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}
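
/* Truth table for the computation above (illustrative): a bit is newly
 * asserted when atten_bits = 1, atten_ack = 0 and it is absent from
 * known_attn; it is deasserted when atten_bits = 0, atten_ack = 1 and it
 * was previously recorded in known_attn. Any other combination is a
 * transient state and is ignored until HW settles.
 */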
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_ATTN <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	mmiowb();
}
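
/* Example encoding (illustrative): acknowledging attention consumer 5 with
 * no change to the interrupt state composes
 * (5 << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
 * (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
 * (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
 * (IGU_SEG_ACCESS_ATTN << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)
 * and writes it as a single doorbell to the SB's IGU address.
 */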
void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix +
	 * inta in non-mask mode, in inta does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. If not just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Look for a free index */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_attn, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}
static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}
/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48
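
/* Worked example (illustrative, in CAU time units): with
 * rx_coalesce_usecs = 24 the code below selects timer_res = 0 (24 <= 0x7F)
 * and timeset = 24 >> 0 = 24, giving a timeout of 24 << (0 + 1) = 48.
 * A value of 0x90 (144) would instead select timer_res = 1 and
 * timeset = 72, i.e. a timeout of 72 << 2 = 288.
 */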
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!cdev->rx_coalesce_usecs)
			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
		if (!cdev->tx_coalesce_usecs)
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u16 igu_sb_id,
				u32 pi_index,
				enum qed_coalescing_fsm coalescing_fsm,
				u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->cdev))
		return;

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
		       *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}
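
/* Illustration: each IGU SB owns PIS_PER_SB_E4 consecutive PI entries in
 * CAU_REG_PI_MEMORY, so e.g. SB 3 with pi_index 2 is programmed at word
 * offset 3 * PIS_PER_SB_E4 + 2.
 */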
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
				  CAU_REG_SB_ADDR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, 0);
		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->cdev))
		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				    sb_info->igu_sb_id, 0, 0);
}
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !(p_block->status & QED_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}
static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return QED_SB_INVALID_IDX;
}
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->cdev))
		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == QED_SP_SB_ID)
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		if (IS_PF(p_hwfn->cdev)) {
			struct qed_igu_info *p_info;
			struct qed_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~QED_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->cdev)) {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}
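
/* Illustration of the address computation above: for a PF, IGU SB 5 gets
 * igu_addr = regview + GTT_BAR0_MAP_REG_IGU_CMD + (5 << 3), i.e. 40 bytes
 * past the IGU command window; a VF instead goes through its own BAR0
 * window at PXP_VF_BAR0_START_IGU with IGU_CMD_INT_ACK_BASE folded in.
 */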
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
		       struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_igu_block *p_block;
	struct qed_igu_info *p_info;

	if (!sb_info)
		return 0;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->cdev)) {
		qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
		return 0;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (!p_block->vector_number) {
		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
		return -EINVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = NULL;
	p_block->status |= QED_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return 0;
}
static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_info.sb_virt,
				  p_sb->sb_info.sb_phys);
	kfree(p_sb);
	p_hwfn->p_sp_sb = NULL;
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
			qed_int_comp_cb_t comp_cb,
			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int rc = -ENOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = 0;
		break;
	}

	return rc;
}
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}
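
/* Typical usage sketch (illustrative; my_comp_cb and my_cookie are
 * hypothetical names, not part of this driver): a protocol module
 * registers a completion handler and receives both the PI index and a
 * pointer to the firmware consumer it should track:
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *
 *	rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				 &sb_idx, &p_fw_cons);
 *	...
 *	qed_int_unregister_cb(p_hwfn, sb_idx);
 */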
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the writes to IGU */
	mmiowb();

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}
	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	mmiowb();

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, QED_SB)) {
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW have provided -
		 * don't forget MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
				RESC_NUM(p_hwfn, QED_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
		}

		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn,
					   NETIF_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have totally
			 * in IGU + number of PF SBs. So we can validate that
			 * we'd have sufficient for VF.
			 */
			if (vfs > p_info->usage.free_cnt +
			    p_info->usage.free_cnt_iov - p_info->usage.cnt) {
				DP_NOTICE(p_hwfn,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return -EINVAL;
			}

			/* Currently cap the number of VFs SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & QED_IGU_STATUS_VALID))
			continue;

		if (p_block->status & QED_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
			    p_info->usage.iov_cnt - vf_sbs;
			vf_sbs--;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = qed_rd(p_hwfn, p_ptt,
			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

		if (rval != val) {
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY +
			       sizeof(u32) * igu_sb_id, val);

			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id,
				   p_block->function_id,
				   p_block->is_pf,
				   p_block->vector_number, rval, val);
		}
	}

	return 0;
}
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SB belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the First entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* limit number of prints by having each PF print only its
		 * entries with the exception of PF0 which would print
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non default SB are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}
/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_init(p_hwfn->sp_dpc,
		     qed_int_sp_dpc, (unsigned long)p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}
static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
	if (!p_hwfn->sp_dpc)
		return -ENOMEM;

	return 0;
}
static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = NULL;
}
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_dpc_alloc(p_hwfn);
	if (rc)
		return rc;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}
void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
	qed_int_sp_dpc_free(p_hwfn);
}
void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}
void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}