// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
struct qed_pi_info {
	qed_int_comp_cb_t	comp_cb;
	void			*cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB];
};

enum qed_attention_type {
	QED_ATTN_TYPE_ATTN,
	QED_ATTN_TYPE_PARITY,
};
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY                (1 << 0)

#define ATTENTION_LENGTH_MASK           (0x00000ff0)
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK           (0x000ff000)
#define ATTENTION_OFFSET_SHIFT          (12)

#define ATTENTION_BB_MASK               (0x00700000)
#define ATTENTION_BB_SHIFT              (20)
#define ATTENTION_BB(value)             ((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT          BIT(23)

#define ATTENTION_CLEAR_ENABLE          BIT(28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS           (8)
#define NUM_ATTN_REGS           (9)
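/* Example encoding: ATTENTION_PAR_INT is (2 << ATTENTION_LENGTH_SHIFT) |
 * ATTENTION_PARITY, i.e. a source spanning two consecutive AEU bits
 * (ATTENTION_LENGTH() == 2) whose low bit is a parity indication.
 */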
/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	/* This might occur on certain instances; Log it once then mask it */
	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
	       0xffffffff);

	return 0;
}
#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}
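/* GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID) expands to
 * ((data) >> 14) & 0xf using the _SHIFT/_MASK pair defined above; e.g.
 * data = 0x0000c000 decodes to PF ID 0x3.
 */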
#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)
static const char *attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}
static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register.
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
	/* Regardless of anything else, clean the validity bit */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
	return 0;
}
#define PGLUE_ATTENTION_VALID			(1 << 29)
#define PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
#define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define PGLUE_ATTENTION_ILT_VALID		(1 << 23)
int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
				bool hw_init)
{
	char msg[256];
	u32 tmp;

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_DETAILS);

		snprintf(msg, sizeof(msg),
			 "Illegal write by chip to [%08x:%08x] blocked.\n"
			 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
			 addr_hi, addr_lo, details,
			 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			 !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
			 tmp,
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));

		if (hw_init)
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
		else
			DP_NOTICE(p_hwfn, "%s\n", msg);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_NOTICE(p_hwfn,
			  "Illegal read by chip from [%08x:%08x] blocked.\n"
			  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			  GET_FIELD(details,
				    PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			  tmp,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
		snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);

		if (hw_init)
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
		else
			DP_NOTICE(p_hwfn, "%s\n", msg);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));

	return 0;
}
static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
	return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}
static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
{
	qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
			  "FW assertion!\n");

	/* Clear assert indications */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_GENERAL_ATTN_32, 0);

	return -EINVAL;
}
static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return 0;
}
#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)

#define QED_DB_REC_COUNT	1000
#define QED_DB_REC_INTERVAL	100
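/* Worst case, qed_db_rec_flush_queue() below polls QED_DB_REC_COUNT times
 * with a QED_DB_REC_INTERVAL usec delay each, i.e. 1000 * 100us = 100ms
 * before declaring the flush failed.
 */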
static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 count = QED_DB_REC_COUNT;
	u32 usage = 1;

	/* Flush any pending (e)dpms as they may never arrive */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* Wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the PCI. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and the other half dropped. Another EDPM
	 * doorbell to the same address (from the doorbell recovery mechanism or
	 * from the doorbelling entity) could have the first half dropped and
	 * the second half interpreted as a continuation of the first. To
	 * prevent such malformed doorbells from reaching the device, flush the
	 * queue before releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		udelay(QED_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->cdev,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
		return -EBUSY;
	}

	return 0;
}
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 attn_ovfl, cur_ovfl;
	int rc;

	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
				       &p_hwfn->db_recovery_info.overflow);
	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!cur_ovfl && !attn_ovfl)
		return 0;

	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
		  attn_ovfl, cur_ovfl);

	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	/* Release overflow sticky indication (stop silently dropping everything) */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* Repeat all last doorbells (doorbell drop recovery) */
	qed_db_recovery_execute(p_hwfn);

	return 0;
}
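/* Doorbell recovery flow in short: the DORQ attention handler only latches
 * QED_OVERFLOW_BIT and schedules the periodic handler; qed_db_rec_handler()
 * then flushes the doorbell queue, releases the sticky overflow indication
 * and replays the saved doorbells via qed_db_recovery_execute().
 */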
static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	u32 overflow;
	int rc;

	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!overflow)
		goto out;

	/* Run PF doorbell recovery in next periodic handler */
	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);

	if (!p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			goto out;
	}

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
out:
	/* Schedule the handler even if overflow was not detected */
	qed_periodic_db_rec_start(p_hwfn);
}
static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;

	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	if (int_sts == 0xdeadbeaf) {
		DP_NOTICE(p_hwfn->cdev,
			  "DORQ is being reset, skipping int_sts handler\n");
		return 0;
	}

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted
	 * again. The masked almost-full indication may also be set; ignore it.
	 */
	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
		return 0;

	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* Obtain data about db drop/overflow */
		first_drop_reason = qed_rd(p_hwfn, p_ptt,
					   DORQ_REG_DB_DROP_REASON) &
		    QED_DORQ_ATTENTION_REASON_MASK;
		details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
		address = qed_rd(p_hwfn, p_ptt,
				 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		all_drops_reason = qed_rd(p_hwfn, p_ptt,
					  DORQ_REG_DB_DROP_DETAILS_REASON);

		DP_NOTICE(p_hwfn->cdev,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
			  address,
			  GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason);

		/* Clear the doorbell drop details and prepare for next drop */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* Mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
		       DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* If there are no indications other than drop indications, success */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return 0;
	}

	/* Some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return -EINVAL;
}
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->cdev->recov_in_prog)
		return 0;

	p_hwfn->db_recovery_info.dorq_attn = true;
	qed_dorq_attn_overflow(p_hwfn);

	return qed_dorq_attn_int_sts(p_hwfn);
}
static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->db_recovery_info.dorq_attn)
		goto out;

	/* Call DORQ callback if the attention was missed */
	qed_dorq_attn_cb(p_hwfn);
out:
	p_hwfn->db_recovery_info.dorq_attn = false;
}
/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
					  (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{	/* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE |
			 ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
			 MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE |
			 ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
			 MAX_BLOCK_ID},
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{	/* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{	/* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{	/* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{	/* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},
};
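/* Each descriptor consumes ATTENTION_LENGTH(flags) consecutive bits of its
 * 32-bit AEU register; e.g. "SW timers #%d" above covers eight bits and
 * also carries (1 << ATTENTION_OFFSET_SHIFT), the offset at which the
 * printed numbering of those bits starts (per the "Multiple bits start
 * with this offset" note near the flag definitions).
 */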
static struct aeu_invert_reg_bit *
qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
		      struct aeu_invert_reg_bit *p_bit)
{
	if (!QED_IS_BB(p_hwfn->cdev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}
static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
				   struct aeu_invert_reg_bit *p_bit)
{
	return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}
#define ATTN_STATE_BITS         (0xfff)
#define ATTN_BITS_MASKABLE      (0x3ff)
struct qed_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block	*sb_attn;
	dma_addr_t			sb_phys;

	/* Last seen running index */
	u16				index;

	/* A mask of the AEU bits resulting in a parity error */
	u32				parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg		*p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16				known_attn;

	/* Cleanup address for the link's general hw attention */
	u32				mfw_attn_addr;
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
				      struct qed_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = QED_SB_ATT_IDX;
	}

	return rc;
}
/**
 * qed_int_assertion() - Handle asserted attention bits.
 *
 * @p_hwfn: HW device data.
 * @asserted_bits: Newly asserted bits.
 *
 * Return: Zero value.
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}
static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
			       enum block_id id,
			       enum dbg_attn_type type, bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	memset(&attn_results, 0, sizeof(attn_results));

	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				   b_clear, &attn_results);
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn,
			  "Failed to parse attention information [status: %s]\n",
			  qed_dbg_get_status_str(status));
	else
		qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
 * qed_int_deassertion_aeu_bit() - Handles the effects of a single
 * cause of the attention.
 *
 * @p_hwfn: HW device data.
 * @p_aeu: Descriptor of an AEU bit which caused the attention.
 * @aeu_en_reg: Register offset of the AEU enable reg. which configured
 *              this bit to this group.
 * @p_bit_name: AEU bit description for logging purposes.
 * @bitmask: Index of this bit in the aeu_en_reg.
 *
 * Return: Zero on success, negative errno otherwise.
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
			    struct aeu_invert_reg_bit *p_aeu,
			    u32 aeu_en_reg,
			    const char *p_bit_name, u32 bitmask)
{
	bool b_fatal = false;
	int rc = -EINVAL;
	u32 val;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		qed_int_attn_print(p_hwfn, p_aeu->block_index,
				   ATTN_TYPE_INTERRUPT, !b_fatal);

	/* Reach assertion if attention is fatal */
	if (b_fatal)
		qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
				  "`%s': Fatal attention\n",
				  p_bit_name);
	else /* If the attention is benign, no need to prevent it */
		goto out;

	/* Prevent this Attention from being asserted in the future */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
		p_bit_name);

	/* Re-enable FW assertion (Gen 32) interrupts */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     MISC_REG_AEU_ENABLE4_IGU_OUT_0);
	val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32;
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       MISC_REG_AEU_ENABLE4_IGU_OUT_0, val);

out:
	return rc;
}
/**
 * qed_int_deassertion_parity() - Handle a single parity AEU source.
 *
 * @p_hwfn: HW device data.
 * @p_aeu: Descriptor of an AEU bit which caused the parity.
 * @aeu_en_reg: Address of the AEU enable register.
 * @bit_index: Index (0-31) of an AEU bit.
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
/**
 * qed_int_deassertion() - Handle deassertion of previously asserted
 * attentions.
 *
 * @p_hwfn: HW device data.
 * @deasserted_bits: newly deasserted bits.
 *
 * Return: Zero value.
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	int rc = 0;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					MISC_REG_AEU_AFTER_INVERT_1_IGU +
					i * 0x4);
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Find parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

		/* Skip register in which no parity bit is currently set */
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & BIT(bit_idx)))
				qed_int_deassertion_parity(p_hwfn, p_bit,
							   aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;

			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
				long unsigned int bitmask;
				u8 bit, bit_len;

				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)find_first_bit(&bitmask,
								 bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						snprintf(bit_name, 30,
							 p_aeu->bit_name, num);
					else
						strscpy(bit_name,
							p_aeu->bit_name, 30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					qed_int_deassertion_aeu_bit(p_hwfn,
								    p_aeu,
								    aeu_en,
								    bit_name,
								    bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Handle missed DORQ attention */
	qed_dorq_attn_handler(p_hwfn);

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}
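/* Note on the scan above: descriptor index j and AEU bit index bit_idx
 * deliberately advance at different rates - a "Reserved %d" entry with
 * flags (6 << ATTENTION_LENGTH_SHIFT) moves bit_idx forward by six
 * positions in a single iteration.
 */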
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		/* finish reading index before the loop condition */
		dma_rmb();
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
		~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
		p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr, u32 ack_cons)
{
	u32 igu_ack;

	igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		   (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		   (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		   (IGU_SEG_ACCESS_ATTN <<
		    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	barrier();
}
void qed_int_sp_dpc(struct tasklet_struct *t)
{
	struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc);
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix +
	 * inta in non-mask mode, in inta does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Look for a free index */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_attn, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32 && j < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}
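/* Parity-mask example: an ATTENTION_PAR_INT descriptor occupies two AEU
 * bits with the parity indication in the lower one, so only bit k is ORed
 * into parity_mask[i] before k advances by ATTENTION_LENGTH() == 2.
 */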
static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}
/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48
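/* Worked example: rx_coalesce_usecs = 24 fits in 7 bits, so timer_res = 0
 * and timeset = 24 >> 0 = 24; per the formula above the resulting
 * coalescing timeout is 24 << (0 + 1) = 48 time units.
 */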
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state, params = 0, data = 0;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!cdev->rx_coalesce_usecs)
			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
		if (!cdev->tx_coalesce_usecs)
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;

	SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;

	SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	p_sb_entry->params = cpu_to_le32(params);

	SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state);
	p_sb_entry->data = cpu_to_le32(data);
}
static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u16 igu_sb_id,
				u32 pi_index,
				enum qed_coalescing_fsm coalescing_fsm,
				u8 timeset)
{
	u32 sb_offset, pi_offset;
	u32 prod = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);

	sb_offset = igu_sb_id * PIS_PER_SB;
	pi_offset = sb_offset + pi_index;

	if (p_hwfn->hw_init_done)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod);
	else
		STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     prod);
}
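/* PI entries live in one flat CAU array with PIS_PER_SB slots per status
 * block; e.g. igu_sb_id = 5 and pi_index = 2 address the entry at
 * CAU_REG_PI_MEMORY + (5 * PIS_PER_SB + 2) * sizeof(u32).
 */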
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
				  CAU_REG_SB_ADDR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->cdev))
		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				    sb_info->igu_sb_id, 0, 0);
}
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
					  bool b_is_pf)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !(p_block->status & QED_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}
static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return QED_SB_INVALID_IDX;
}
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->cdev))
		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == QED_SP_SB_ID)
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}
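/* The "sb_id + 1" above skips PF vector 0, which is reserved for the
 * slowpath (default) SB - see the check in qed_int_sb_release() - so
 * client SB n maps to PF vector n + 1.
 */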
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		if (IS_PF(p_hwfn->cdev)) {
			struct qed_igu_info *p_info;
			struct qed_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~QED_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->cdev)) {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
		       struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_igu_block *p_block;
	struct qed_igu_info *p_info;

	if (!sb_info)
		return 0;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->cdev)) {
		qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
		return 0;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (!p_block->vector_number) {
		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
		return -EINVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = NULL;
	p_block->status |= QED_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return 0;
}
static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_info.sb_virt,
				  p_sb->sb_info.sb_phys);
	kfree(p_sb);
	p_hwfn->p_sp_sb = NULL;
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
			qed_int_comp_cb_t comp_cb,
			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int rc = -ENOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = 0;
		break;
	}

	return rc;
}
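/* Typical usage (sketch; my_comp_cb/my_cookie are hypothetical caller
 * names, not part of this file):
 *
 *	rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				 &sb_idx, &p_fw_cons);
 *
 * after which the caller compares *p_fw_cons against its own consumer
 * copy whenever the slowpath DPC invokes my_comp_cb.
 */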
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}
	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, QED_SB)) {
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW have provided -
		 * don't forget MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
				RESC_NUM(p_hwfn, QED_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
		}

		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have totally
			 * in IGU + number of PF SBs. So we can validate that
			 * we'd have sufficient for VF.
			 */
			if (vfs > p_info->usage.free_cnt +
				  p_info->usage.free_cnt_iov -
				  p_info->usage.cnt) {
				DP_NOTICE(p_hwfn,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return -EINVAL;
			}

			/* Currently cap the number of VFs SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & QED_IGU_STATUS_VALID))
			continue;

		if (p_block->status & QED_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
			    p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = qed_rd(p_hwfn, p_ptt,
			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

		if (rval != val) {
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY +
			       sizeof(u32) * igu_sb_id, val);

			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id,
				   p_block->function_id,
				   p_block->is_pf,
				   p_block->vector_number, rval, val);
		}
	}

	return 0;
}

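/* Decode a single IGU mapping line into its qed_igu_block. Each line is a
 * 32-bit value packing the function number, a PF/VF flag and the vector
 * number; GET_FIELD() extracts a field via its _MASK/_SHIFT pair, e.g.
 * GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID) yields the PF-valid bit.
 */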
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}

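/* Build the driver's shadow of the IGU CAM: scan every mapping line, claim
 * the SBs that belong to this PF or to its VFs, and elect the first such
 * SB as the default (slowpath) SB.
 */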
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SB belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* Limit number of prints by having each PF print only its
		 * entries with the exception of PF0 which would print
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non default SB are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}

/**
 * qed_int_igu_init_rt() - Initialize IGU runtime registers.
 *
 * @p_hwfn: HW device data.
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

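/* Read the 64-bit SISR (single ISR) status from the IGU through the BAR0
 * GTT window, assembled from two 32-bit halves; each IGU command address
 * covers eight bytes, hence the '* 8' scaling.
 */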
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}

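/* Register the slowpath DPC tasklet; qed_int_sp_dpc() then services
 * slowpath interrupts once b_sp_dpc_enabled is set.
 */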
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc);
	p_hwfn->b_sp_dpc_enabled = true;
}

int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}

void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
}

void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}

void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}

void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}

void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
{
	cdev->attn_clr_en = clr_enable;
}

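/* Change the coalescing timer resolution of one SB's CAU entry in place:
 * DMA the entry to the host, patch CAU_SB_ENTRY_TIMER_RES0 (Rx) or
 * CAU_SB_ENTRY_TIMER_RES1 (Tx), and DMA it back to CAU_REG_SB_VAR_MEMORY.
 */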
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	u32 params;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	params = le32_to_cpu(sb_entry.params);

	if (tx)
		SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	sb_entry.params = cpu_to_le32(params);

	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, NULL);
	if (rc)
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);

	return rc;
}

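/* Snapshot a status block's IGU producer/consumer values and its CAU
 * per-protocol indices for debugging; PF-only, and the SB id must be in
 * range.
 */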
int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info)
{
	u16 sbid = p_sb->igu_sb_id;
	u32 i;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (sbid >= NUM_OF_SBS(p_hwfn->cdev))
		return -EINVAL;

	p_info->igu_prod = qed_rd(p_hwfn, p_ptt,
				  IGU_REG_PRODUCER_MEMORY + sbid * 4);
	p_info->igu_cons = qed_rd(p_hwfn, p_ptt,
				  IGU_REG_CONSUMER_MEM + sbid * 4);

	for (i = 0; i < PIS_PER_SB; i++)
		p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt,
					    CAU_REG_PI_MEMORY +
					    sbid * 4 * PIS_PER_SB + i * 4);

	return 0;
}
