// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
struct qed_pi_info {
	qed_int_comp_cb_t	comp_cb;
	void			*cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};
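
/*
 * Entries in pi_info_arr are claimed at runtime through
 * qed_int_register_cb() further below: a NULL comp_cb marks a free slot,
 * and qed_int_sp_dpc() invokes every non-NULL callback (with its cookie)
 * when the slowpath status-block index advances.
 */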
enum qed_attention_type {
	QED_ATTN_TYPE_ATTN,
	QED_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0x00700000)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		BIT(23)

#define ATTENTION_CLEAR_ENABLE		BIT(28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};
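
/*
 * Flags layout recap (see the masks above): bit 0 marks a parity source,
 * bits [11:4] hold how many consecutive AEU bits the descriptor spans,
 * and bits [19:12] bias the number printed into "...%d" bit names. For
 * example, ATTENTION_PAR_INT is a two-bit entry (parity + interrupt), so
 * ATTENTION_LENGTH(ATTENTION_PAR_INT) evaluates to 2.
 */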
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS		(8)
#define NUM_ATTN_REGS		(9)
/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	/* This might occur on certain instances; Log it once then mask it */
	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
	       0xffffffff);

	return 0;
}
#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}
#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)
static const char *attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}
static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
	/* Regardless of anything else, clean the validity bit */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);

	return 0;
}
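
/*
 * Note on the handler above: the GRC timeout data register reports the
 * failing address in dword granularity, which is why
 * GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) is shifted left by 2 before
 * being printed as a byte address.
 */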
#define PGLUE_ATTENTION_VALID			(1 << 29)
#define PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
#define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define PGLUE_ATTENTION_ILT_VALID		(1 << 23)
int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
				bool hw_init)
{
	char msg[256];
	u32 tmp;

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_DETAILS);

		snprintf(msg, sizeof(msg),
			 "Illegal write by chip to [%08x:%08x] blocked.\n"
			 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
			 addr_hi, addr_lo, details,
			 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			 !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
			 tmp,
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));

		if (hw_init)
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
		else
			DP_NOTICE(p_hwfn, "%s\n", msg);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_NOTICE(p_hwfn,
			  "Illegal read by chip from [%08x:%08x] blocked.\n"
			  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			  GET_FIELD(details,
				    PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			  tmp,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
		snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);

		if (hw_init)
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
		else
			DP_NOTICE(p_hwfn, "%s\n", msg);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));

	return 0;
}
static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
	return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}
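
/*
 * The DPC wrapper above passes hw_init = false so that blocked chip
 * accesses are reported at notice level; callers running during early HW
 * initialization can pass true instead, demoting the same reports to
 * NETIF_MSG_INTR verbosity via the hw_init branches in the handler.
 */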
static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
{
	qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
			  "FW assertion!\n");

	return -EINVAL;
}

static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return 0;
}
#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)

#define QED_DB_REC_COUNT	1000
#define QED_DB_REC_INTERVAL	100
static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 count = QED_DB_REC_COUNT;
	u32 usage = 1;

	/* Flush any pending (e)dpms as they may never arrive */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the pci. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and the other half dropped. Another EDPM
	 * doorbell to the same address (from the doorbell recovery mechanism or
	 * from the doorbelling entity) could have its first half dropped and its
	 * second half interpreted as continuation of the first. To prevent such
	 * malformed doorbells from reaching the device, flush the queue before
	 * releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		udelay(QED_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->cdev,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
		return -EBUSY;
	}

	return 0;
}
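
/*
 * Worst case the loop above polls DORQ_REG_PF_USAGE_CNT
 * QED_DB_REC_COUNT (1000) times with a QED_DB_REC_INTERVAL (100) usec
 * delay between reads, i.e. the queue gets roughly 100 ms to drain
 * before the flush is declared failed.
 */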
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 attn_ovfl, cur_ovfl;
	int rc;

	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
				       &p_hwfn->db_recovery_info.overflow);
	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!cur_ovfl && !attn_ovfl)
		return 0;

	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
		  attn_ovfl, cur_ovfl);

	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	/* Release overflow sticky indication (stop silently dropping everything) */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* Repeat all last doorbells (doorbell drop recovery) */
	qed_db_recovery_execute(p_hwfn);

	return 0;
}
static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	u32 overflow;
	int rc;

	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!overflow)
		goto out;

	/* Run PF doorbell recovery in next periodic handler */
	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);

	if (!p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			goto out;
	}

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
out:
	/* Schedule the handler even if overflow was not detected */
	qed_periodic_db_rec_start(p_hwfn);
}
static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted again.
	 * The masked almost full indication may also be set. Ignoring.
	 */
	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
		return 0;

	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* Obtain data about db drop/overflow */
		first_drop_reason = qed_rd(p_hwfn, p_ptt,
					   DORQ_REG_DB_DROP_REASON) &
				    QED_DORQ_ATTENTION_REASON_MASK;
		details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
		address = qed_rd(p_hwfn, p_ptt,
				 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		all_drops_reason = qed_rd(p_hwfn, p_ptt,
					  DORQ_REG_DB_DROP_DETAILS_REASON);

		DP_NOTICE(p_hwfn->cdev,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
			  address,
			  GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason);

		/* Clear the doorbell drop details and prepare for next drop */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* Mark interrupt as handled (note: even if drop was due to a different
		 * reason than overflow we mark as handled)
		 */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
		       DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* If there are no indications other than drop indications, success */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return 0;
	}

	/* Some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return -EINVAL;
}
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	p_hwfn->db_recovery_info.dorq_attn = true;
	qed_dorq_attn_overflow(p_hwfn);

	return qed_dorq_attn_int_sts(p_hwfn);
}

static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->db_recovery_info.dorq_attn)
		goto out;

	/* Call DORQ callback if the attention was missed */
	qed_dorq_attn_cb(p_hwfn);
out:
	p_hwfn->db_recovery_info.dorq_attn = false;
}
/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};
static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
					  (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{	/* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE |
			 ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
			 MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE |
			 ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
			 MAX_BLOCK_ID},
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{	/* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{	/* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{	/* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{	/* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},
};
static struct aeu_invert_reg_bit *
qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
		      struct aeu_invert_reg_bit *p_bit)
{
	if (!QED_IS_BB(p_hwfn->cdev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}
static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
				   struct aeu_invert_reg_bit *p_bit)
{
	return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}
#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
struct qed_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block	*sb_attn;
	dma_addr_t			sb_phys;

	/* Last seen running index */
	u16				index;

	/* A mask of the AEU bits resulting in a parity error */
	u32				parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg		*p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16				known_attn;

	/* Cleanup address for the link's general hw attention */
	u32				mfw_attn_addr;
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
				      struct qed_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = QED_SB_ATT_IDX;
	}

	return rc;
}
/**
 * qed_int_assertion() - Handle asserted attention bits.
 *
 * @p_hwfn: HW device data.
 * @asserted_bits: Newly asserted bits.
 *
 * Return: Zero value.
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}
static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
			       enum block_id id,
			       enum dbg_attn_type type, bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	memset(&attn_results, 0, sizeof(attn_results));

	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				   b_clear, &attn_results);
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn,
			  "Failed to parse attention information [status: %s]\n",
			  qed_dbg_get_status_str(status));
	else
		qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
 * qed_int_deassertion_aeu_bit() - Handles the effects of a single
 * cause of the attention.
 *
 * @p_hwfn: HW device data.
 * @p_aeu: Descriptor of an AEU bit which caused the attention.
 * @aeu_en_reg: Register offset of the AEU enable reg. which configured
 *              this bit to this group.
 * @p_bit_name: AEU bit description for logging purposes.
 * @bitmask: Index of this bit in the aeu_en_reg.
 *
 * Return: Zero on success, negative errno otherwise.
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
			    struct aeu_invert_reg_bit *p_aeu,
			    u32 aeu_en_reg,
			    const char *p_bit_name, u32 bitmask)
{
	bool b_fatal = false;
	int rc = -EINVAL;
	u32 val;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		qed_int_attn_print(p_hwfn, p_aeu->block_index,
				   ATTN_TYPE_INTERRUPT, !b_fatal);

	/* Reach assertion if attention is fatal */
	if (b_fatal)
		qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
				  "`%s': Fatal attention\n",
				  p_bit_name);
	else /* If the attention is benign, no need to prevent it */
		goto out;

	/* Prevent this Attention from being asserted in the future */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
		p_bit_name);

out:
	return rc;
}
/**
 * qed_int_deassertion_parity() - Handle a single parity AEU source.
 *
 * @p_hwfn: HW device data.
 * @p_aeu: Descriptor of an AEU bit which caused the parity.
 * @aeu_en_reg: Address of the AEU enable register.
 * @bit_index: Index (0-31) of an AEU bit.
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
/**
 * qed_int_deassertion() - Handle deassertion of previously asserted
 * attentions.
 *
 * @p_hwfn: HW device data.
 * @deasserted_bits: newly deasserted bits.
 *
 * Return: Zero value.
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	int rc = 0;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					MISC_REG_AEU_AFTER_INVERT_1_IGU +
					i * 0x4);
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Find parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

		/* Skip register in which no parity bit is currently set */
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & BIT(bit_idx)))
				qed_int_deassertion_parity(p_hwfn, p_bit,
							   aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;

			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				long unsigned int bitmask;
				u8 bit, bit_len;

				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip the parity bit itself */
					bit++;
					bit_len--;
				}

				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)find_first_bit(&bitmask,
								 bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						snprintf(bit_name, 30,
							 p_aeu->bit_name, num);
					else
						strscpy(bit_name,
							p_aeu->bit_name, 30);

					/* We now need to pass bitmask in its
					 * original form.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					qed_int_deassertion_aeu_bit(p_hwfn,
								    p_aeu,
								    aeu_en,
								    bit_name,
								    bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Handle missed DORQ attention */
	qed_dorq_attn_handler(p_hwfn);

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}
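
/*
 * A note on the enable-register arithmetic above: the AEU exposes
 * NUM_ATTN_REGS (9) 32-bit enable registers per output group, laid out
 * group after group, so group k / register i is read from
 * MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i + k * NUM_ATTN_REGS) * sizeof(u32).
 */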
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		/* finish reading index before the loop condition */
		dma_rmb();
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}
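
/*
 * In other words, per attention line:
 *   set in attn_bits, clear in attn_acks, not yet known   -> newly asserted
 *   clear in attn_bits, set in attn_acks, currently known -> newly deasserted
 * Everything else is steady state and is deliberately ignored.
 */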
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr, u32 ack_cons)
{
	u32 igu_ack;

	igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		   (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		   (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		   (IGU_SEG_ACCESS_ATTN <<
		    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	barrier();
}
void qed_int_sp_dpc(struct tasklet_struct *t)
{
	struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc);
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix +
	 * inta in non-mask mode, in inta does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Invoke every registered protocol-index callback */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_attn, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}
static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}
/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48

void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state, params = 0, data = 0;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!cdev->rx_coalesce_usecs)
			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
		if (!cdev->tx_coalesce_usecs)
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	p_sb_entry->params = cpu_to_le32(params);

	SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state);
	p_sb_entry->data = cpu_to_le32(data);
}
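
/*
 * Worked example of the resolution/timeset split used above and in
 * qed_int_cau_conf_sb() below: a requested rx_coalesce_usecs of 200 does
 * not fit the 7-bit timeset, so timer_res becomes 1 and the programmed
 * timeset is 200 >> 1 = 100; values up to 0x7F keep timer_res 0 and are
 * programmed as-is.
 */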
static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u16 igu_sb_id,
				u32 pi_index,
				enum qed_coalescing_fsm coalescing_fsm,
				u8 timeset)
{
	u32 sb_offset, pi_offset;
	u32 prod = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	pi_offset = sb_offset + pi_index;

	if (p_hwfn->hw_init_done)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod);
	else
		STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     prod);
}
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
				  CAU_REG_SB_ADDR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}
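
/*
 * Note that the TX direction programs one producer index per traffic
 * class (TX_PI(i) for each of num_hw_tc), while RX uses the single
 * RX_PI; all of them share the coalescing timeset derived above.
 */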
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->cdev))
		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				    sb_info->igu_sb_id, 0, 0);
}
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !(p_block->status & QED_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}
static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return QED_SB_INVALID_IDX;
}
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->cdev))
		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == QED_SP_SB_ID)
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		if (IS_PF(p_hwfn->cdev)) {
			struct qed_igu_info *p_info;
			struct qed_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~QED_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->cdev)) {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}
*p_hwfn
,
1691 struct qed_sb_info
*sb_info
, u16 sb_id
)
1693 struct qed_igu_block
*p_block
;
1694 struct qed_igu_info
*p_info
;
1699 /* zero status block and ack counter */
1700 sb_info
->sb_ack
= 0;
1701 memset(sb_info
->sb_virt
, 0, sizeof(*sb_info
->sb_virt
));
1703 if (IS_VF(p_hwfn
->cdev
)) {
1704 qed_vf_set_sb_info(p_hwfn
, sb_id
, NULL
);
1708 p_info
= p_hwfn
->hw_info
.p_igu_info
;
1709 p_block
= &p_info
->entry
[sb_info
->igu_sb_id
];
1711 /* Vector 0 is reserved to Default SB */
1712 if (!p_block
->vector_number
) {
1713 DP_ERR(p_hwfn
, "Do Not free sp sb using this function");
1717 /* Lose reference to client's SB info, and fix counters */
1718 p_block
->sb_info
= NULL
;
1719 p_block
->status
|= QED_IGU_STATUS_FREE
;
1720 p_info
->usage
.free_cnt
++;
static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_info.sb_virt,
				  p_sb->sb_info.sb_phys);
	kfree(p_sb);
	p_hwfn->p_sp_sb = NULL;
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}
*p_hwfn
,
1772 qed_int_comp_cb_t comp_cb
,
1773 void *cookie
, u8
*sb_idx
, __le16
**p_fw_cons
)
1775 struct qed_sb_sp_info
*p_sp_sb
= p_hwfn
->p_sp_sb
;
1779 /* Look for a free index */
1780 for (pi
= 0; pi
< ARRAY_SIZE(p_sp_sb
->pi_info_arr
); pi
++) {
1781 if (p_sp_sb
->pi_info_arr
[pi
].comp_cb
)
1784 p_sp_sb
->pi_info_arr
[pi
].comp_cb
= comp_cb
;
1785 p_sp_sb
->pi_info_arr
[pi
].cookie
= cookie
;
1787 *p_fw_cons
= &p_sp_sb
->sb_info
.sb_virt
->pi_array
[pi
];
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}
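
/*
 * Typical usage (illustrative sketch only; my_proto_comp_cb and my_ctx
 * are hypothetical placeholders for a protocol's completion handler and
 * its cookie):
 *
 *	u8 pi;
 *	__le16 *p_fw_cons;
 *
 *	rc = qed_int_register_cb(p_hwfn, my_proto_comp_cb, my_ctx,
 *				 &pi, &p_fw_cons);
 *	...
 *	qed_int_unregister_cb(p_hwfn, pi);
 *
 * p_fw_cons then points at the firmware-updated consumer inside the
 * slowpath status block for that protocol index.
 */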
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH	(1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
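
/*
 * The loop above wipes one CAU producer-index entry per PI of the SB;
 * the count of 12 corresponds to the PIS_PER_SB_E4 producer indices used
 * to size pi_info_arr at the top of this file.
 */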
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}
*p_hwfn
, struct qed_ptt
*p_ptt
)
2016 struct qed_igu_info
*p_info
= p_hwfn
->hw_info
.p_igu_info
;
2017 struct qed_igu_block
*p_block
;
2022 if (!RESC_NUM(p_hwfn
, QED_SB
)) {
2023 p_info
->b_allow_pf_vf_change
= false;
2025 /* Use the numbers the MFW have provided -
2026 * don't forget MFW accounts for the default SB as well.
2028 p_info
->b_allow_pf_vf_change
= true;
2030 if (p_info
->usage
.cnt
!= RESC_NUM(p_hwfn
, QED_SB
) - 1) {
2032 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
2033 RESC_NUM(p_hwfn
, QED_SB
) - 1,
2035 p_info
->usage
.cnt
= RESC_NUM(p_hwfn
, QED_SB
) - 1;
2038 if (IS_PF_SRIOV(p_hwfn
)) {
2039 u16 vfs
= p_hwfn
->cdev
->p_iov_info
->total_vfs
;
2041 if (vfs
!= p_info
->usage
.iov_cnt
)
2044 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
2045 p_info
->usage
.iov_cnt
, vfs
);
2047 /* At this point we know how many SBs we have totally
2048 * in IGU + number of PF SBs. So we can validate that
2049 * we'd have sufficient for VF.
2051 if (vfs
> p_info
->usage
.free_cnt
+
2052 p_info
->usage
.free_cnt_iov
- p_info
->usage
.cnt
) {
2054 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
2055 p_info
->usage
.free_cnt
+
2056 p_info
->usage
.free_cnt_iov
,
2057 p_info
->usage
.cnt
, vfs
);
2061 /* Currently cap the number of VFs SBs by the
2064 p_info
->usage
.iov_cnt
= vfs
;
2068 /* Mark all SBs as free, now in the right PF/VFs division */
2069 p_info
->usage
.free_cnt
= p_info
->usage
.cnt
;
2070 p_info
->usage
.free_cnt_iov
= p_info
->usage
.iov_cnt
;
2071 p_info
->usage
.orig
= p_info
->usage
.cnt
;
2072 p_info
->usage
.iov_orig
= p_info
->usage
.iov_cnt
;
2074 /* We now proceed to re-configure the IGU cam to reflect the initial
2075 * configuration. We can start with the Default SB.
2077 pf_sbs
= p_info
->usage
.cnt
;
2078 vf_sbs
= p_info
->usage
.iov_cnt
;
2080 for (igu_sb_id
= p_info
->igu_dsb_id
;
2081 igu_sb_id
< QED_MAPPING_MEMORY_SIZE(p_hwfn
->cdev
); igu_sb_id
++) {
2082 p_block
= &p_info
->entry
[igu_sb_id
];
2085 if (!(p_block
->status
& QED_IGU_STATUS_VALID
))
2088 if (p_block
->status
& QED_IGU_STATUS_DSB
) {
2089 p_block
->function_id
= p_hwfn
->rel_pf_id
;
2091 p_block
->vector_number
= 0;
2092 p_block
->status
= QED_IGU_STATUS_VALID
|
2095 } else if (pf_sbs
) {
2097 p_block
->function_id
= p_hwfn
->rel_pf_id
;
2099 p_block
->vector_number
= p_info
->usage
.cnt
- pf_sbs
;
2100 p_block
->status
= QED_IGU_STATUS_VALID
|
2102 QED_IGU_STATUS_FREE
;
2103 } else if (vf_sbs
) {
2104 p_block
->function_id
=
2105 p_hwfn
->cdev
->p_iov_info
->first_vf_in_pf
+
2106 p_info
->usage
.iov_cnt
- vf_sbs
;
2108 p_block
->vector_number
= 0;
2109 p_block
->status
= QED_IGU_STATUS_VALID
|
2110 QED_IGU_STATUS_FREE
;
2113 p_block
->function_id
= 0;
2115 p_block
->vector_number
= 0;
2118 SET_FIELD(val
, IGU_MAPPING_LINE_FUNCTION_NUMBER
,
2119 p_block
->function_id
);
2120 SET_FIELD(val
, IGU_MAPPING_LINE_PF_VALID
, p_block
->is_pf
);
2121 SET_FIELD(val
, IGU_MAPPING_LINE_VECTOR_NUMBER
,
2122 p_block
->vector_number
);
2124 /* VF entries would be enabled when VF is initializaed */
2125 SET_FIELD(val
, IGU_MAPPING_LINE_VALID
, p_block
->is_pf
);
2127 rval
= qed_rd(p_hwfn
, p_ptt
,
2128 IGU_REG_MAPPING_MEMORY
+ sizeof(u32
) * igu_sb_id
);
2131 qed_wr(p_hwfn
, p_ptt
,
2132 IGU_REG_MAPPING_MEMORY
+
2133 sizeof(u32
) * igu_sb_id
, val
);
2137 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
2139 p_block
->function_id
,
2141 p_block
->vector_number
, rval
, val
);
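/* Worked example with hypothetical numbers: with usage.cnt = 4 PF SBs and
 * usage.iov_cnt = 2 VF SBs, the loop above walks the CAM starting at the
 * default SB and assigns, in order:
 *
 *	DSB       -> rel_pf_id, vector 0
 *	4 entries -> rel_pf_id, vectors 1..4 (pf_sbs counts down)
 *	2 entries -> first_vf_in_pf + 0, + 1, vector 0 each
 *
 * Any remaining valid entries are zeroed and left disabled in the CAM,
 * since the VALID field is written from is_pf (= 0 for them).
 */
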
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}

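/* Decoding is the exact inverse of the SET_FIELD() sequence used in
 * qed_int_igu_reset_cam(); e.g., for a raw mapping line read from the CAM:
 *
 *	u8 vector = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
 *
 * Only the fields the driver tracks are extracted; the VALID bit is not
 * cached here, since ownership is derived separately by the caller.
 */
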
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SBs belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* limit number of prints by having each PF print only its
		 * entries with the exception of PF0 which would print
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}

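/* After a successful scan the usage snapshot can be consumed directly;
 * a minimal sketch using the accessor defined further below:
 *
 *	struct qed_sb_cnt_info sb_cnt;
 *
 *	qed_int_get_num_sbs(p_hwfn, &sb_cnt);
 *
 * sb_cnt.cnt/.iov_cnt then hold the PF/VF SB totals, and
 * sb_cnt.free_cnt/.free_cnt_iov how many are still unclaimed.
 */
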
/**
 * qed_int_igu_init_rt() - Initialize IGU runtime registers.
 *
 * @p_hwfn: HW device data.
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

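/* STORE_RT_REG() only records the value in the per-hwfn runtime-init
 * array; it reaches IGU_REG_PF_CONFIGURATION later, when the init tool
 * replays the runtime registers during hardware initialization.
 * Conceptually (a sketch, assuming the rt_data layout used by the
 * runtime-init code):
 *
 *	p_hwfn->rt_data.init_val[IGU_REG_PF_CONFIGURATION_RT_OFFSET] = val;
 *	p_hwfn->rt_data.b_valid[IGU_REG_PF_CONFIGURATION_RT_OFFSET] = true;
 */
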
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}

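/* A caller sketch (hypothetical, handle_source() is a placeholder and not
 * a driver function) showing how the combined 64-bit status word would
 * typically be consumed, one set bit per pending interrupt source:
 *
 *	u64 sisr = qed_int_igu_read_sisr_reg(p_hwfn);
 *	int bit;
 *
 *	for (bit = 0; bit < 64; bit++)
 *		if (sisr & BIT_ULL(bit))
 *			handle_source(p_hwfn, bit);
 */
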
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc);
	p_hwfn->b_sp_dpc_enabled = true;
}

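/* tasklet_setup() binds the slowpath DPC to its tasklet; qed_int_sp_dpc()
 * is then deferred from the ISR via tasklet_schedule(&p_hwfn->sp_dpc)
 * rather than running in hard-IRQ context.
 */
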
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}

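/* qed_int_alloc() and qed_int_free() below form a strict pair: the
 * slowpath SB and the attention SB allocated here are both released by
 * qed_int_free(), so callers can treat them as a unit:
 *
 *	rc = qed_int_alloc(p_hwfn, p_ptt);
 *	if (rc)
 *		goto err;
 *	...
 *	qed_int_free(p_hwfn);
 */
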
void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
}

void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}

void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}

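/* info->usage is itself a struct qed_sb_cnt_info, so the memcpy() above
 * is a plain snapshot of the current counters; no recount of the IGU CAM
 * is performed on this path.
 */
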
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}

void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
{
	cdev->attn_clr_en = clr_enable;
}

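/* The CAU timer resolution set below acts as an exponent: the coalescing
 * timeout is stored in units of (1 << timer_res) base ticks, so a caller
 * picks the smallest resolution whose range still covers the requested
 * coalescing value. A sketch of that selection (the bounds here are
 * illustrative assumptions, not taken from this file):
 *
 *	if (coalesce <= 0x7F)
 *		timer_res = 0;
 *	else if (coalesce <= 0xFF)
 *		timer_res = 1;
 *	else if (coalesce <= 0x1FF)
 *		timer_res = 2;
 *	else
 *		return -EINVAL;
 *
 *	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, tx);
 */
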
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	u32 params;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	params = le32_to_cpu(sb_entry.params);

	if (tx)
		SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	sb_entry.params = cpu_to_le32(params);

	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, NULL);
	if (rc)
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);