/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

struct qed_pi_info {
	qed_int_comp_cb_t	comp_cb;
	void			*cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum qed_attention_type {
	QED_ATTN_TYPE_ATTN,
	QED_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
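
/* ALIGNED_TYPE_SIZE() rounds sizeof(struct atten_status_block) up to the
 * device's cache-line granularity, so the coherent DMA buffer allocated
 * for the attention SB in qed_int_sb_attn_alloc() below is cache aligned.
 */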

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY                (1 << 0)

#define ATTENTION_LENGTH_MASK           (0x00000ff0)
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK           (0x000ff000)
#define ATTENTION_OFFSET_SHIFT          (12)

#define ATTENTION_BB_MASK               (0x00700000)
#define ATTENTION_BB_SHIFT              (20)
#define ATTENTION_BB(value)             ((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT          BIT(23)

	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};
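
/* The flags word packs a parity indication (bit 0) and a bit count
 * (ATTENTION_LENGTH, bits 4..11). For example, ATTENTION_PAR_INT expands
 * to ((2 << 4) | 1): a two-bit source (a parity bit followed by an
 * interrupt bit) whose parity flag is set, so table walkers advance
 * their bit index by ATTENTION_LENGTH(flags) == 2 when they pass it.
 */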

#define MAX_ATTN_GRPS           (8)
#define NUM_ATTN_REGS           (9)

/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	/* This might occur on certain instances; Log it once then mask it */
	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
	       0xffffffff);

	return 0;
}

#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}

#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)
static const char *attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}

static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
	/* Regardless of anything else, clean the validity bit */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);

	return 0;
}

#define PGLUE_ATTENTION_VALID			(1 << 29)
#define PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
#define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define PGLUE_ATTENTION_ILT_VALID		(1 << 23)

int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt)
{
	u32 tmp;

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_DETAILS);

		DP_NOTICE(p_hwfn,
			  "Illegal write by chip to [%08x:%08x] blocked.\n"
			  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			  GET_FIELD(details,
				    PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			  tmp,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_NOTICE(p_hwfn,
			  "Illegal read by chip from [%08x:%08x] blocked.\n"
			  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			  GET_FIELD(details,
				    PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			  tmp,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID)
		DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));

	return 0;
}

static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
	return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}

#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)

#define QED_DB_REC_COUNT		1000
#define QED_DB_REC_INTERVAL		100

static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 count = QED_DB_REC_COUNT;
	u32 usage = 1;

	/* Flush any pending (e)dpms as they may never arrive */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the pci. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and other half dropped. Another EDPM
	 * doorbell to the same address (from doorbell recovery mechanism or
	 * from the doorbelling entity) could have first half dropped and second
	 * half interpreted as continuation of the first. To prevent such
	 * malformed doorbells from reaching the device, flush the queue before
	 * releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		udelay(QED_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->cdev,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
		return -EBUSY;
	}

	return 0;
}

int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 attn_ovfl, cur_ovfl;
	int rc;

	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
				       &p_hwfn->db_recovery_info.overflow);
	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!cur_ovfl && !attn_ovfl)
		return 0;

	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
		  attn_ovfl, cur_ovfl);

	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	/* Release overflow sticky indication (stop silently dropping everything) */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* Repeat all last doorbells (doorbell drop recovery) */
	qed_db_recovery_execute(p_hwfn);

	return 0;
}
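
/* A minimal sketch of the recovery sequence qed_db_rec_handler() drives,
 * assuming a periodic caller that already holds a valid PTT (the caller
 * shown here is hypothetical; the helpers are the ones defined above):
 *
 *	if (qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY)) {
 *		qed_db_rec_flush_queue(p_hwfn, p_ptt);	// drain (e)dpms
 *		qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
 *		qed_db_recovery_execute(p_hwfn);	// replay doorbells
 *	}
 */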

static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	u32 overflow;
	int rc;

	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!overflow)
		goto out;

	/* Run PF doorbell recovery in next periodic handler */
	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);

	if (!p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			goto out;
	}

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
out:
	/* Schedule the handler even if overflow was not detected */
	qed_periodic_db_rec_start(p_hwfn);
}

static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted again.
	 * The masked almost full indication may also be set. Ignoring.
	 */
	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
		return 0;

	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* Obtain data about db drop/overflow */
		first_drop_reason = qed_rd(p_hwfn, p_ptt,
					   DORQ_REG_DB_DROP_REASON) &
		    QED_DORQ_ATTENTION_REASON_MASK;
		details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
		address = qed_rd(p_hwfn, p_ptt,
				 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		all_drops_reason = qed_rd(p_hwfn, p_ptt,
					  DORQ_REG_DB_DROP_DETAILS_REASON);

		/* Log info */
		DP_NOTICE(p_hwfn->cdev,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
			  address,
			  GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason);

		/* Clear the doorbell drop details and prepare for next drop */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* Mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
		       DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* If there are no indications other than drop indications, success */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return 0;
	}

	/* Some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return -EINVAL;
}

static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	p_hwfn->db_recovery_info.dorq_attn = true;
	qed_dorq_attn_overflow(p_hwfn);

	return qed_dorq_attn_int_sts(p_hwfn);
}

static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->db_recovery_info.dorq_attn)
		goto out;

	/* Call DORQ callback if the attention was missed */
	qed_dorq_attn_cb(p_hwfn);
out:
	p_hwfn->db_recovery_info.dorq_attn = false;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};

/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
					  (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{	/* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{	/* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{	/* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{	/* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{	/* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},
};
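
/* Each aeu_descs[] element describes one 32-bit "after invert" register;
 * the ATTENTION_LENGTH() values of its entries must tile exactly 32 bits,
 * since qed_int_sb_attn_init() and qed_int_deassertion() walk the table
 * by accumulating those lengths into a running bit index.
 */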

static struct aeu_invert_reg_bit *
qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
		      struct aeu_invert_reg_bit *p_bit)
{
	if (!QED_IS_BB(p_hwfn->cdev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
				   struct aeu_invert_reg_bit *p_bit)
{
	return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}

#define ATTN_STATE_BITS         (0xfff)
#define ATTN_BITS_MASKABLE      (0x3ff)
struct qed_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block       *sb_attn;
	dma_addr_t                      sb_phys;

	/* Last seen running index */
	u16                             index;

	/* A mask of the AEU bits resulting in a parity error */
	u32                             parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg           *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16                             known_attn;

	/* Cleanup address for the link's general hw attention */
	u32                             mfw_attn_addr;
};

static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
				      struct qed_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = QED_SB_ATT_IDX;
	}

	return rc;
}

/**
 * @brief qed_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}

static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
			       enum block_id id,
			       enum dbg_attn_type type, bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	memset(&attn_results, 0, sizeof(attn_results));

	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				   b_clear, &attn_results);
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn,
			  "Failed to parse attention information [status: %s]\n",
			  qed_dbg_get_status_str(status));
	else
		qed_dbg_parse_attn(p_hwfn, &attn_results);
}

/**
 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *  this bit to this group.
 * @param bit_index - index of this bit in the aeu_en_reg
 *
 * @return int
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
			    struct aeu_invert_reg_bit *p_aeu,
			    u32 aeu_en_reg,
			    const char *p_bit_name, u32 bitmask)
{
	bool b_fatal = false;
	int rc = -EINVAL;
	u32 val;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		qed_int_attn_print(p_hwfn, p_aeu->block_index,
				   ATTN_TYPE_INTERRUPT, !b_fatal);

	/* If the attention is benign, no need to prevent it */
	if (!rc)
		goto out;

	/* Prevent this Attention from being asserted in the future */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
		p_bit_name);

out:
	return rc;
}

/**
 * @brief qed_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return int
 *
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	int rc = 0;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					MISC_REG_AEU_AFTER_INVERT_1_IGU +
					i * 0x4);
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Find parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

		/* Skip register in which no parity bit is currently set */
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & BIT(bit_idx)))
				qed_int_deassertion_parity(p_hwfn, p_bit,
							   aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;

			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				long unsigned int bitmask;
				u8 bit, bit_len;

				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)find_first_bit(&bitmask,
								 bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						snprintf(bit_name, 30,
							 p_aeu->bit_name, num);
					else
						strlcpy(bit_name,
							p_aeu->bit_name, 30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					qed_int_deassertion_aeu_bit(p_hwfn,
								    p_aeu,
								    aeu_en,
								    bit_name,
								    bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Handle missed DORQ attention */
	qed_dorq_attn_handler(p_hwfn);

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}

static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		/* finish reading index before the loop condition */
		dma_rmb();
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
		~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
		p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}
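
/* Worked example of the state derivation above: with attn_bits = 0x101,
 * attn_acks = 0x001 and known_attn = 0x001, asserted_bits evaluates to
 * 0x100 (a new MFW attention) and deasserted_bits to 0x000; only after
 * the MFW line drops and is acked does a later pass report bit 0x100 in
 * deasserted_bits and clear it from known_attn.
 */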

static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_ATTN <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	barrier();
}

void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix +
	 * inta in non-mask mode; in inta it does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Invoke any registered protocol-index completion callbacks */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_attn, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}

static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}

static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}

static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48

void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!cdev->rx_coalesce_usecs)
			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
		if (!cdev->tx_coalesce_usecs)
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
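
/* Example of the timer-resolution arithmetic: with rx_coalesce_usecs =
 * 0x180 the code above selects timer_res = 2, and qed_int_cau_conf_sb()
 * below derives timeset = 0x180 >> 2 = 0x60, which fits the 7-bit
 * timeset field; the effective timeout follows the
 * "timeset << (timer_res + 1)" relation noted above.
 */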

static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u16 igu_sb_id,
				u32 pi_index,
				enum qed_coalescing_fsm coalescing_fsm,
				u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->cdev))
		return;

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
		       *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
				  CAU_REG_SB_ADDR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}

void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->cdev))
		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				    sb_info->igu_sb_id, 0, 0);
}

struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !(p_block->status & QED_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}

static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return QED_SB_INVALID_IDX;
}

u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->cdev))
		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == QED_SP_SB_ID)
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}

int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		if (IS_PF(p_hwfn->cdev)) {
			struct qed_igu_info *p_info;
			struct qed_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~QED_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->cdev)) {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}

int qed_int_sb_release(struct qed_hwfn *p_hwfn,
		       struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_igu_block *p_block;
	struct qed_igu_info *p_info;

	if (!sb_info)
		return 0;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->cdev)) {
		qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
		return 0;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (!p_block->vector_number) {
		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
		return -EINVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = NULL;
	p_block->status |= QED_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return 0;
}

static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_info.sb_virt,
				  p_sb->sb_info.sb_phys);
	kfree(p_sb);
	p_hwfn->p_sp_sb = NULL;
}

static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}

int qed_int_register_cb(struct qed_hwfn *p_hwfn,
			qed_int_comp_cb_t comp_cb,
			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int rc = -ENOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = 0;
		break;
	}

	return rc;
}
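
/* A minimal (hypothetical) registration sketch for a slowpath client;
 * my_comp_cb and my_cookie are illustrative names, not part of this API:
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *	int rc;
 *
 *	rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				 &sb_idx, &p_fw_cons);
 *
 * On success my_comp_cb() is invoked from qed_int_sp_dpc() whenever the
 * slowpath SB index advances, and p_fw_cons points at the firmware
 * consumer entry for the allocated protocol index.
 */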

int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}

u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}

int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}
	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}

void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}

#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}

void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}

void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, QED_SB)) {
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW has provided -
		 * don't forget the MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
				RESC_NUM(p_hwfn, QED_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
		}

		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have totally
			 * in IGU + number of PF SBs. So we can validate that
			 * we'd have sufficient for VF.
			 */
			if (vfs > p_info->usage.free_cnt +
			    p_info->usage.free_cnt_iov - p_info->usage.cnt) {
				DP_NOTICE(p_hwfn,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return -EINVAL;
			}

			/* Currently cap the number of VF SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & QED_IGU_STATUS_VALID))
			continue;

		if (p_block->status & QED_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
			    p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when the VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = qed_rd(p_hwfn, p_ptt,
			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

		if (rval != val) {
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY +
			       sizeof(u32) * igu_sb_id, val);

			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id,
				   p_block->function_id,
				   p_block->is_pf,
				   p_block->vector_number, rval, val);
		}
	}

	return 0;
}
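
/* Read one IGU CAM line and decode it into the driver's shadow entry */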
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}
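
/* One-time scan of the IGU CAM: classify every SB as owned by this PF, by
 * one of its VFs, or by neither, and elect the first matching entry as the
 * default SB.
 */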
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SB belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the First entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* limit number of prints by having each PF print only its
		 * entries with the exception of PF0 which would print
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non default SB are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}

/**
 * @brief Initialize IGU runtime registers
 *
 * @param p_hwfn
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
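
/* Read the 64-bit single-ISR status image as two 32-bit GTT windows and
 * merge them into one u64.
 */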
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}
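
/* Slowpath DPC handling; the tasklet is presumably scheduled from the main
 * ISR to process slowpath events outside of hard-interrupt context.
 */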
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_init(p_hwfn->sp_dpc,
		     qed_int_sp_dpc, (unsigned long)p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
	if (!p_hwfn->sp_dpc)
		return -ENOMEM;

	return 0;
}

static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = NULL;
}
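
/* Interrupt-path allocations - the DPC, the slowpath SB and the attention
 * SB; the first failure is propagated to the caller.
 */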
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_dpc_alloc(p_hwfn);
	if (rc)
		return rc;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}
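
/* Teardown counterpart of qed_int_alloc; releases the SBs before the DPC */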
void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
	qed_int_sp_dpc_free(p_hwfn);
}
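
/* Runtime initialization of the slowpath SB, attention handling and DPC */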
void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}
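
/* Copy out the current SB usage counters. E.g., a (hypothetical) caller
 * sizing its MSI-X request:
 *
 *	struct qed_sb_cnt_info sb_cnt;
 *
 *	qed_int_get_num_sbs(p_hwfn, &sb_cnt);
 */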
void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}
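
/* Once the ISRs have been released, make sure no hw-function still expects
 * interrupts to be serviced.
 */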
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}
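
/* Patch the coalescing timer resolution of one SB's CAU entry over DMAE;
 * TIMER_RES1 serves the Tx direction, TIMER_RES0 the Rx direction. E.g., a
 * (hypothetical) caller setting an Rx resolution of 0 for SB 5:
 *
 *	rc = qed_int_set_timer_res(p_hwfn, p_ptt, 0, 5, false);
 */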
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}