drivers/net/ethernet/qlogic/qed/qed_int.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
struct qed_pi_info {
	qed_int_comp_cb_t comp_cb;
	void *cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum qed_attention_type {
	QED_ATTN_TYPE_ATTN,
	QED_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0x00700000)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		(value << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		BIT(23)

	unsigned int flags;

	/* Callback to call if attention will be triggered */
	int (*cb)(struct qed_hwfn *p_hwfn);

	enum block_id block_index;
};
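/* Editor's note (not in the original source): 'flags' packs each entry's
 * attributes - bit 0 marks a parity source, bits [11:4] hold the number of
 * consecutive AEU bits the entry covers (so ATTENTION_SINGLE is length 1 and
 * ATTENTION_PAR_INT is a parity + interrupt pair of length 2), bits [19:12]
 * hold a naming offset for multi-bit sources, and bits [22:20] index
 * aeu_descs_special[] when ATTENTION_BB_DIFFERENT (bit 23) is set.
 */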
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS		(8)
#define NUM_ATTN_REGS		(9)
/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	/* This might occur on certain instances; Log it once then mask it */
	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
	       0xffffffff);

	return 0;
}
#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			 PSWHST_REG_INCORRECT_ACCESS_VALID);

	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			      PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->cdev,
			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_CLIENT),
			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
			(u8) GET_FIELD(data,
				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
			data);
	}

	return 0;
}
#define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
#define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
#define QED_GRC_ATTENTION_PF_MASK	(0xf)
#define QED_GRC_ATTENTION_PF_SHIFT	(0)
#define QED_GRC_ATTENTION_VF_MASK	(0xff)
#define QED_GRC_ATTENTION_VF_SHIFT	(4)
#define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
#define QED_GRC_ATTENTION_PRIV_VF	(0)
static const char *attn_master_to_str(u8 master)
{
	switch (master) {
	case 1: return "PXP";
	case 2: return "MCP";
	case 3: return "MSDM";
	case 4: return "PSDM";
	case 5: return "YSDM";
	case 6: return "USDM";
	case 7: return "TSDM";
	case 8: return "XSDM";
	case 9: return "DBU";
	case 10: return "DMAE";
	default:
		return "Unknown";
	}
}
static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_INFO(p_hwfn->cdev,
		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		tmp2, tmp,
		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
	/* Regardless of anything else, clean the validity bit */
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
	return 0;
}
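/* Editor's note (not in the original source): the address field above is
 * shifted left by 2 because the GRC latches it as a dword address; the
 * master field names the internal client (see attn_master_to_str()) whose
 * access timed out.
 */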
#define PGLUE_ATTENTION_VALID			(1 << 29)
#define PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
#define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define PGLUE_ATTENTION_ILT_VALID		(1 << 23)
int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt)
{
	u32 tmp;

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_WR_DETAILS);

		DP_NOTICE(p_hwfn,
			  "Illegal write by chip to [%08x:%08x] blocked.\n"
			  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			  GET_FIELD(details,
				    PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			  tmp,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_NOTICE(p_hwfn,
			  "Illegal read by chip from [%08x:%08x] blocked.\n"
			  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
			  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
			  GET_FIELD(details,
				    PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
			  tmp,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
			  GET_FIELD(tmp,
				    PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & PGLUE_ATTENTION_ICPL_VALID)
		DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = qed_rd(p_hwfn, p_ptt,
				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));

	return 0;
}
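/* Editor's note (not in the original source): the handler above walks the
 * PGLUE B error sources in turn - blocked chip writes, blocked chip reads,
 * ICPL, master ZLR and VF ILT errors - logging whichever 'valid' bits are
 * set, then clears the latched indications with a single register write.
 */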
static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
	return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}

#define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)

#define QED_DB_REC_COUNT		1000
#define QED_DB_REC_INTERVAL		100
static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 count = QED_DB_REC_COUNT;
	u32 usage = 1;

	/* Flush any pending (e)dpms as they may never arrive */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the pci. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and other half dropped. Another EDPM
	 * doorbell to the same address (from doorbell recovery mechanism or
	 * from the doorbelling entity) could have first half dropped and second
	 * half interpreted as continuation of the first. To prevent such
	 * malformed doorbells from reaching the device, flush the queue before
	 * releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		udelay(QED_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->cdev,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
		return -EBUSY;
	}

	return 0;
}
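/* Editor's note (not in the original source): with the constants above the
 * flush loop polls DORQ_REG_PF_USAGE_CNT up to 1000 times at 100 usec
 * intervals, i.e. the queue gets roughly 100,000 usec (the figure printed
 * in the error message) to drain before the function gives up with -EBUSY.
 */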
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 attn_ovfl, cur_ovfl;
	int rc;

	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
				       &p_hwfn->db_recovery_info.overflow);
	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!cur_ovfl && !attn_ovfl)
		return 0;

	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
		  attn_ovfl, cur_ovfl);

	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	/* Release overflow sticky indication (stop silently dropping everything) */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* Repeat all last doorbells (doorbell drop recovery) */
	qed_db_recovery_execute(p_hwfn);

	return 0;
}
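/* Editor's note (not in the original source): recovery proceeds if either
 * the attention path latched an overflow (QED_OVERFLOW_BIT, set from the
 * DPC) or the sticky register still reads non-zero; the queue is flushed
 * only for a live overflow, but the saved doorbells are replayed in both
 * cases.
 */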
static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	u32 overflow;
	int rc;

	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!overflow)
		goto out;

	/* Run PF doorbell recovery in next periodic handler */
	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);

	if (!p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			goto out;
	}

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
out:
	/* Schedule the handler even if overflow was not detected */
	qed_periodic_db_rec_start(p_hwfn);
}
static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted again.
	 * The masked almost full indication may also be set. Ignoring.
	 */
	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
		return 0;

	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* Obtain data about db drop/overflow */
		first_drop_reason = qed_rd(p_hwfn, p_ptt,
					   DORQ_REG_DB_DROP_REASON) &
				    QED_DORQ_ATTENTION_REASON_MASK;
		details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
		address = qed_rd(p_hwfn, p_ptt,
				 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		all_drops_reason = qed_rd(p_hwfn, p_ptt,
					  DORQ_REG_DB_DROP_DETAILS_REASON);

		/* Log info */
		DP_NOTICE(p_hwfn->cdev,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
			  address,
			  GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason);

		/* Clear the doorbell drop details and prepare for next drop */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* Mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		qed_wr(p_hwfn,
		       p_ptt,
		       DORQ_REG_INT_STS_WR,
		       DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* If there are no indications other than drop indications, success */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return 0;
	}

	/* Some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return -EINVAL;
}
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
	p_hwfn->db_recovery_info.dorq_attn = true;
	qed_dorq_attn_overflow(p_hwfn);

	return qed_dorq_attn_int_sts(p_hwfn);
}

static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->db_recovery_info.dorq_attn)
		goto out;

	/* Call DORQ callback if the attention was missed */
	qed_dorq_attn_cb(p_hwfn);
out:
	p_hwfn->db_recovery_info.dorq_attn = false;
}
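/* Editor's note (not in the original source): qed_dorq_attn_handler() runs
 * at the end of deassertion handling; if qed_dorq_attn_cb() was never
 * invoked for this cycle (dorq_attn still false), it is called here so a
 * doorbell-drop indication cannot be silently lost.
 */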
/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
					  (1 << ATTENTION_OFFSET_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d",
			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
		}
	},

	{
		{	/* After Invert 3 */
			{"General Attention %d",
			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"NWS Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 NULL, BLOCK_NWS},
			{"NWM Parity",
			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE,
			 qed_mcp_attn_cb, MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
		}
	},

	{
		{	/* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
		}
	},

	{
		{	/* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT,
			 qed_dorq_attn_cb, BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
		}
	},

	{
		{	/* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
		}
	},

	{
		{	/* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT,
			 qed_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
			 NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT,
			 qed_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
			 NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR,
			 NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 NULL, MAX_BLOCK_ID},
		}
	},
};
static struct aeu_invert_reg_bit *
qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
		      struct aeu_invert_reg_bit *p_bit)
{
	if (!QED_IS_BB(p_hwfn->cdev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
				   struct aeu_invert_reg_bit *p_bit)
{
	return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}
#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
struct qed_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
				      struct qed_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = QED_SB_ATT_IDX;
	}

	return rc;
}
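/* Editor's note (not in the original source): a changed running index means
 * firmware has posted new attention information since the last DPC run, so
 * callers treat QED_SB_ATT_IDX as "attention work pending".
 */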
/**
 * @brief qed_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}
static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
			       enum block_id id,
			       enum dbg_attn_type type, bool b_clear)
{
	struct dbg_attn_block_result attn_results;
	enum dbg_status status;

	memset(&attn_results, 0, sizeof(attn_results));

	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
				   b_clear, &attn_results);
	if (status != DBG_STATUS_OK)
		DP_NOTICE(p_hwfn,
			  "Failed to parse attention information [status: %s]\n",
			  qed_dbg_get_status_str(status));
	else
		qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *		       this bit to this group.
 * @param p_bit_name - printable name of the attention source
 * @param bitmask - the attention bits, in their position within aeu_en_reg
 *
 * @return int
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
			    struct aeu_invert_reg_bit *p_aeu,
			    u32 aeu_en_reg,
			    const char *p_bit_name, u32 bitmask)
{
	bool b_fatal = false;
	int rc = -EINVAL;
	u32 val;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID)
		qed_int_attn_print(p_hwfn, p_aeu->block_index,
				   ATTN_TYPE_INTERRUPT, !b_fatal);

	/* If the attention is benign, no need to prevent it */
	if (!rc)
		goto out;

	/* Prevent this Attention from being asserted in the future */
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
		p_bit_name);

out:
	return rc;
}
/**
 * @brief qed_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return int
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	int rc = 0;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					MISC_REG_AEU_AFTER_INVERT_1_IGU +
					i * 0x4);
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n",
			   i, aeu_inv_arr[i]);
	}

	/* Find parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

		/* Skip register in which no parity bit is currently set */
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & BIT(bit_idx)))
				qed_int_deassertion_parity(p_hwfn, p_bit,
							   aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;

			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long bitmask;
				u8 bit, bit_len;

				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)find_first_bit(&bitmask,
								 bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						snprintf(bit_name, 30,
							 p_aeu->bit_name, num);
					else
						strlcpy(bit_name,
							p_aeu->bit_name, 30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					qed_int_deassertion_aeu_bit(p_hwfn,
								    p_aeu,
								    aeu_en,
								    bit_name,
								    bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Handle missed DORQ attention */
	qed_dorq_attn_handler(p_hwfn);

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}
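/* Editor's note (not in the original source): deassertion is handled in two
 * passes - parity sources first, scanned via the precomputed parity_mask,
 * then the remaining causes group by group, where each AEU enable register
 * (NUM_ATTN_REGS = 9 registers per group, MAX_ATTN_GRPS = 8 groups) is
 * intersected with the after-invert snapshot to find the bits responsible
 * for the original assertion.
 */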
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		/* finish reading index before the loop condition */
		dma_rmb();
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}
static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_ATTN <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	barrier();
}
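/* Editor's note (not in the original source): the ack above encodes a single
 * producer/consumer update command - the new consumer index, an update flag,
 * IGU_INT_NOP so the interrupt enable state is left unchanged, and the
 * attention segment selector - and the trailing barrier() matters because
 * interrupt and attention acks share the same command address.
 */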
void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix +
	 * inta in non-mask mode; in inta it does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. If not just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Call the registered per-protocol-index callbacks */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_attn, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}

static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}
static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}
static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);

	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}
/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48

void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!cdev->rx_coalesce_usecs)
			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
		if (!cdev->tx_coalesce_usecs)
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
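/* Editor's note (not in the original source): timeset must fit in 7 bits,
 * so the timer resolution acts as a scale factor. E.g. with the default
 * rx_coalesce_usecs = 24 (<= 0x7F) timer_res is 0 and qed_int_cau_conf_sb()
 * below programs timeset = 24 >> 0 = 24, while a value of 200 (> 0xFF)
 * would select timer_res = 2 and timeset = 200 >> 2 = 50.
 */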
static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u16 igu_sb_id,
				u32 pi_index,
				enum qed_coalescing_fsm coalescing_fsm,
				u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->cdev))
		return;

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
		       *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
				  CAU_REG_SB_ADDR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  igu_sb_id * sizeof(u64), 2, NULL);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->cdev))
		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				    sb_info->igu_sb_id, 0, 0);
}
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !(p_block->status & QED_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}

static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
	struct qed_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return QED_SB_INVALID_IDX;
}
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->cdev))
		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id == QED_SP_SB_ID)
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}
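/* Editor's note (not in the original source): vector 0 of a PF is reserved
 * for its default (slowpath) SB - see the check in qed_int_sb_release() -
 * which is why a regular sb_id N is looked up as PF vector N + 1 above;
 * VFs resolve the mapping through qed_vf_get_igu_sb_id() instead.
 */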
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		if (IS_PF(p_hwfn->cdev)) {
			struct qed_igu_info *p_info;
			struct qed_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~QED_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->cdev)) {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
		       struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_igu_block *p_block;
	struct qed_igu_info *p_info;

	if (!sb_info)
		return 0;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->cdev)) {
		qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
		return 0;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (!p_block->vector_number) {
		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
		return -EINVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = NULL;
	p_block->status |= QED_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return 0;
}
static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_info.sb_virt,
				  p_sb->sb_info.sb_phys);
	kfree(p_sb);
	p_hwfn->p_sp_sb = NULL;
}

static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb)
		return -ENOMEM;

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
			qed_int_comp_cb_t comp_cb,
			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int rc = -ENOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = 0;
		break;
	}

	return rc;
}
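/* Example usage (editor's sketch; my_comp_cb and my_cookie are hypothetical
 * caller-supplied names, not part of this file):
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *	int rc;
 *
 *	rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				 &sb_idx, &p_fw_cons);
 *
 * On success, sb_idx holds the allocated protocol-index slot and p_fw_cons
 * points at the matching firmware consumer inside the slowpath SB;
 * qed_int_sp_dpc() will then invoke my_comp_cb(p_hwfn, my_cookie) when the
 * slowpath status block reports new completions.
 */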
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}

u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH	(1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}
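/* Clean a single status block: optionally set and then always clear the
 * cleanup bit, wait until the SB's write is no longer pending, and zero its
 * producer indices in the CAU.
 */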
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to clean up */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
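/* Clean all valid, non-default PF SBs in the IGU CAM, and optionally the
 * default SB as well. VF cleanup is enabled and the PXP TPH interface is
 * disabled in the IGU block configuration beforehand.
 */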
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}
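/* Rebuild the IGU CAM to its initial PF/VF division: the default SB is
 * assigned first, then this PF's SBs (vector numbers starting at 1), then
 * the SBs of its VFs; remaining valid entries are cleared. Only CAM lines
 * whose value actually changes are rewritten.
 */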
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, QED_SB)) {
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW has provided -
		 * don't forget the MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
				RESC_NUM(p_hwfn, QED_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
		}

		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn,
					   NETIF_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know the total number of SBs in
			 * the IGU and the number of PF SBs, so we can
			 * validate there are enough left for the VFs.
			 */
			if (vfs > p_info->usage.free_cnt +
			    p_info->usage.free_cnt_iov - p_info->usage.cnt) {
				DP_NOTICE(p_hwfn,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return -EINVAL;
			}

			/* Currently cap the number of VF SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VF division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU CAM to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & QED_IGU_STATUS_VALID))
			continue;

		if (p_block->status & QED_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
			    p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when the VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = qed_rd(p_hwfn, p_ptt,
			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

		if (rval != val) {
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY +
			       sizeof(u32) * igu_sb_id, val);

			DP_VERBOSE(p_hwfn,
				   NETIF_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id,
				   p_block->function_id,
				   p_block->is_pf,
				   p_block->vector_number, rval, val);
		}
	}

	return 0;
}
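/* Decode a single IGU CAM line into the driver's shadow entry */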
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}
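/* Scan the entire IGU CAM, record which SBs belong to this PF and to its
 * VFs, and mark the first matching entry as the default SB. Fails if no
 * default SB can be found.
 */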
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between an existent and a non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SBs belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read the current entry; notice it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset the IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* Limit the number of prints by having each PF print only its
		 * own entries, with the exception of PF0 which prints
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}
/**
 * @brief Initialize IGU runtime registers
 *
 * @param p_hwfn
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
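/* Read the 64-bit single-ISR status from the IGU command space; the result
 * is composed from the MSB and LSB halves of the SISR write-mask registers.
 */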
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}
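/* The slowpath DPC is a tasklet running qed_int_sp_dpc(); it is allocated
 * separately in qed_int_sp_dpc_alloc() and initialized during setup.
 */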
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_init(p_hwfn->sp_dpc,
		     qed_int_sp_dpc, (unsigned long)p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
	if (!p_hwfn->sp_dpc)
		return -ENOMEM;

	return 0;
}

static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = NULL;
}
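/* Allocate all interrupt-related structures: the slowpath DPC, the slowpath
 * status block and the attention status block. Errors are propagated to the
 * caller; the matching qed_int_free() releases them.
 */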
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_dpc_alloc(p_hwfn);
	if (rc)
		return rc;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}

void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
	qed_int_sp_dpc_free(p_hwfn);
}
void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}

void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}

void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}
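/* Update the interrupt coalescing timer resolution of a single status
 * block: the CAU SB entry is DMAE-read from CAU_REG_SB_VAR_MEMORY, the
 * Rx (RES0) or Tx (RES1) timer-resolution field is patched, and the entry
 * is written back.
 */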
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}