/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include "qed_reg_addr.h"
/* Memory groups enum */
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_TASK_CFC_MEM,

/* Memory groups names */
static const char * const s_mem_group_names[] = {
/* Idle check conditions */
static u32 cond4(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
}

static u32 cond6(const u32 *r, const u32 *imm)
{
	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}

static u32 cond5(const u32 *r, const u32 *imm)
{
	return (r[0] & imm[0]) != imm[1];
}

static u32 cond8(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) >> imm[1]) !=
	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}

static u32 cond9(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}

static u32 cond1(const u32 *r, const u32 *imm)
{
	return (r[0] & ~imm[0]) != imm[1];
}

static u32 cond0(const u32 *r, const u32 *imm)
{
	return r[0] != imm[0];
}

static u32 cond10(const u32 *r, const u32 *imm)
{
	return r[0] != r[1] && r[2] == imm[0];
}

static u32 cond11(const u32 *r, const u32 *imm)
{
	return r[0] != r[1] && r[2] > imm[0];
}

static u32 cond3(const u32 *r, const u32 *imm)

static u32 cond12(const u32 *r, const u32 *imm)
{
	return r[0] & imm[0];
}

static u32 cond7(const u32 *r, const u32 *imm)
{
	return r[0] < (r[1] - imm[0]);
}

static u32 cond2(const u32 *r, const u32 *imm)
{
	return r[0] > imm[0];
}
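/* Note (added for clarity): each condN() predicate above compares sampled
 * register values (r) against immediate operands (imm) supplied by the
 * binary idle-check rules; cond_arr below appears to be indexed by a rule's
 * condition id to select the matching predicate. The exact id-to-function
 * mapping lives in the (elided) initializer, so treat this as an
 * interpretation rather than a spec.
 */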
/* Array of Idle Check conditions */
static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
/******************************* Data Types **********************************/

/* Chip constant definitions */
	} per_platform[MAX_PLATFORM_IDS];

/* Platform constant definitions */
struct platform_defs {

/* Storm constant definitions */
	enum block_id block_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_slow_dbg_empty_addr;
	u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
	u32 cm_conn_ag_ctx_rd_addr;
	u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
	u32 cm_conn_st_ctx_rd_addr;
	u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
	u32 cm_task_ag_ctx_rd_addr;
	u32 cm_task_st_ctx_lid_size; /* In quad-regs */
	u32 cm_task_st_ctx_rd_addr;

/* Block constant definitions */
	bool has_dbg_bus[MAX_CHIP_IDS];
	bool associated_to_storm;
	u32 storm_id; /* Valid only if associated_to_storm is true */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	u32 dbg_cycle_enable_addr;
	u32 dbg_force_valid_addr;
	u32 dbg_force_frame_addr;
	bool unreset; /* If true, the block is taken out of reset before dump */
	enum dbg_reset_regs reset_reg;
	u8 reset_bit_offset; /* Bit offset in reset register */

/* Reset register definitions */
struct reset_reg_defs {
	bool exists[MAX_CHIP_IDS];

struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];
	u32 exclude_all_preset_val;
	u32 crash_preset_val;

struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr; /* In 128b units */
	u32 num_entries[MAX_CHIP_IDS];
	u32 entry_width[MAX_CHIP_IDS]; /* In bits */

struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;

struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;
	u32 num_of_blocks[MAX_CHIP_IDS];

	const char *phy_name;
	u32 tbus_addr_lo_addr;
	u32 tbus_addr_hi_addr;
	u32 tbus_data_lo_addr;
	u32 tbus_data_hi_addr;
/******************************** Constants **********************************/

#define MAX_LCIDS 320
#define MAX_LTIDS 320
#define NUM_IOR_SETS 2
#define IORS_PER_SET 176
#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
#define BYTES_IN_DWORD sizeof(u32)

/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	(int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= \
		(~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)
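/* Illustrative use of the field macros above (not part of the original
 * driver flow; "row" is a hypothetical local):
 *
 *	u32 cam_cmd[2] = { 0 };
 *
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 *
 * With the VFC_CAM_CMD_ROW_OFFSET/SIZE values defined further below
 * (offset 48, size 9), this clears bits 16..24 of cam_cmd[1] and ORs in
 * (row << 16).
 */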
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, addr, (arr)[i]); \
	} while (0)

#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			(arr)[i] = qed_rd(dev, ptt, addr); \
	} while (0)
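/* Note: ARR_REG_WR/ARR_REG_RD use a loop counter named "i" without declaring
 * it, so the calling function is expected to have a suitable local "i" in
 * scope.
 */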
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
#define REG_DUMP_LEN_SHIFT 24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
#define IDLE_CHK_MAX_ENTRIES_SIZE 32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE 64
#define VFC_CAM_CMD_ROW_OFFSET 48
#define VFC_CAM_CMD_ROW_SIZE 9
#define VFC_CAM_ADDR_STRUCT_SIZE 16
#define VFC_CAM_ADDR_OP_OFFSET 0
#define VFC_CAM_ADDR_OP_SIZE 4
#define VFC_CAM_RESP_STRUCT_SIZE 256
#define VFC_RAM_ADDR_STRUCT_SIZE 16
#define VFC_RAM_ADDR_OP_OFFSET 0
#define VFC_RAM_ADDR_OP_SIZE 2
#define VFC_RAM_ADDR_ROW_OFFSET 2
#define VFC_RAM_ADDR_ROW_SIZE 10
#define VFC_RAM_RESP_STRUCT_SIZE 256
#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
#define NUM_VFC_RAM_TYPES 4
#define VFC_CAM_NUM_ROWS 512
#define VFC_OPCODE_CAM_RD 14
#define VFC_OPCODE_RAM_RD 0
#define NUM_RSS_MEM_TYPES 5
#define NUM_BIG_RAM_TYPES 3
#define BIG_RAM_BLOCK_SIZE_BYTES 128
#define BIG_RAM_BLOCK_SIZE_DWORDS \
	BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
#define RESET_REG_UNRESET_OFFSET 4
#define STALL_DELAY_MS 500
#define STATIC_DEBUG_LINE_DWORDS 9
#define NUM_DBG_BUS_LINES 256
#define NUM_COMMON_GLOBAL_PARAMS 8
#define FW_IMG_MAIN 1
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_ELEMENT_DWORDS 2
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_ELEMENT_DWORDS 4
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/

static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };

/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{ "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
	  { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
	{ "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
};
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
	  DBG_BUS_CLIENT_RBCT}, true,
	 TSEM_REG_FAST_MEMORY,
	 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
	 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
	 TCM_REG_CTX_RBC_ACCS,
	 4, TCM_REG_AGG_CON_CTX,
	 16, TCM_REG_SM_CON_CTX,
	 2, TCM_REG_AGG_TASK_CTX,
	 4, TCM_REG_SM_TASK_CTX},

	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
	  DBG_BUS_CLIENT_RBCM}, false,
	 MSEM_REG_FAST_MEMORY,
	 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
	 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
	 MCM_REG_CTX_RBC_ACCS,
	 1, MCM_REG_AGG_CON_CTX,
	 10, MCM_REG_SM_CON_CTX,
	 2, MCM_REG_AGG_TASK_CTX,
	 7, MCM_REG_SM_TASK_CTX},

	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
	  DBG_BUS_CLIENT_RBCU}, false,
	 USEM_REG_FAST_MEMORY,
	 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
	 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
	 UCM_REG_CTX_RBC_ACCS,
	 2, UCM_REG_AGG_CON_CTX,
	 13, UCM_REG_SM_CON_CTX,
	 3, UCM_REG_AGG_TASK_CTX,
	 3, UCM_REG_SM_TASK_CTX},

	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
	  DBG_BUS_CLIENT_RBCX}, false,
	 XSEM_REG_FAST_MEMORY,
	 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
	 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
	 XCM_REG_CTX_RBC_ACCS,
	 9, XCM_REG_AGG_CON_CTX,
	 15, XCM_REG_SM_CON_CTX,

	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
	  DBG_BUS_CLIENT_RBCY}, false,
	 YSEM_REG_FAST_MEMORY,
	 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
	 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
	 YCM_REG_CTX_RBC_ACCS,
	 2, YCM_REG_AGG_CON_CTX,
	 3, YCM_REG_SM_CON_CTX,
	 2, YCM_REG_AGG_TASK_CTX,
	 12, YCM_REG_SM_TASK_CTX},

	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
	  DBG_BUS_CLIENT_RBCS}, true,
	 PSEM_REG_FAST_MEMORY,
	 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
	 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
	 PCM_REG_CTX_RBC_ACCS,
	 10, PCM_REG_SM_CON_CTX,
/* Block definitions array */
static struct block_defs block_grc_defs = {
	"grc", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
	GRC_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_UA, 1
};

static struct block_defs block_miscs_defs = {
	"miscs", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_misc_defs = {
	"misc", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_dbu_defs = {
	"dbu", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_pglue_b_defs = {
	"pglue_b", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
	PGLUE_B_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
};

static struct block_defs block_cnig_defs = {
	"cnig", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
	CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
	CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
	CNIG_REG_DBG_FORCE_FRAME_K2,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
};

static struct block_defs block_cpmu_defs = {
	"cpmu", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};

static struct block_defs block_ncsi_defs = {
	"ncsi", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
	NCSI_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
};

static struct block_defs block_opte_defs = {
	"opte", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};

static struct block_defs block_bmb_defs = {
	"bmb", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
	BMB_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
};

static struct block_defs block_pcie_defs = {
	"pcie", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
	PCIE_REG_DBG_COMMON_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_mcp_defs = {
	"mcp", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_mcp2_defs = {
	"mcp2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
	MCP2_REG_DBG_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_pswhst_defs = {
	"pswhst", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
	PSWHST_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0
};

static struct block_defs block_pswhst2_defs = {
	"pswhst2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
	PSWHST2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0
};

static struct block_defs block_pswrd_defs = {
	"pswrd", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
	PSWRD_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2
};

static struct block_defs block_pswrd2_defs = {
	"pswrd2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
	PSWRD2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2
};

static struct block_defs block_pswwr_defs = {
	"pswwr", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
	PSWWR_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3
};

static struct block_defs block_pswwr2_defs = {
	"pswwr2", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	true, false, DBG_RESET_REG_MISC_PL_HV, 3
};

static struct block_defs block_pswrq_defs = {
	"pswrq", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
	PSWRQ_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1
};

static struct block_defs block_pswrq2_defs = {
	"pswrq2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
	PSWRQ2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1
};

static struct block_defs block_pglcs_defs = {
	"pglcs", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
	PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
	PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
	PGLCS_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
};

static struct block_defs block_ptu_defs = {
	"ptu", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
	PTU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
};

static struct block_defs block_dmae_defs = {
	"dmae", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
	DMAE_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
};

static struct block_defs block_tcm_defs = {
	"tcm", {true, true, true}, true, DBG_TSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
	TCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
};

static struct block_defs block_mcm_defs = {
	"mcm", {true, true, true}, true, DBG_MSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
	MCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
};

static struct block_defs block_ucm_defs = {
	"ucm", {true, true, true}, true, DBG_USTORM_ID,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
	UCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
};

static struct block_defs block_xcm_defs = {
	"xcm", {true, true, true}, true, DBG_XSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
	XCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
};

static struct block_defs block_ycm_defs = {
	"ycm", {true, true, true}, true, DBG_YSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
	YCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
};

static struct block_defs block_pcm_defs = {
	"pcm", {true, true, true}, true, DBG_PSTORM_ID,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
	PCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
};

static struct block_defs block_qm_defs = {
	"qm", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
	QM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
};

static struct block_defs block_tm_defs = {
	"tm", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
	TM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
};

static struct block_defs block_dorq_defs = {
	"dorq", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
	DORQ_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
};

static struct block_defs block_brb_defs = {
	"brb", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
	BRB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
};

static struct block_defs block_src_defs = {
	"src", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
	SRC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
};

static struct block_defs block_prs_defs = {
	"prs", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
	PRS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
};

static struct block_defs block_tsdm_defs = {
	"tsdm", {true, true, true}, true, DBG_TSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
	TSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
};

static struct block_defs block_msdm_defs = {
	"msdm", {true, true, true}, true, DBG_MSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
	MSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
};

static struct block_defs block_usdm_defs = {
	"usdm", {true, true, true}, true, DBG_USTORM_ID,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
	USDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
};

static struct block_defs block_xsdm_defs = {
	"xsdm", {true, true, true}, true, DBG_XSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
	XSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
};

static struct block_defs block_ysdm_defs = {
	"ysdm", {true, true, true}, true, DBG_YSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
	YSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
};

static struct block_defs block_psdm_defs = {
	"psdm", {true, true, true}, true, DBG_PSTORM_ID,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
	PSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
};

static struct block_defs block_tsem_defs = {
	"tsem", {true, true, true}, true, DBG_TSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
	TSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
};

static struct block_defs block_msem_defs = {
	"msem", {true, true, true}, true, DBG_MSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
	MSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
};

static struct block_defs block_usem_defs = {
	"usem", {true, true, true}, true, DBG_USTORM_ID,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
	USEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
};

static struct block_defs block_xsem_defs = {
	"xsem", {true, true, true}, true, DBG_XSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
	XSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
};

static struct block_defs block_ysem_defs = {
	"ysem", {true, true, true}, true, DBG_YSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
	YSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
};

static struct block_defs block_psem_defs = {
	"psem", {true, true, true}, true, DBG_PSTORM_ID,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
	PSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
};

static struct block_defs block_rss_defs = {
	"rss", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
	RSS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
};

static struct block_defs block_tmld_defs = {
	"tmld", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
	TMLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
};

static struct block_defs block_muld_defs = {
	"muld", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
	MULD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
};

static struct block_defs block_yuld_defs = {
	"yuld", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
	YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
	YULD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
};

static struct block_defs block_xyld_defs = {
	"xyld", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
	XYLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
};

static struct block_defs block_prm_defs = {
	"prm", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
	PRM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
};

static struct block_defs block_pbf_pb1_defs = {
	"pbf_pb1", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
	PBF_PB1_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,

static struct block_defs block_pbf_pb2_defs = {
	"pbf_pb2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
	PBF_PB2_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,

static struct block_defs block_rpb_defs = {
	"rpb", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
	RPB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
};

static struct block_defs block_btb_defs = {
	"btb", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
	BTB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
};

static struct block_defs block_pbf_defs = {
	"pbf", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
	PBF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
};

static struct block_defs block_rdif_defs = {
	"rdif", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
	RDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
};

static struct block_defs block_tdif_defs = {
	"tdif", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
	TDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
};

static struct block_defs block_cdu_defs = {
	"cdu", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
	CDU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
};

static struct block_defs block_ccfc_defs = {
	"ccfc", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
	CCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
};

static struct block_defs block_tcfc_defs = {
	"tcfc", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
	TCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
};

static struct block_defs block_igu_defs = {
	"igu", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
	IGU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
};

static struct block_defs block_cau_defs = {
	"cau", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
	CAU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
};

static struct block_defs block_umac_defs = {
	"umac", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
	UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
	UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
	UMAC_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
};

static struct block_defs block_xmac_defs = {
	"xmac", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_dbg_defs = {
	"dbg", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};

static struct block_defs block_nig_defs = {
	"nig", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
	NIG_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
};

static struct block_defs block_wol_defs = {
	"wol", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
	WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
	WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
	WOL_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
};

static struct block_defs block_bmbn_defs = {
	"bmbn", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
	BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
	BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
	BMBN_REG_DBG_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_ipc_defs = {
	"ipc", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};

static struct block_defs block_nwm_defs = {
	"nwm", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
	NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
	NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
	NWM_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
};

static struct block_defs block_nws_defs = {
	"nws", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};

static struct block_defs block_ms_defs = {
	"ms", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};

static struct block_defs block_phy_pcie_defs = {
	"phy_pcie", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
	PCIE_REG_DBG_COMMON_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_led_defs = {
	"led", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	true, true, DBG_RESET_REG_MISCS_PL_HV, 14
};

static struct block_defs block_misc_aeu_defs = {
	"misc_aeu", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_bar0_map_defs = {
	"bar0_map", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
	&block_pglue_b_defs,
	&block_pswhst2_defs,
	&block_pbf_pb1_defs,
	&block_pbf_pb2_defs,
	&block_phy_pcie_defs,
	&block_misc_aeu_defs,
	&block_bar0_map_defs,
};
static struct platform_defs s_platform_defs[] = {
static struct grc_param_defs s_grc_param_defs[] = {
	{{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
	{{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
	{{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
	{{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
	{{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
	{{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
	{{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
	 MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
	 MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
	{{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
	{{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
	{{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
	{{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
	{{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
};
static struct rss_mem_defs s_rss_mem_defs[] = {
	{ "rss_mem_cid", "rss_cid", 0,
	{ "rss_mem_key_msb", "rss_key", 1024,
	{ "rss_mem_key_lsb", "rss_key", 2048,
	{ "rss_mem_info", "rss_info", 3072,
	{ "rss_mem_ind", "rss_ind", 4096,
	  {(128 * 128), (128 * 128), (128 * 208)},

static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1", "vfc_ram", 0, 512},
	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
	{"vfc_ram_stt2", "vfc_ram", 640, 32},
	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
};
static struct big_ram_defs s_big_ram_defs[] = {
	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	  {4800, 4800, 5632} },
	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	  {2880, 2880, 3680} },
	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	  {1152, 1152, 1152} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
	{ MISCS_REG_RESET_PL_UA, 0x0,
	  {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
	{ MISCS_REG_RESET_PL_HV, 0x0,
	  {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
	{ MISCS_REG_RESET_PL_HV_2, 0x0,
	  {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
	{ MISC_REG_RESET_PL_UA, 0x0,
	  {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
	{ MISC_REG_RESET_PL_HV, 0x0,
	  {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
	  {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
	  {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
	  {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
};
static struct phy_defs s_phy_defs[] = {
	{"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
	{"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
	{"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
	{"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
};
/**************************** Private Functions ******************************/

/* Reads and returns a single dword from the specified unaligned buffer */
static u32 qed_read_unaligned_dword(u8 *buf)
{
	u32 dword;

	memcpy((u8 *)&dword, buf, sizeof(dword));
	return dword;
}
/* Initializes debug data for the specified device */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	if (dev_data->initialized)
		return DBG_STATUS_OK;

	if (QED_IS_K2(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_BB_B0;
		dev_data->mode_enable[MODE_BB_B0] = 1;
	} else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

	dev_data->platform_id = PLATFORM_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;
	dev_data->initialized = true;
	return DBG_STATUS_OK;
}
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 */
static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     u8 storm_id, struct fw_info *fw_info)
{
	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
		   SEM_FAST_REG_INT_RAM +
		   DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
		   sizeof(struct fw_info_location);
	struct fw_info_location fw_info_location;
	u32 *dest = (u32 *)&fw_info_location;
	int i;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));
	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
	     i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
	if (fw_info_location.size > 0 && fw_info_location.size <=
	    sizeof(struct fw_info)) {
		/* Read FW version info from Storm RAM */
		addr = fw_info_location.grc_addr;
		dest = (u32 *)fw_info;
		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
		     i++, addr += BYTES_IN_DWORD)
			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
	}
}
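/* Note (added): the read above is two-staged - the last line of the Storm's
 * fast-memory RAM holds a small fw_info_location {grc_addr, size} record,
 * and only if that record looks sane is the fw_info structure itself read
 * from the GRC address it points to.
 */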
/* Dumps the specified string to the specified buffer. Returns the dumped size
 * in bytes (actual length + 1 for the null character termination).
 */
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
	if (dump)
		strcpy(dump_buf, str);
	return (u32)strlen(str) + 1;
}
/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
 * in bytes.
 */
static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
{
	u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;

	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;

	if (dump && align_size)
		memset(dump_buf, 0, align_size);

	return align_size;
}
/* Writes the specified string param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_str_param(u32 *dump_buf,
			      bool dump,
			      const char *param_name, const char *param_val)
{
	char *char_buf = (char *)dump_buf;
	u32 offset = 0;

	/* Dump param name */
	offset += qed_dump_str(char_buf + offset, dump, param_name);

	/* Indicate a string param value */
	if (dump)
		*(char_buf + offset) = 1;
	offset++;

	/* Dump param value */
	offset += qed_dump_str(char_buf + offset, dump, param_val);

	/* Align buffer to next dword */
	offset += qed_dump_align(char_buf + offset, dump, offset);
	return BYTES_TO_DWORDS(offset);
}
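/* Resulting layout of a string param in the dump buffer (derived from the
 * helpers above): the NULL-terminated name, a single type byte of 1, the
 * NULL-terminated value, then zero padding up to the next dword boundary.
 */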
/* Writes the specified numeric param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_num_param(u32 *dump_buf,
			      bool dump, const char *param_name, u32 param_val)
{
	char *char_buf = (char *)dump_buf;
	u32 offset = 0;

	/* Dump param name */
	offset += qed_dump_str(char_buf + offset, dump, param_name);

	/* Indicate a numeric param value */
	if (dump)
		*(char_buf + offset) = 0;
	offset++;

	/* Align buffer to next dword */
	offset += qed_dump_align(char_buf + offset, dump, offset);

	/* Dump param value (and change offset from bytes to dwords) */
	offset = BYTES_TO_DWORDS(offset);
	if (dump)
		*(dump_buf + offset) = param_val;
	offset++;
	return offset;
}
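/* A numeric param therefore looks like: NULL-terminated name, a type byte of
 * 0, zero padding to the next dword boundary, then the 32-bit value in its
 * own dword. The dump/!dump split lets each dump routine be run once in a
 * size-only pass and once for the real copy.
 */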
/* Reads the FW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { {0}, {0} };

	/* Read FW image/version from PRAM in a non-reset SEMI */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
		/* Read FW version/image */
		if (!dev_data->block_in_reset
		    [s_storm_defs[storm_id].block_id]) {
			/* read FW info for the current Storm */
			qed_read_fw_info(p_hwfn,
					 p_ptt, storm_id, &fw_info);

			/* Create FW version/image strings */
			snprintf(fw_ver_str,
				 fw_info.ver.num.major,
				 fw_info.ver.num.minor,
				 fw_info.ver.num.rev,
				 fw_info.ver.num.eng);
			if (printed_chars < 0 || printed_chars >=
				  "Unexpected debug error: invalid FW version string\n");
			switch (fw_info.ver.image_id) {
				strcpy(fw_img_str, "main");
				strcpy(fw_img_str, "unknown");

	/* Dump FW version, image and timestamp */
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-version", fw_ver_str);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image", fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset,
				     "fw-timestamp", fw_info.ver.timestamp);
/* Reads the MFW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf, bool dump)
{
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
	u32 global_section_offsize, global_section_addr, mfw_ver;
	u32 public_data_addr, global_section_offsize_addr;

	/* Find MCP public data GRC address.
	 * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
	 */
	public_data_addr = qed_rd(p_hwfn, p_ptt,
				  MISC_REG_SHARED_MEM_ADDR) |
			   MCP_REG_SCRATCH;

	/* Find MCP public global section offset */
	global_section_offsize_addr = public_data_addr +
				      offsetof(struct mcp_public_data,
				      sizeof(offsize_t) * PUBLIC_GLOBAL;
	global_section_offsize = qed_rd(p_hwfn, p_ptt,
					global_section_offsize_addr);
	global_section_addr = MCP_REG_SCRATCH +
			      (global_section_offsize &
			       OFFSIZE_OFFSET_MASK) * 4;

	/* Read MFW version from MCP public global section */
	mfw_ver = qed_rd(p_hwfn, p_ptt,
			 global_section_addr +
			 offsetof(struct public_global, mfw_ver));

	/* Dump MFW version param */
	printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
				 (u8) (mfw_ver >> 24),
				 (u8) (mfw_ver >> 16),
				 (u8) (mfw_ver >> 8),
	if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
		  "Unexpected debug error: invalid MFW version string\n");

	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
/* Writes a section header to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_section_hdr(u32 *dump_buf,
				bool dump, const char *name, u32 num_params)
{
	return qed_dump_num_param(dump_buf, dump, name, num_params);
}
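/* In other words, a section header is just a numeric param whose name is the
 * section name and whose value is the number of params that follow it.
 */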
/* Writes the common global params to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	/* Find platform string and dump global params section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       NUM_COMMON_GLOBAL_PARAMS +
				       num_specific_global_params);
	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += qed_dump_mfw_ver_param(p_hwfn,
					 p_ptt, dump_buf + offset, dump);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "tools-version", TOOLS_VERSION);
	offset += qed_dump_str_param(dump_buf + offset,
				     s_chip_defs[dev_data->chip_id].name);
	offset += qed_dump_str_param(dump_buf + offset,
				     s_platform_defs[dev_data->platform_id].
	qed_dump_num_param(dump_buf + offset, dump, "pci-func",
/* Writes the last section to the specified buffer at the given offset.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
{
	u32 start_offset = offset, crc = ~0;

	/* Dump CRC section header */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);

	/* Calculate CRC32 and add it to the dword following the "last" section.
	 */
	if (dump)
		*(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
					      DWORDS_TO_BYTES(offset));
	offset++;
	return offset - start_offset;
}
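/* Note: crc is seeded with ~0 and the crc32() result is inverted before it
 * is stored, i.e. the stored dword covers every dump dword that precedes it
 * (the "last" section header included) in the conventional CRC-32
 * presentation (0xffffffff seed, inverted result).
 */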
/* Update blocks reset state */
static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
	u32 i;

	/* Read reset registers */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
			reg_val[i] = qed_rd(p_hwfn,
					    p_ptt, s_reset_regs_defs[i].addr);

	/* Check if blocks are in reset */
	for (i = 0; i < MAX_BLOCK_ID; i++)
		dev_data->block_in_reset[i] =
		    s_block_defs[i]->has_reset_bit &&
		    !(reg_val[s_block_defs[i]->reset_reg] &
		      BIT(s_block_defs[i]->reset_bit_offset));
}
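/* Reading convention (as used above): a set bit in the reset register means
 * the block is out of reset, so block_in_reset is true only when the block
 * has a reset bit and that bit is currently clear.
 */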
/* Enable / disable the Debug block */
static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, bool enable)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
}
/* Resets the Debug block */
static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;

	dbg_reset_reg_addr =
		s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
	new_reset_reg_val = old_reset_reg_val &
			    ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);

	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
}
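/* The two writes above pulse only the DBG block's bit in its reset register:
 * the first write clears the bit (asserting reset, per the convention noted
 * earlier), the second restores the original register value, leaving all
 * other blocks untouched.
 */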
static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum dbg_bus_frame_modes mode)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
}
/* Enable / disable Debug Bus clients according to the specified mask.
 * (1 = enable, 0 = disable)
 */
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u32 client_mask)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
	const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
	bool arg1, arg2;

	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
	case INIT_MODE_OP_OR:
	case INIT_MODE_OP_AND:
		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
							arg2) : (arg1 && arg2);
	default:
		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
	}
}
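/* The mode tree is thus a byte stream in prefix notation: an operator byte
 * (NOT/OR/AND) is followed by its operand subtree(s), and any byte >=
 * MAX_INIT_MODE_OPS is a leaf naming a mode whose enable flag is looked up.
 * For example (illustrative encoding only), the byte sequence
 * {INIT_MODE_OP_AND, leaf(MODE_ASIC), INIT_MODE_OP_NOT, leaf(MODE_K2)}
 * evaluates to "ASIC and not K2".
 */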
/* Returns the value of the specified GRC param */
static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
			     enum dbg_grc_params grc_param)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	return dev_data->grc.param_val[grc_param];
}
/* Clear all GRC params */
static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 i;

	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		dev_data->grc.param_set_by_user[i] = 0;
}
/* Assign default GRC param values */
static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 i;

	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		if (!dev_data->grc.param_set_by_user[i])
			dev_data->grc.param_val[i] =
			    s_grc_param_defs[i].default_val[dev_data->chip_id];
}
/* Returns true if the specified entity (indicated by GRC param) should be
 * included in the dump, false otherwise.
 */
static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
				enum dbg_grc_params grc_param)
{
	return qed_grc_get_param(p_hwfn, grc_param) > 0;
}
/* Returns true if the specified Storm should be included in the dump, false
 * otherwise.
 */
static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
				      enum dbg_storms storm)
{
	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
}
/* Returns true if the specified memory should be included in the dump, false
 * otherwise.
 */
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
				    enum block_id block_id, u8 mem_group_id)
{
	u8 i;

	/* Check Storm match */
	if (s_block_defs[block_id]->associated_to_storm &&
	    !qed_grc_is_storm_included(p_hwfn,
			(enum dbg_storms)s_block_defs[block_id]->storm_id))
		return false;

	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
		if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
		    mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
			return qed_grc_is_included(p_hwfn,
						   s_big_ram_defs[i].grc_param);
	if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
	    MEM_GROUP_PXP_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
	if (mem_group_id == MEM_GROUP_RAM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	if (mem_group_id == MEM_GROUP_PBUF)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	if (mem_group_id == MEM_GROUP_CAU_MEM ||
	    mem_group_id == MEM_GROUP_CAU_SB ||
	    mem_group_id == MEM_GROUP_CAU_PI)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	if (mem_group_id == MEM_GROUP_QM_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
	    mem_group_id == MEM_GROUP_TASK_CFC_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
	if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
	    MEM_GROUP_IGU_MSIX)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	if (mem_group_id == MEM_GROUP_MULD_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	if (mem_group_id == MEM_GROUP_PRS_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	if (mem_group_id == MEM_GROUP_DMAE_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	if (mem_group_id == MEM_GROUP_TM_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	if (mem_group_id == MEM_GROUP_SDM_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
	    MEM_GROUP_RDIF_CTX)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	if (mem_group_id == MEM_GROUP_CM_MEM)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
	if (mem_group_id == MEM_GROUP_IOR)
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);

	return true;
}
/* Stalls all Storms */
static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, bool stall)
{
	u8 reg_val = stall ? 1 : 0;
	u8 storm_id;

	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		if (qed_grc_is_storm_included(p_hwfn,
					      (enum dbg_storms)storm_id)) {
			u32 reg_addr =
			    s_storm_defs[storm_id].sem_fast_mem_addr +
			    SEM_FAST_REG_STALL_0;

			qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
		}
	}

	msleep(STALL_DELAY_MS);
}
/* Takes all blocks out of reset */
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
	u32 i;

	/* Fill reset regs values */
	for (i = 0; i < MAX_BLOCK_ID; i++)
		if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
			reg_val[s_block_defs[i]->reset_reg] |=
			    BIT(s_block_defs[i]->reset_bit_offset);

	/* Write reset registers */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
		if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
			reg_val[i] |= s_reset_regs_defs[i].unreset_val;
			if (reg_val[i])
				qed_wr(p_hwfn,
				       p_ptt,
				       s_reset_regs_defs[i].addr +
				       RESET_REG_UNRESET_OFFSET, reg_val[i]);
		}
	}
}
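
/* Note: writing to s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET is
 * assumed here to hit the companion "unreset" register of the corresponding
 * reset register, so only the selected bits are taken out of reset without a
 * read-modify-write on the reset register itself.
 */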
/* Returns the attention name offsets of the specified block */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
{
	const struct dbg_attn_block *base_attn_block_arr =
		(const struct dbg_attn_block *)
		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;

	return &base_attn_block_arr[block_id].per_type_data[attn_type];
}
/* Returns the attention registers of the specified block */
static const struct dbg_attn_reg *
qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
			u8 *num_attn_regs)
{
	const struct dbg_attn_block_type_data *block_type_data =
		qed_get_block_attn_data(block_id, attn_type);

	*num_attn_regs = block_type_data->num_regs;
	return &((const struct dbg_attn_reg *)
		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
							  regs_offset];
}

/* For each block, clear the status of all parities */
static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 reg_idx, num_attn_regs;
	u32 block_id;

	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_attn_reg *attn_reg_arr;

		if (dev_data->block_in_reset[block_id])
			continue;

		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);
		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];

			/* Check mode */
			bool eval_mode = GET_FIELD(reg_data->mode.data,
						   DBG_MODE_HDR_EVAL_MODE) > 0;
			u16 modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);

			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				/* Mode match - read parity status read-clear
				 * register
				 */
				qed_rd(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(reg_data->
						       sts_clr_address));
		}
	}
}
/* Dumps GRC registers section header. Returns the dumped size in dwords.
 * The following parameters are dumped:
 * - 'count' = num_dumped_entries
 * - 'split' = split_type
 * - 'id' = split_id (dumped only if split_id >= 0)
 * - 'param_name' = param_val (user param, dumped only if param_name != NULL
 *   and param_val != NULL)
 */
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
				 bool dump,
				 u32 num_reg_entries,
				 const char *split_type,
				 int split_id,
				 const char *param_name, const char *param_val)
{
	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
	u32 offset = 0;

	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_regs", num_params);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "count", num_reg_entries);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "split", split_type);
	if (split_id >= 0)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "id", split_id);
	if (param_name && param_val)
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, param_name, param_val);
	return offset;
}
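
/* For example (illustrative), a header produced for a per-port split with a
 * user param contains: count=<num_reg_entries>, split="port", id=<port_id>,
 * <param_name>=<param_val>; the exact set depends on split_id and param_name
 * as described above.
 */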
/* Dumps GRC register/memory. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt, u32 *dump_buf,
				  bool dump, u32 addr, u32 len)
{
	u32 offset = 0, i;

	if (dump) {
		*(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
		for (i = 0; i < len; i++, addr++, offset++)
			*(dump_buf + offset) = qed_rd(p_hwfn,
						      p_ptt,
						      DWORDS_TO_BYTES(addr));
	} else {
		offset += len + 1;
	}

	return offset;
}
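
/* Each dumped entry is therefore one header dword encoding the dword address
 * in the low bits and the length shifted by REG_DUMP_LEN_SHIFT, followed by
 * the register data itself. E.g. (illustrative) a 2-dword read from dword
 * address 0x1000 produces: 0x1000 | (2 << REG_DUMP_LEN_SHIFT), <dword0>,
 * <dword1>.
 */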
/* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct dbg_array input_regs_arr,
				     u32 *dump_buf,
				     bool dump,
				     bool block_enable[MAX_BLOCK_ID],
				     u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;
	while (input_offset < input_regs_arr.size_in_dwords) {
		const struct dbg_dump_cond_hdr *cond_hdr =
		    (const struct dbg_dump_cond_hdr *)
		    &input_regs_arr.ptr[input_offset++];
		bool eval_mode = GET_FIELD(cond_hdr->mode.data,
					   DBG_MODE_HDR_EVAL_MODE) > 0;

		/* Check mode/block */
		if (eval_mode) {
			u16 modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);

			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		if (mode_match && block_enable[cond_hdr->block_id]) {
			for (i = 0; i < cond_hdr->data_size;
			     i++, input_offset++) {
				const struct dbg_dump_reg *reg =
				    (const struct dbg_dump_reg *)
				    &input_regs_arr.ptr[input_offset];

				offset +=
				    qed_grc_dump_reg_entry(p_hwfn, p_ptt,
						   dump_buf + offset, dump,
						   GET_FIELD(reg->data,
						       DBG_DUMP_REG_ADDRESS),
						   GET_FIELD(reg->data,
						       DBG_DUMP_REG_LENGTH));
				(*num_dumped_reg_entries)++;
			}
		} else {
			input_offset += cond_hdr->data_size;
		}
	}

	return offset;
}
/* Dumps GRC registers entries for the specified split.
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct dbg_array input_regs_arr,
				   u32 *dump_buf,
				   bool dump,
				   bool block_enable[MAX_BLOCK_ID],
				   const char *split_type_name,
				   u32 split_id,
				   const char *param_name,
				   const char *param_val)
{
	u32 num_dumped_reg_entries, offset;

	/* Calculate register dump header size (and skip it for now) */
	offset = qed_grc_dump_regs_hdr(dump_buf,
				       false,
				       0,
				       split_type_name,
				       split_id, param_name, param_val);

	/* Dump registers */
	offset += qed_grc_dump_regs_entries(p_hwfn,
					    p_ptt,
					    input_regs_arr,
					    dump_buf + offset,
					    dump,
					    block_enable,
					    &num_dumped_reg_entries);

	/* Write register dump header */
	if (dump && num_dumped_reg_entries > 0)
		qed_grc_dump_regs_hdr(dump_buf,
				      dump,
				      num_dumped_reg_entries,
				      split_type_name,
				      split_id, param_name, param_val);

	return num_dumped_reg_entries > 0 ? offset : 0;
}
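
/* The header is written twice on purpose: first only to reserve its size
 * (dump=false), then again once the real number of entries is known. If
 * nothing was dumped for this split, the reserved header is dropped by
 * returning 0.
 */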
/* Dumps registers according to the input registers array.
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  bool block_enable[MAX_BLOCK_ID],
				  const char *param_name, const char *param_val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, input_offset = 0;
	u8 port_id, pf_id;

	if (dump)
		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
	while (input_offset <
	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
		const struct dbg_dump_split_hdr *split_hdr =
			(const struct dbg_dump_split_hdr *)
			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
		u8 split_type_id = GET_FIELD(split_hdr->hdr,
					     DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		u32 split_data_size = GET_FIELD(split_hdr->hdr,
						DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		struct dbg_array curr_input_regs_arr = {
			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
			split_data_size};

		switch (split_type_id) {
		case SPLIT_TYPE_NONE:
			offset += qed_grc_dump_split_data(p_hwfn,
							  p_ptt,
							  curr_input_regs_arr,
							  dump_buf + offset,
							  dump,
							  block_enable,
							  "eng",
							  (u32)(-1),
							  param_name,
							  param_val);
			break;
		case SPLIT_TYPE_PORT:
			for (port_id = 0;
			     port_id <
			     s_chip_defs[dev_data->chip_id].
			     per_platform[dev_data->platform_id].num_ports;
			     port_id++) {
				if (dump)
					qed_port_pretend(p_hwfn, p_ptt,
							 port_id);
				offset +=
				    qed_grc_dump_split_data(p_hwfn, p_ptt,
							    curr_input_regs_arr,
							    dump_buf + offset,
							    dump, block_enable,
							    "port", port_id,
							    param_name,
							    param_val);
			}
			break;
		case SPLIT_TYPE_PORT_PF:
			for (pf_id = 0;
			     pf_id <
			     s_chip_defs[dev_data->chip_id].
			     per_platform[dev_data->platform_id].num_pfs;
			     pf_id++) {
				if (dump)
					qed_fid_pretend(p_hwfn, p_ptt, pf_id);
				offset += qed_grc_dump_split_data(p_hwfn,
							p_ptt,
							curr_input_regs_arr,
							dump_buf + offset,
							dump, block_enable,
							"pf", pf_id, param_name,
							param_val);
			}
			break;
		default:
			break;
		}

		input_offset += split_data_size;
	}

	/* Pretend to original PF */
	if (dump)
		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return offset;
}
/* Dump reset registers. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 i, offset = 0, num_regs = 0;

	/* Calculate header size */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false, 0, "eng", -1, NULL, NULL);

	/* Write reset registers */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
		if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
			offset += qed_grc_dump_reg_entry(p_hwfn,
						p_ptt,
						dump_buf + offset,
						dump,
						BYTES_TO_DWORDS
						(s_reset_regs_defs[i].addr), 1);
			num_regs++;
		}
	}

	/* Write header */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true, num_regs, "eng", -1, NULL, NULL);
	return offset;
}
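
/* As with the other dump helpers in this file, callers are expected to invoke
 * these routines twice: once with dump=false to learn the required buffer
 * size in dwords (nothing is written in that pass), and once with dump=true
 * to actually fill the buffer. A minimal sketch of a hypothetical caller:
 *
 *	u32 size = qed_grc_dump_reset_regs(p_hwfn, p_ptt, buf, false);
 *	// ...make sure buf holds at least 'size' dwords...
 *	qed_grc_dump_reset_regs(p_hwfn, p_ptt, buf, true);
 */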
/* Dump registers that are modified during GRC Dump and therefore must be
 * dumped first. Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, num_reg_entries = 0, block_id;
	u8 storm_id, reg_idx, num_attn_regs;

	/* Calculate header size */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false, 0, "eng", -1, NULL, NULL);

	/* Write parity registers */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_attn_reg *attn_reg_arr;

		if (dev_data->block_in_reset[block_id] && dump)
			continue;

		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);
		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
				/* Mode match - read and dump registers */
				offset += qed_grc_dump_reg_entry(p_hwfn,
						p_ptt,
						dump_buf + offset,
						dump,
						reg_data->mask_address,
						1);
				offset += qed_grc_dump_reg_entry(p_hwfn,
						p_ptt,
						dump_buf + offset,
						dump,
						GET_FIELD(reg_data->data,
						    DBG_ATTN_REG_STS_ADDRESS),
						1);
				num_reg_entries += 2;
			}
		}
	}

	/* Write storm stall status registers */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
		    dump)
			continue;

		offset += qed_grc_dump_reg_entry(p_hwfn,
					p_ptt,
					dump_buf + offset,
					dump,
					BYTES_TO_DWORDS(s_storm_defs[storm_id].
							sem_fast_mem_addr +
							SEM_FAST_REG_STALLED),
					1);
		num_reg_entries++;
	}

	/* Write header */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true,
				      num_reg_entries, "eng", -1, NULL, NULL);
	return offset;
}
/* Dumps a GRC memory header (section and params).
 * The following parameters are dumped:
 * name - name is dumped only if it's not NULL.
 * addr - byte_addr is dumped only if name is NULL.
 * len - dword_len is always dumped.
 * width - bit_width is dumped if it's not zero.
 * packed - packed=1 is dumped if it's not false.
 * mem_group - mem_group is always dumped.
 * is_storm - true only if the memory is related to a Storm.
 * storm_letter - storm letter (valid only if is_storm is true).
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
				u32 *dump_buf,
				bool dump,
				const char *name,
				u32 byte_addr,
				u32 dword_len,
				u32 bit_width,
				bool packed,
				const char *mem_group,
				bool is_storm, char storm_letter)
{
	u8 num_params = 3;
	u32 offset = 0;
	char buf[64];

	if (!dword_len)
		DP_NOTICE(p_hwfn,
			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
	if (bit_width)
		num_params++;
	if (packed)
		num_params++;

	/* Dump section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_mem", num_params);
	if (name) {
		/* Dump name */
		if (is_storm) {
			strcpy(buf, "?STORM_");
			buf[0] = storm_letter;
			strcpy(buf + strlen(buf), name);
		} else {
			strcpy(buf, name);
		}

		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "name", buf);
		if (dump)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_DEBUG,
				   "Dumping %d registers from %s...\n",
				   dword_len, buf);
	} else {
		/* Dump address */
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "addr", byte_addr);
		if (dump && dword_len > 64)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_DEBUG,
				   "Dumping %d registers from address 0x%x...\n",
				   dword_len, byte_addr);
	}

	/* Dump len */
	offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);

	/* Dump bit width */
	if (bit_width)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "width", bit_width);

	/* Dump packed */
	if (packed)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "packed", 1);

	/* Dump reg type */
	if (is_storm) {
		strcpy(buf, "?STORM_");
		buf[0] = storm_letter;
		strcpy(buf + strlen(buf), mem_group);
	} else {
		strcpy(buf, mem_group);
	}

	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
	return offset;
}
/* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 *dump_buf,
			    bool dump,
			    const char *name,
			    u32 byte_addr,
			    u32 dword_len,
			    u32 bit_width,
			    bool packed,
			    const char *mem_group,
			    bool is_storm, char storm_letter)
{
	u32 offset = 0, i;

	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       name,
				       byte_addr,
				       dword_len,
				       bit_width,
				       packed,
				       mem_group, is_storm, storm_letter);
	if (dump) {
		for (i = 0; i < dword_len;
		     i++, byte_addr += BYTES_IN_DWORD, offset++)
			*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
	} else {
		offset += dword_len;
	}

	return offset;
}
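
/* Note that qed_grc_dump_mem takes a byte address, while qed_grc_dump_reg_entry
 * above takes a dword address; callers convert between the two with
 * DWORDS_TO_BYTES()/BYTES_TO_DWORDS() as needed.
 */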
2526 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
2527 static u32
qed_grc_dump_mem_entries(struct qed_hwfn
*p_hwfn
,
2528 struct qed_ptt
*p_ptt
,
2529 struct dbg_array input_mems_arr
,
2530 u32
*dump_buf
, bool dump
)
2532 u32 i
, offset
= 0, input_offset
= 0;
2533 bool mode_match
= true;
2535 while (input_offset
< input_mems_arr
.size_in_dwords
) {
2536 const struct dbg_dump_cond_hdr
*cond_hdr
;
2540 cond_hdr
= (const struct dbg_dump_cond_hdr
*)
2541 &input_mems_arr
.ptr
[input_offset
++];
2542 eval_mode
= GET_FIELD(cond_hdr
->mode
.data
,
2543 DBG_MODE_HDR_EVAL_MODE
) > 0;
2545 /* Check required mode */
2547 u16 modes_buf_offset
=
2548 GET_FIELD(cond_hdr
->mode
.data
,
2549 DBG_MODE_HDR_MODES_BUF_OFFSET
);
2551 mode_match
= qed_is_mode_match(p_hwfn
,
2556 input_offset
+= cond_hdr
->data_size
;
2560 num_entries
= cond_hdr
->data_size
/ MEM_DUMP_ENTRY_SIZE_DWORDS
;
2561 for (i
= 0; i
< num_entries
;
2562 i
++, input_offset
+= MEM_DUMP_ENTRY_SIZE_DWORDS
) {
2563 const struct dbg_dump_mem
*mem
=
2564 (const struct dbg_dump_mem
*)
2565 &input_mems_arr
.ptr
[input_offset
];
2568 mem_group_id
= GET_FIELD(mem
->dword0
,
2569 DBG_DUMP_MEM_MEM_GROUP_ID
);
2570 if (mem_group_id
>= MEM_GROUPS_NUM
) {
2571 DP_NOTICE(p_hwfn
, "Invalid mem_group_id\n");
2575 if (qed_grc_is_mem_included(p_hwfn
,
2576 (enum block_id
)cond_hdr
->block_id
,
2579 DWORDS_TO_BYTES(GET_FIELD(mem
->dword0
,
2580 DBG_DUMP_MEM_ADDRESS
));
2581 u32 mem_len
= GET_FIELD(mem
->dword1
,
2582 DBG_DUMP_MEM_LENGTH
);
2583 char storm_letter
= 'a';
2584 bool is_storm
= false;
2586 /* Update memory length for CCFC/TCFC memories
2587 * according to number of LCIDs/LTIDs.
2589 if (mem_group_id
== MEM_GROUP_CONN_CFC_MEM
)
2590 mem_len
= qed_grc_get_param(p_hwfn
,
2591 DBG_GRC_PARAM_NUM_LCIDS
)
2592 * (mem_len
/ MAX_LCIDS
);
2593 else if (mem_group_id
== MEM_GROUP_TASK_CFC_MEM
)
2594 mem_len
= qed_grc_get_param(p_hwfn
,
2595 DBG_GRC_PARAM_NUM_LTIDS
)
2596 * (mem_len
/ MAX_LTIDS
);
2598 /* If memory is associated with Storm, update
2601 if (s_block_defs
[cond_hdr
->block_id
]->
2602 associated_to_storm
) {
2605 s_storm_defs
[s_block_defs
[
2606 cond_hdr
->block_id
]->
2611 offset
+= qed_grc_dump_mem(p_hwfn
, p_ptt
,
2612 dump_buf
+ offset
, dump
, NULL
,
2613 mem_byte_addr
, mem_len
, 0,
2615 s_mem_group_names
[mem_group_id
],
2616 is_storm
, storm_letter
);
2624 /* Dumps GRC memories according to the input array dump_mem.
2625 * Returns the dumped size in dwords.
2627 static u32
qed_grc_dump_memories(struct qed_hwfn
*p_hwfn
,
2628 struct qed_ptt
*p_ptt
,
2629 u32
*dump_buf
, bool dump
)
2631 u32 offset
= 0, input_offset
= 0;
2633 while (input_offset
<
2634 s_dbg_arrays
[BIN_BUF_DBG_DUMP_MEM
].size_in_dwords
) {
2635 const struct dbg_dump_split_hdr
*split_hdr
=
2636 (const struct dbg_dump_split_hdr
*)
2637 &s_dbg_arrays
[BIN_BUF_DBG_DUMP_MEM
].ptr
[input_offset
++];
2638 u8 split_type_id
= GET_FIELD(split_hdr
->hdr
,
2639 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID
);
2640 u32 split_data_size
= GET_FIELD(split_hdr
->hdr
,
2641 DBG_DUMP_SPLIT_HDR_DATA_SIZE
);
2642 struct dbg_array curr_input_mems_arr
= {
2643 &s_dbg_arrays
[BIN_BUF_DBG_DUMP_MEM
].ptr
[input_offset
],
2646 switch (split_type_id
) {
2647 case SPLIT_TYPE_NONE
:
2648 offset
+= qed_grc_dump_mem_entries(p_hwfn
,
2650 curr_input_mems_arr
,
2656 "Dumping split memories is currently not supported\n");
2660 input_offset
+= split_data_size
;
2666 /* Dumps GRC context data for the specified Storm.
2667 * Returns the dumped size in dwords.
2669 static u32
qed_grc_dump_ctx_data(struct qed_hwfn
*p_hwfn
,
2670 struct qed_ptt
*p_ptt
,
2679 u32 i
, lid
, total_size
;
2684 lid_size
*= BYTES_IN_DWORD
;
2685 total_size
= num_lids
* lid_size
;
2686 offset
+= qed_grc_dump_mem_hdr(p_hwfn
,
2695 true, s_storm_defs
[storm_id
].letter
);
2697 /* Dump context data */
2699 for (lid
= 0; lid
< num_lids
; lid
++) {
2700 for (i
= 0; i
< lid_size
; i
++, offset
++) {
2703 s_storm_defs
[storm_id
].cm_ctx_wr_addr
,
2705 *(dump_buf
+ offset
) = qed_rd(p_hwfn
,
2711 offset
+= total_size
;
2717 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2718 static u32
qed_grc_dump_ctx(struct qed_hwfn
*p_hwfn
,
2719 struct qed_ptt
*p_ptt
, u32
*dump_buf
, bool dump
)
2724 for (storm_id
= 0; storm_id
< MAX_DBG_STORMS
; storm_id
++) {
2725 if (!qed_grc_is_storm_included(p_hwfn
,
2726 (enum dbg_storms
)storm_id
))
2729 /* Dump Conn AG context size */
2731 qed_grc_dump_ctx_data(p_hwfn
,
2736 qed_grc_get_param(p_hwfn
,
2737 DBG_GRC_PARAM_NUM_LCIDS
),
2738 s_storm_defs
[storm_id
].
2739 cm_conn_ag_ctx_lid_size
,
2740 s_storm_defs
[storm_id
].
2741 cm_conn_ag_ctx_rd_addr
,
2744 /* Dump Conn ST context size */
2746 qed_grc_dump_ctx_data(p_hwfn
,
2751 qed_grc_get_param(p_hwfn
,
2752 DBG_GRC_PARAM_NUM_LCIDS
),
2753 s_storm_defs
[storm_id
].
2754 cm_conn_st_ctx_lid_size
,
2755 s_storm_defs
[storm_id
].
2756 cm_conn_st_ctx_rd_addr
,
2759 /* Dump Task AG context size */
2761 qed_grc_dump_ctx_data(p_hwfn
,
2766 qed_grc_get_param(p_hwfn
,
2767 DBG_GRC_PARAM_NUM_LTIDS
),
2768 s_storm_defs
[storm_id
].
2769 cm_task_ag_ctx_lid_size
,
2770 s_storm_defs
[storm_id
].
2771 cm_task_ag_ctx_rd_addr
,
2774 /* Dump Task ST context size */
2776 qed_grc_dump_ctx_data(p_hwfn
,
2781 qed_grc_get_param(p_hwfn
,
2782 DBG_GRC_PARAM_NUM_LTIDS
),
2783 s_storm_defs
[storm_id
].
2784 cm_task_st_ctx_lid_size
,
2785 s_storm_defs
[storm_id
].
2786 cm_task_st_ctx_rd_addr
,
2793 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
2794 static u32
qed_grc_dump_iors(struct qed_hwfn
*p_hwfn
,
2795 struct qed_ptt
*p_ptt
, u32
*dump_buf
, bool dump
)
2797 char buf
[10] = "IOR_SET_?";
2798 u8 storm_id
, set_id
;
2801 for (storm_id
= 0; storm_id
< MAX_DBG_STORMS
; storm_id
++) {
2802 if (qed_grc_is_storm_included(p_hwfn
,
2803 (enum dbg_storms
)storm_id
)) {
2804 for (set_id
= 0; set_id
< NUM_IOR_SETS
; set_id
++) {
2806 s_storm_defs
[storm_id
].sem_fast_mem_addr
+
2807 SEM_FAST_REG_STORM_REG_FILE
+
2808 DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id
));
2810 buf
[strlen(buf
) - 1] = '0' + set_id
;
2811 offset
+= qed_grc_dump_mem(p_hwfn
,
2831 /* Dump VFC CAM. Returns the dumped size in dwords. */
2832 static u32
qed_grc_dump_vfc_cam(struct qed_hwfn
*p_hwfn
,
2833 struct qed_ptt
*p_ptt
,
2834 u32
*dump_buf
, bool dump
, u8 storm_id
)
2836 u32 total_size
= VFC_CAM_NUM_ROWS
* VFC_CAM_RESP_DWORDS
;
2837 u32 cam_addr
[VFC_CAM_ADDR_DWORDS
] = { 0 };
2838 u32 cam_cmd
[VFC_CAM_CMD_DWORDS
] = { 0 };
2842 offset
+= qed_grc_dump_mem_hdr(p_hwfn
,
2851 true, s_storm_defs
[storm_id
].letter
);
2853 /* Prepare CAM address */
2854 SET_VAR_FIELD(cam_addr
, VFC_CAM_ADDR
, OP
, VFC_OPCODE_CAM_RD
);
2855 for (row
= 0; row
< VFC_CAM_NUM_ROWS
;
2856 row
++, offset
+= VFC_CAM_RESP_DWORDS
) {
2857 /* Write VFC CAM command */
2858 SET_VAR_FIELD(cam_cmd
, VFC_CAM_CMD
, ROW
, row
);
2861 s_storm_defs
[storm_id
].sem_fast_mem_addr
+
2862 SEM_FAST_REG_VFC_DATA_WR
,
2863 cam_cmd
, VFC_CAM_CMD_DWORDS
);
2865 /* Write VFC CAM address */
2868 s_storm_defs
[storm_id
].sem_fast_mem_addr
+
2869 SEM_FAST_REG_VFC_ADDR
,
2870 cam_addr
, VFC_CAM_ADDR_DWORDS
);
2872 /* Read VFC CAM read response */
2875 s_storm_defs
[storm_id
].sem_fast_mem_addr
+
2876 SEM_FAST_REG_VFC_DATA_RD
,
2877 dump_buf
+ offset
, VFC_CAM_RESP_DWORDS
);
2880 offset
+= total_size
;
2886 /* Dump VFC RAM. Returns the dumped size in dwords. */
2887 static u32
qed_grc_dump_vfc_ram(struct qed_hwfn
*p_hwfn
,
2888 struct qed_ptt
*p_ptt
,
2891 u8 storm_id
, struct vfc_ram_defs
*ram_defs
)
2893 u32 total_size
= ram_defs
->num_rows
* VFC_RAM_RESP_DWORDS
;
2894 u32 ram_addr
[VFC_RAM_ADDR_DWORDS
] = { 0 };
2895 u32 ram_cmd
[VFC_RAM_CMD_DWORDS
] = { 0 };
2899 offset
+= qed_grc_dump_mem_hdr(p_hwfn
,
2907 ram_defs
->type_name
,
2908 true, s_storm_defs
[storm_id
].letter
);
2910 /* Prepare RAM address */
2911 SET_VAR_FIELD(ram_addr
, VFC_RAM_ADDR
, OP
, VFC_OPCODE_RAM_RD
);
2914 return offset
+ total_size
;
2916 for (row
= ram_defs
->base_row
;
2917 row
< ram_defs
->base_row
+ ram_defs
->num_rows
;
2918 row
++, offset
+= VFC_RAM_RESP_DWORDS
) {
2919 /* Write VFC RAM command */
2922 s_storm_defs
[storm_id
].sem_fast_mem_addr
+
2923 SEM_FAST_REG_VFC_DATA_WR
,
2924 ram_cmd
, VFC_RAM_CMD_DWORDS
);
2926 /* Write VFC RAM address */
2927 SET_VAR_FIELD(ram_addr
, VFC_RAM_ADDR
, ROW
, row
);
2930 s_storm_defs
[storm_id
].sem_fast_mem_addr
+
2931 SEM_FAST_REG_VFC_ADDR
,
2932 ram_addr
, VFC_RAM_ADDR_DWORDS
);
2934 /* Read VFC RAM read response */
2937 s_storm_defs
[storm_id
].sem_fast_mem_addr
+
2938 SEM_FAST_REG_VFC_DATA_RD
,
2939 dump_buf
+ offset
, VFC_RAM_RESP_DWORDS
);
2945 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
2946 static u32
qed_grc_dump_vfc(struct qed_hwfn
*p_hwfn
,
2947 struct qed_ptt
*p_ptt
, u32
*dump_buf
, bool dump
)
2949 struct dbg_tools_data
*dev_data
= &p_hwfn
->dbg_info
;
2953 for (storm_id
= 0; storm_id
< MAX_DBG_STORMS
; storm_id
++) {
2954 if (qed_grc_is_storm_included(p_hwfn
,
2955 (enum dbg_storms
)storm_id
) &&
2956 s_storm_defs
[storm_id
].has_vfc
&&
2957 (storm_id
!= DBG_PSTORM_ID
||
2958 dev_data
->platform_id
== PLATFORM_ASIC
)) {
2960 offset
+= qed_grc_dump_vfc_cam(p_hwfn
,
2966 for (i
= 0; i
< NUM_VFC_RAM_TYPES
; i
++)
2967 offset
+= qed_grc_dump_vfc_ram(p_hwfn
,
2981 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
2982 static u32
qed_grc_dump_rss(struct qed_hwfn
*p_hwfn
,
2983 struct qed_ptt
*p_ptt
, u32
*dump_buf
, bool dump
)
2985 struct dbg_tools_data
*dev_data
= &p_hwfn
->dbg_info
;
2989 for (rss_mem_id
= 0; rss_mem_id
< NUM_RSS_MEM_TYPES
; rss_mem_id
++) {
2990 struct rss_mem_defs
*rss_defs
= &s_rss_mem_defs
[rss_mem_id
];
2991 u32 num_entries
= rss_defs
->num_entries
[dev_data
->chip_id
];
2992 u32 entry_width
= rss_defs
->entry_width
[dev_data
->chip_id
];
2993 u32 total_size
= (num_entries
* entry_width
) / 32;
2994 bool packed
= (entry_width
== 16);
2995 u32 addr
= rss_defs
->addr
;
2998 offset
+= qed_grc_dump_mem_hdr(p_hwfn
,
3006 rss_defs
->type_name
, false, 0);
3009 offset
+= total_size
;
3014 for (i
= 0; i
< BYTES_TO_DWORDS(total_size
); i
++, addr
++) {
3015 qed_wr(p_hwfn
, p_ptt
, RSS_REG_RSS_RAM_ADDR
, addr
);
3016 for (j
= 0; j
< BYTES_IN_DWORD
; j
++, offset
++)
3017 *(dump_buf
+ offset
) =
3018 qed_rd(p_hwfn
, p_ptt
,
3019 RSS_REG_RSS_RAM_DATA
+
3020 DWORDS_TO_BYTES(j
));
3027 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3028 static u32
qed_grc_dump_big_ram(struct qed_hwfn
*p_hwfn
,
3029 struct qed_ptt
*p_ptt
,
3030 u32
*dump_buf
, bool dump
, u8 big_ram_id
)
3032 struct dbg_tools_data
*dev_data
= &p_hwfn
->dbg_info
;
3033 char mem_name
[12] = "???_BIG_RAM";
3034 char type_name
[8] = "???_RAM";
3035 u32 ram_size
, total_blocks
;
3036 u32 offset
= 0, i
, j
;
3039 s_big_ram_defs
[big_ram_id
].num_of_blocks
[dev_data
->chip_id
];
3040 ram_size
= total_blocks
* BIG_RAM_BLOCK_SIZE_DWORDS
;
3042 strncpy(type_name
, s_big_ram_defs
[big_ram_id
].instance_name
,
3043 strlen(s_big_ram_defs
[big_ram_id
].instance_name
));
3044 strncpy(mem_name
, s_big_ram_defs
[big_ram_id
].instance_name
,
3045 strlen(s_big_ram_defs
[big_ram_id
].instance_name
));
3047 /* Dump memory header */
3048 offset
+= qed_grc_dump_mem_hdr(p_hwfn
,
3054 BIG_RAM_BLOCK_SIZE_BYTES
* 8,
3055 false, type_name
, false, 0);
3058 return offset
+ ram_size
;
3060 /* Read and dump Big RAM data */
3061 for (i
= 0; i
< total_blocks
/ 2; i
++) {
3062 qed_wr(p_hwfn
, p_ptt
, s_big_ram_defs
[big_ram_id
].addr_reg_addr
,
3064 for (j
= 0; j
< 2 * BIG_RAM_BLOCK_SIZE_DWORDS
; j
++, offset
++)
3065 *(dump_buf
+ offset
) = qed_rd(p_hwfn
, p_ptt
,
3066 s_big_ram_defs
[big_ram_id
].
3068 DWORDS_TO_BYTES(j
));
3074 static u32
qed_grc_dump_mcp(struct qed_hwfn
*p_hwfn
,
3075 struct qed_ptt
*p_ptt
, u32
*dump_buf
, bool dump
)
3077 bool block_enable
[MAX_BLOCK_ID
] = { 0 };
3078 bool halted
= false;
3083 halted
= !qed_mcp_halt(p_hwfn
, p_ptt
);
3085 DP_NOTICE(p_hwfn
, "MCP halt failed!\n");
3088 /* Dump MCP scratchpad */
3089 offset
+= qed_grc_dump_mem(p_hwfn
,
3095 MCP_REG_SCRATCH_SIZE
,
3096 0, false, "MCP", false, 0);
3098 /* Dump MCP cpu_reg_file */
3099 offset
+= qed_grc_dump_mem(p_hwfn
,
3104 MCP_REG_CPU_REG_FILE
,
3105 MCP_REG_CPU_REG_FILE_SIZE
,
3106 0, false, "MCP", false, 0);
3108 /* Dump MCP registers */
3109 block_enable
[BLOCK_MCP
] = true;
3110 offset
+= qed_grc_dump_registers(p_hwfn
,
3113 dump
, block_enable
, "block", "MCP");
3115 /* Dump required non-MCP registers */
3116 offset
+= qed_grc_dump_regs_hdr(dump_buf
+ offset
,
3117 dump
, 1, "eng", -1, "block", "MCP");
3118 offset
+= qed_grc_dump_reg_entry(p_hwfn
,
3123 (MISC_REG_SHARED_MEM_ADDR
), 1);
3126 if (halted
&& qed_mcp_resume(p_hwfn
, p_ptt
))
3127 DP_NOTICE(p_hwfn
, "Failed to resume MCP after halt!\n");
3131 /* Dumps the tbus indirect memory for all PHYs. */
3132 static u32
qed_grc_dump_phy(struct qed_hwfn
*p_hwfn
,
3133 struct qed_ptt
*p_ptt
, u32
*dump_buf
, bool dump
)
3135 u32 offset
= 0, tbus_lo_offset
, tbus_hi_offset
;
3139 for (phy_id
= 0; phy_id
< ARRAY_SIZE(s_phy_defs
); phy_id
++) {
3140 struct phy_defs
*phy_defs
= &s_phy_defs
[phy_id
];
3143 printed_chars
= snprintf(mem_name
, sizeof(mem_name
), "tbus_%s",
3144 phy_defs
->phy_name
);
3145 if (printed_chars
< 0 || printed_chars
>= sizeof(mem_name
))
3147 "Unexpected debug error: invalid PHY memory name\n");
3148 offset
+= qed_grc_dump_mem_hdr(p_hwfn
,
3153 PHY_DUMP_SIZE_DWORDS
,
3154 16, true, mem_name
, false, 0);
3156 u32 addr_lo_addr
= phy_defs
->base_addr
+
3157 phy_defs
->tbus_addr_lo_addr
;
3158 u32 addr_hi_addr
= phy_defs
->base_addr
+
3159 phy_defs
->tbus_addr_hi_addr
;
3160 u32 data_lo_addr
= phy_defs
->base_addr
+
3161 phy_defs
->tbus_data_lo_addr
;
3162 u32 data_hi_addr
= phy_defs
->base_addr
+
3163 phy_defs
->tbus_data_hi_addr
;
3164 u8
*bytes_buf
= (u8
*)(dump_buf
+ offset
);
3166 for (tbus_hi_offset
= 0;
3167 tbus_hi_offset
< (NUM_PHY_TBUS_ADDRESSES
>> 8);
3170 p_ptt
, addr_hi_addr
, tbus_hi_offset
);
3171 for (tbus_lo_offset
= 0; tbus_lo_offset
< 256;
3175 addr_lo_addr
, tbus_lo_offset
);
3177 (u8
)qed_rd(p_hwfn
, p_ptt
,
3180 (u8
)qed_rd(p_hwfn
, p_ptt
,
3186 offset
+= PHY_DUMP_SIZE_DWORDS
;
static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				enum block_id block_id,
				u8 line_id,
				u8 cycle_en,
				u8 right_shift, u8 force_valid, u8 force_frame)
{
	struct block_defs *p_block_defs = s_block_defs[block_id];

	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
}
3208 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3209 static u32
qed_grc_dump_static_debug(struct qed_hwfn
*p_hwfn
,
3210 struct qed_ptt
*p_ptt
,
3211 u32
*dump_buf
, bool dump
)
3213 u32 block_dwords
= NUM_DBG_BUS_LINES
* STATIC_DEBUG_LINE_DWORDS
;
3214 struct dbg_tools_data
*dev_data
= &p_hwfn
->dbg_info
;
3215 u32 offset
= 0, block_id
, line_id
, addr
, i
;
3216 struct block_defs
*p_block_defs
;
3220 QED_MSG_DEBUG
, "Dumping static debug data...\n");
3222 /* Disable all blocks debug output */
3223 for (block_id
= 0; block_id
< MAX_BLOCK_ID
; block_id
++) {
3224 p_block_defs
= s_block_defs
[block_id
];
3226 if (p_block_defs
->has_dbg_bus
[dev_data
->chip_id
])
3227 qed_wr(p_hwfn
, p_ptt
,
3228 p_block_defs
->dbg_cycle_enable_addr
, 0);
3231 qed_bus_reset_dbg_block(p_hwfn
, p_ptt
);
3232 qed_bus_set_framing_mode(p_hwfn
,
3233 p_ptt
, DBG_BUS_FRAME_MODE_8HW_0ST
);
3235 p_ptt
, DBG_REG_DEBUG_TARGET
, DBG_BUS_TARGET_ID_INT_BUF
);
3236 qed_wr(p_hwfn
, p_ptt
, DBG_REG_FULL_MODE
, 1);
3237 qed_bus_enable_dbg_block(p_hwfn
, p_ptt
, true);
3240 /* Dump all static debug lines for each relevant block */
3241 for (block_id
= 0; block_id
< MAX_BLOCK_ID
; block_id
++) {
3242 p_block_defs
= s_block_defs
[block_id
];
3244 if (!p_block_defs
->has_dbg_bus
[dev_data
->chip_id
])
3247 /* Dump static section params */
3248 offset
+= qed_grc_dump_mem_hdr(p_hwfn
,
3251 p_block_defs
->name
, 0,
3252 block_dwords
, 32, false,
3253 "STATIC", false, 0);
3255 if (dump
&& !dev_data
->block_in_reset
[block_id
]) {
3257 p_block_defs
->dbg_client_id
[dev_data
->chip_id
];
3259 /* Enable block's client */
3260 qed_bus_enable_clients(p_hwfn
, p_ptt
,
3261 BIT(dbg_client_id
));
3263 for (line_id
= 0; line_id
< NUM_DBG_BUS_LINES
;
3265 /* Configure debug line ID */
3266 qed_config_dbg_line(p_hwfn
,
3268 (enum block_id
)block_id
,
3272 /* Read debug line info */
3273 for (i
= 0, addr
= DBG_REG_CALENDAR_OUT_DATA
;
3274 i
< STATIC_DEBUG_LINE_DWORDS
;
3275 i
++, offset
++, addr
+= BYTES_IN_DWORD
)
3276 dump_buf
[offset
] = qed_rd(p_hwfn
, p_ptt
,
3280 /* Disable block's client and debug output */
3281 qed_bus_enable_clients(p_hwfn
, p_ptt
, 0);
3282 qed_wr(p_hwfn
, p_ptt
,
3283 p_block_defs
->dbg_cycle_enable_addr
, 0);
3285 /* All lines are invalid - dump zeros */
3287 memset(dump_buf
+ offset
, 0,
3288 DWORDS_TO_BYTES(block_dwords
));
3289 offset
+= block_dwords
;
3294 qed_bus_enable_dbg_block(p_hwfn
, p_ptt
, false);
3295 qed_bus_enable_clients(p_hwfn
, p_ptt
, 0);
3301 /* Performs GRC Dump to the specified buffer.
3302 * Returns the dumped size in dwords.
3304 static enum dbg_status
qed_grc_dump(struct qed_hwfn
*p_hwfn
,
3305 struct qed_ptt
*p_ptt
,
3307 bool dump
, u32
*num_dumped_dwords
)
3309 struct dbg_tools_data
*dev_data
= &p_hwfn
->dbg_info
;
3310 bool parities_masked
= false;
3311 u8 i
, port_mode
= 0;
3314 /* Check if emulation platform */
3315 *num_dumped_dwords
= 0;
3317 /* Fill GRC parameters that were not set by the user with their default
3320 qed_dbg_grc_set_params_default(p_hwfn
);
3322 /* Find port mode */
3324 switch (qed_rd(p_hwfn
, p_ptt
, MISC_REG_PORT_MODE
)) {
3337 /* Update reset state */
3339 qed_update_blocks_reset_state(p_hwfn
, p_ptt
);
3341 /* Dump global params */
3342 offset
+= qed_dump_common_global_params(p_hwfn
,
3344 dump_buf
+ offset
, dump
, 4);
3345 offset
+= qed_dump_str_param(dump_buf
+ offset
,
3346 dump
, "dump-type", "grc-dump");
3347 offset
+= qed_dump_num_param(dump_buf
+ offset
,
3350 qed_grc_get_param(p_hwfn
,
3351 DBG_GRC_PARAM_NUM_LCIDS
));
3352 offset
+= qed_dump_num_param(dump_buf
+ offset
,
3355 qed_grc_get_param(p_hwfn
,
3356 DBG_GRC_PARAM_NUM_LTIDS
));
3357 offset
+= qed_dump_num_param(dump_buf
+ offset
,
3358 dump
, "num-ports", port_mode
);
3360 /* Dump reset registers (dumped before taking blocks out of reset ) */
3361 if (qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_REGS
))
3362 offset
+= qed_grc_dump_reset_regs(p_hwfn
,
3364 dump_buf
+ offset
, dump
);
3366 /* Take all blocks out of reset (using reset registers) */
3368 qed_grc_unreset_blocks(p_hwfn
, p_ptt
);
3369 qed_update_blocks_reset_state(p_hwfn
, p_ptt
);
3372 /* Disable all parities using MFW command */
3374 parities_masked
= !qed_mcp_mask_parities(p_hwfn
, p_ptt
, 1);
3375 if (!parities_masked
) {
3376 if (qed_grc_get_param
3377 (p_hwfn
, DBG_GRC_PARAM_PARITY_SAFE
))
3378 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY
;
3381 "Failed to mask parities using MFW\n");
3385 /* Dump modified registers (dumped before modifying them) */
3386 if (qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_REGS
))
3387 offset
+= qed_grc_dump_modified_regs(p_hwfn
,
3389 dump_buf
+ offset
, dump
);
3393 (qed_grc_is_included(p_hwfn
,
3394 DBG_GRC_PARAM_DUMP_IOR
) ||
3395 qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_VFC
)))
3396 qed_grc_stall_storms(p_hwfn
, p_ptt
, true);
3399 if (qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_REGS
)) {
3400 /* Dump all blocks except MCP */
3401 bool block_enable
[MAX_BLOCK_ID
];
3403 for (i
= 0; i
< MAX_BLOCK_ID
; i
++)
3404 block_enable
[i
] = true;
3405 block_enable
[BLOCK_MCP
] = false;
3406 offset
+= qed_grc_dump_registers(p_hwfn
,
3411 block_enable
, NULL
, NULL
);
3415 offset
+= qed_grc_dump_memories(p_hwfn
, p_ptt
, dump_buf
+ offset
, dump
);
3418 if (qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_MCP
))
3419 offset
+= qed_grc_dump_mcp(p_hwfn
,
3420 p_ptt
, dump_buf
+ offset
, dump
);
3423 if (qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_CM_CTX
))
3424 offset
+= qed_grc_dump_ctx(p_hwfn
,
3425 p_ptt
, dump_buf
+ offset
, dump
);
3427 /* Dump RSS memories */
3428 if (qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_RSS
))
3429 offset
+= qed_grc_dump_rss(p_hwfn
,
3430 p_ptt
, dump_buf
+ offset
, dump
);
3433 for (i
= 0; i
< NUM_BIG_RAM_TYPES
; i
++)
3434 if (qed_grc_is_included(p_hwfn
, s_big_ram_defs
[i
].grc_param
))
3435 offset
+= qed_grc_dump_big_ram(p_hwfn
,
3441 if (qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_IOR
))
3442 offset
+= qed_grc_dump_iors(p_hwfn
,
3443 p_ptt
, dump_buf
+ offset
, dump
);
3446 if (qed_grc_is_included(p_hwfn
, DBG_GRC_PARAM_DUMP_VFC
))
3447 offset
+= qed_grc_dump_vfc(p_hwfn
,
3448 p_ptt
, dump_buf
+ offset
, dump
);
3451 if (qed_grc_is_included(p_hwfn
,
3452 DBG_GRC_PARAM_DUMP_PHY
) && dev_data
->chip_id
==
3453 CHIP_K2
&& dev_data
->platform_id
== PLATFORM_ASIC
)
3454 offset
+= qed_grc_dump_phy(p_hwfn
,
3455 p_ptt
, dump_buf
+ offset
, dump
);
3457 /* Dump static debug data */
3458 if (qed_grc_is_included(p_hwfn
,
3459 DBG_GRC_PARAM_DUMP_STATIC
) &&
3460 dev_data
->bus
.state
== DBG_BUS_STATE_IDLE
)
3461 offset
+= qed_grc_dump_static_debug(p_hwfn
,
3463 dump_buf
+ offset
, dump
);
3465 /* Dump last section */
3466 offset
+= qed_dump_last_section(dump_buf
, offset
, dump
);
3468 /* Unstall storms */
3469 if (qed_grc_get_param(p_hwfn
, DBG_GRC_PARAM_UNSTALL
))
3470 qed_grc_stall_storms(p_hwfn
, p_ptt
, false);
3472 /* Clear parity status */
3473 qed_grc_clear_all_prty(p_hwfn
, p_ptt
);
3475 /* Enable all parities using MFW command */
3476 if (parities_masked
)
3477 qed_mcp_mask_parities(p_hwfn
, p_ptt
, 0);
3480 *num_dumped_dwords
= offset
;
3482 return DBG_STATUS_OK
;
3485 /* Writes the specified failing Idle Check rule to the specified buffer.
3486 * Returns the dumped size in dwords.
3488 static u32
qed_idle_chk_dump_failure(struct qed_hwfn
*p_hwfn
,
3489 struct qed_ptt
*p_ptt
,
3494 const struct dbg_idle_chk_rule
*rule
,
3495 u16 fail_entry_id
, u32
*cond_reg_values
)
3497 const union dbg_idle_chk_reg
*regs
= &((const union dbg_idle_chk_reg
*)
3499 [BIN_BUF_DBG_IDLE_CHK_REGS
].
3500 ptr
)[rule
->reg_offset
];
3501 const struct dbg_idle_chk_cond_reg
*cond_regs
= ®s
[0].cond_reg
;
3502 struct dbg_tools_data
*dev_data
= &p_hwfn
->dbg_info
;
3503 struct dbg_idle_chk_result_hdr
*hdr
=
3504 (struct dbg_idle_chk_result_hdr
*)dump_buf
;
3505 const struct dbg_idle_chk_info_reg
*info_regs
=
3506 ®s
[rule
->num_cond_regs
].info_reg
;
3507 u32 next_reg_offset
= 0, i
, offset
= 0;
3510 /* Dump rule data */
3512 memset(hdr
, 0, sizeof(*hdr
));
3513 hdr
->rule_id
= rule_id
;
3514 hdr
->mem_entry_id
= fail_entry_id
;
3515 hdr
->severity
= rule
->severity
;
3516 hdr
->num_dumped_cond_regs
= rule
->num_cond_regs
;
3519 offset
+= IDLE_CHK_RESULT_HDR_DWORDS
;
3521 /* Dump condition register values */
3522 for (reg_id
= 0; reg_id
< rule
->num_cond_regs
; reg_id
++) {
3523 const struct dbg_idle_chk_cond_reg
*reg
= &cond_regs
[reg_id
];
3525 /* Write register header */
3527 struct dbg_idle_chk_result_reg_hdr
*reg_hdr
=
3528 (struct dbg_idle_chk_result_reg_hdr
*)(dump_buf
3530 offset
+= IDLE_CHK_RESULT_REG_HDR_DWORDS
;
3532 sizeof(struct dbg_idle_chk_result_reg_hdr
));
3533 reg_hdr
->start_entry
= reg
->start_entry
;
3534 reg_hdr
->size
= reg
->entry_size
;
3535 SET_FIELD(reg_hdr
->data
,
3536 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM
,
3537 reg
->num_entries
> 1 || reg
->start_entry
> 0
3539 SET_FIELD(reg_hdr
->data
,
3540 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID
, reg_id
);
3542 /* Write register values */
3543 for (i
= 0; i
< reg_hdr
->size
;
3544 i
++, next_reg_offset
++, offset
++)
3546 cond_reg_values
[next_reg_offset
];
3548 offset
+= IDLE_CHK_RESULT_REG_HDR_DWORDS
+
3553 /* Dump info register values */
3554 for (reg_id
= 0; reg_id
< rule
->num_info_regs
; reg_id
++) {
3555 const struct dbg_idle_chk_info_reg
*reg
= &info_regs
[reg_id
];
3559 offset
+= IDLE_CHK_RESULT_REG_HDR_DWORDS
+ reg
->size
;
3563 /* Check if register's block is in reset */
3564 block_id
= GET_FIELD(reg
->data
, DBG_IDLE_CHK_INFO_REG_BLOCK_ID
);
3565 if (block_id
>= MAX_BLOCK_ID
) {
3566 DP_NOTICE(p_hwfn
, "Invalid block_id\n");
3570 if (!dev_data
->block_in_reset
[block_id
]) {
3571 bool eval_mode
= GET_FIELD(reg
->mode
.data
,
3572 DBG_MODE_HDR_EVAL_MODE
) > 0;
3573 bool mode_match
= true;
3577 u16 modes_buf_offset
=
3578 GET_FIELD(reg
->mode
.data
,
3579 DBG_MODE_HDR_MODES_BUF_OFFSET
);
3581 qed_is_mode_match(p_hwfn
,
3587 DWORDS_TO_BYTES(GET_FIELD(reg
->data
,
3588 DBG_IDLE_CHK_INFO_REG_ADDRESS
));
3590 /* Write register header */
3591 struct dbg_idle_chk_result_reg_hdr
*reg_hdr
=
3592 (struct dbg_idle_chk_result_reg_hdr
*)
3593 (dump_buf
+ offset
);
3595 offset
+= IDLE_CHK_RESULT_REG_HDR_DWORDS
;
3596 hdr
->num_dumped_info_regs
++;
3597 memset(reg_hdr
, 0, sizeof(*reg_hdr
));
3598 reg_hdr
->size
= reg
->size
;
3599 SET_FIELD(reg_hdr
->data
,
3600 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID
,
3601 rule
->num_cond_regs
+ reg_id
);
3603 /* Write register values */
3604 for (i
= 0; i
< reg
->size
;
3605 i
++, offset
++, grc_addr
+= 4)
3607 qed_rd(p_hwfn
, p_ptt
, grc_addr
);
3615 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
3617 qed_idle_chk_dump_rule_entries(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
,
3618 u32
*dump_buf
, bool dump
,
3619 const struct dbg_idle_chk_rule
*input_rules
,
3620 u32 num_input_rules
, u32
*num_failing_rules
)
3622 struct dbg_tools_data
*dev_data
= &p_hwfn
->dbg_info
;
3623 u32 cond_reg_values
[IDLE_CHK_MAX_ENTRIES_SIZE
];
3624 u32 i
, j
, offset
= 0;
3628 *num_failing_rules
= 0;
3629 for (i
= 0; i
< num_input_rules
; i
++) {
3630 const struct dbg_idle_chk_cond_reg
*cond_regs
;
3631 const struct dbg_idle_chk_rule
*rule
;
3632 const union dbg_idle_chk_reg
*regs
;
3633 u16 num_reg_entries
= 1;
3634 bool check_rule
= true;
3635 const u32
*imm_values
;
3637 rule
= &input_rules
[i
];
3638 regs
= &((const union dbg_idle_chk_reg
*)
3639 s_dbg_arrays
[BIN_BUF_DBG_IDLE_CHK_REGS
].ptr
)
3641 cond_regs
= ®s
[0].cond_reg
;
3642 imm_values
= &s_dbg_arrays
[BIN_BUF_DBG_IDLE_CHK_IMMS
].ptr
3645 /* Check if all condition register blocks are out of reset, and
3646 * find maximal number of entries (all condition registers that
3647 * are memories must have the same size, which is > 1).
3649 for (reg_id
= 0; reg_id
< rule
->num_cond_regs
&& check_rule
;
3651 u32 block_id
= GET_FIELD(cond_regs
[reg_id
].data
,
3652 DBG_IDLE_CHK_COND_REG_BLOCK_ID
);
3654 if (block_id
>= MAX_BLOCK_ID
) {
3655 DP_NOTICE(p_hwfn
, "Invalid block_id\n");
3659 check_rule
= !dev_data
->block_in_reset
[block_id
];
3660 if (cond_regs
[reg_id
].num_entries
> num_reg_entries
)
3661 num_reg_entries
= cond_regs
[reg_id
].num_entries
;
3664 if (!check_rule
&& dump
)
3667 /* Go over all register entries (number of entries is the same
3668 * for all condition registers).
3670 for (entry_id
= 0; entry_id
< num_reg_entries
; entry_id
++) {
3671 /* Read current entry of all condition registers */
3673 u32 next_reg_offset
= 0;
3676 reg_id
< rule
->num_cond_regs
;
3678 const struct dbg_idle_chk_cond_reg
3679 *reg
= &cond_regs
[reg_id
];
3681 /* Find GRC address (if it's a memory,
3682 * the address of the specific entry is
3687 GET_FIELD(reg
->data
,
3688 DBG_IDLE_CHK_COND_REG_ADDRESS
));
3690 if (reg
->num_entries
> 1 ||
3691 reg
->start_entry
> 0) {
3692 u32 padded_entry_size
=
3693 reg
->entry_size
> 1 ?
3695 (reg
->entry_size
) : 1;
3701 * padded_entry_size
);
3704 /* Read registers */
3705 if (next_reg_offset
+ reg
->entry_size
>=
3706 IDLE_CHK_MAX_ENTRIES_SIZE
) {
3708 "idle check registers entry is too large\n");
3712 for (j
= 0; j
< reg
->entry_size
;
3713 j
++, next_reg_offset
++,
3715 cond_reg_values
[next_reg_offset
] =
3716 qed_rd(p_hwfn
, p_ptt
, grc_addr
);
3720 /* Call rule's condition function - a return value of
3721 * true indicates failure.
3723 if ((*cond_arr
[rule
->cond_id
])(cond_reg_values
,
3724 imm_values
) || !dump
) {
3726 qed_idle_chk_dump_failure(p_hwfn
,
3734 (*num_failing_rules
)++;
3743 /* Performs Idle Check Dump to the specified buffer.
3744 * Returns the dumped size in dwords.
3746 static u32
qed_idle_chk_dump(struct qed_hwfn
*p_hwfn
,
3747 struct qed_ptt
*p_ptt
, u32
*dump_buf
, bool dump
)
3749 u32 offset
= 0, input_offset
= 0, num_failing_rules
= 0;
3750 u32 num_failing_rules_offset
;
3752 /* Dump global params */
3753 offset
+= qed_dump_common_global_params(p_hwfn
,
3755 dump_buf
+ offset
, dump
, 1);
3756 offset
+= qed_dump_str_param(dump_buf
+ offset
,
3757 dump
, "dump-type", "idle-chk");
3759 /* Dump idle check section header with a single parameter */
3760 offset
+= qed_dump_section_hdr(dump_buf
+ offset
, dump
, "idle_chk", 1);
3761 num_failing_rules_offset
= offset
;
3762 offset
+= qed_dump_num_param(dump_buf
+ offset
, dump
, "num_rules", 0);
3763 while (input_offset
<
3764 s_dbg_arrays
[BIN_BUF_DBG_IDLE_CHK_RULES
].size_in_dwords
) {
3765 const struct dbg_idle_chk_cond_hdr
*cond_hdr
=
3766 (const struct dbg_idle_chk_cond_hdr
*)
3767 &s_dbg_arrays
[BIN_BUF_DBG_IDLE_CHK_RULES
].ptr
3769 bool eval_mode
= GET_FIELD(cond_hdr
->mode
.data
,
3770 DBG_MODE_HDR_EVAL_MODE
) > 0;
3771 bool mode_match
= true;
3775 u16 modes_buf_offset
=
3776 GET_FIELD(cond_hdr
->mode
.data
,
3777 DBG_MODE_HDR_MODES_BUF_OFFSET
);
3779 mode_match
= qed_is_mode_match(p_hwfn
,
3784 u32 curr_failing_rules
;
3787 qed_idle_chk_dump_rule_entries(p_hwfn
,
3791 (const struct dbg_idle_chk_rule
*)
3792 &s_dbg_arrays
[BIN_BUF_DBG_IDLE_CHK_RULES
].
3794 cond_hdr
->data_size
/ IDLE_CHK_RULE_SIZE_DWORDS
,
3795 &curr_failing_rules
);
3796 num_failing_rules
+= curr_failing_rules
;
3799 input_offset
+= cond_hdr
->data_size
;
3802 /* Overwrite num_rules parameter */
3804 qed_dump_num_param(dump_buf
+ num_failing_rules_offset
,
3805 dump
, "num_rules", num_failing_rules
);
/* Finds the meta data image in NVRAM. */
static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
					    struct qed_ptt *p_ptt,
					    u32 image_type,
					    u32 *nvram_offset_bytes,
					    u32 *nvram_size_bytes)
{
	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
	struct mcp_file_att file_att;

	/* Call NVRAM get file command */
	if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
			       image_type, &ret_mcp_resp, &ret_mcp_param,
			       &ret_txn_size, (u32 *)&file_att) != 0)
		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;

	/* Check response */
	if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;

	/* Update return values */
	*nvram_offset_bytes = file_att.nvm_start_addr;
	*nvram_size_bytes = file_att.len;
	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
		   image_type, *nvram_offset_bytes, *nvram_size_bytes);

	/* Check alignment */
	if (*nvram_size_bytes & 0x3)
		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
	return DBG_STATUS_OK;
}
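
/* A typical use (see qed_mcp_trace_get_meta_info() below) is locating the MFW
 * trace meta image, e.g. with image_type = NVM_TYPE_MFW_TRACE1; the returned
 * offset/size are then fed to qed_nvram_read().
 */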
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 nvram_offset_bytes,
				      u32 nvram_size_bytes, u32 *ret_buf)
{
	u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
	u32 bytes_to_copy, read_offset = 0;
	s32 bytes_left = nvram_size_bytes;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "nvram_read: reading image of size %d bytes from NVRAM\n",
		   nvram_size_bytes);
	do {
		bytes_to_copy =
		    (bytes_left >
		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;

		/* Call NVRAM read command */
		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				       DRV_MSG_CODE_NVM_READ_NVRAM,
				       (nvram_offset_bytes +
					read_offset) |
				       (bytes_to_copy <<
					DRV_MB_PARAM_NVM_LEN_SHIFT),
				       &ret_mcp_resp, &ret_mcp_param,
				       &ret_read_size,
				       (u32 *)((u8 *)ret_buf +
					       read_offset)) != 0)
			return DBG_STATUS_NVRAM_READ_FAILED;

		/* Check response */
		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
			return DBG_STATUS_NVRAM_READ_FAILED;

		/* Update read offset */
		read_offset += ret_read_size;
		bytes_left -= ret_read_size;
	} while (bytes_left > 0);

	return DBG_STATUS_OK;
}
/* Get info on the MCP Trace data in the scratchpad:
 * - trace_data_grc_addr - the GRC address of the trace data
 * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
 *	the header)
 */
static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 *trace_data_grc_addr,
						   u32 *trace_data_size_bytes)
{
	/* Read MCP trace section offsize structure from MCP scratchpad */
	u32 spad_trace_offsize = qed_rd(p_hwfn,
					p_ptt,
					MCP_SPAD_TRACE_OFFSIZE_ADDR);
	u32 signature;

	/* Extract MCP trace section GRC address from offsize structure (within
	 * scratchpad).
	 */
	*trace_data_grc_addr =
		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);

	/* Read signature from MCP trace section */
	signature = qed_rd(p_hwfn, p_ptt,
			   *trace_data_grc_addr +
			   offsetof(struct mcp_trace, signature));
	if (signature != MFW_TRACE_SIGNATURE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Read trace size from MCP trace section */
	*trace_data_size_bytes = qed_rd(p_hwfn,
					p_ptt,
					*trace_data_grc_addr +
					offsetof(struct mcp_trace, size));
	return DBG_STATUS_OK;
}
/* Reads MCP trace meta data image from NVRAM.
 * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
 *	file)
 * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
 *	Trace meta data starts (invalid when loaded from file)
 * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
 */
static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 trace_data_size_bytes,
						   u32 *running_bundle_id,
						   u32 *trace_meta_offset_bytes,
						   u32 *trace_meta_size_bytes)
{
	/* Read MCP trace section offsize structure from MCP scratchpad */
	u32 spad_trace_offsize = qed_rd(p_hwfn,
					p_ptt,
					MCP_SPAD_TRACE_OFFSIZE_ADDR);

	/* Find running bundle ID */
	u32 running_mfw_addr =
		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
	enum dbg_status status;
	u32 nvram_image_type;

	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
	if (*running_bundle_id > 1)
		return DBG_STATUS_INVALID_NVRAM_BUNDLE;

	/* Find image in NVRAM */
	nvram_image_type =
	    (*running_bundle_id ==
	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
	status = qed_find_nvram_image(p_hwfn,
				      p_ptt,
				      nvram_image_type,
				      trace_meta_offset_bytes,
				      trace_meta_size_bytes);

	return status;
}
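
/* The "offsize" scratchpad word is assumed to pack the offset and size of the
 * MCP trace area; SECTION_OFFSET() and QED_SECTION_SIZE() extract the two
 * halves, and the running bundle ID word sits right after the trace data
 * inside that section.
 */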
/* Reads the MCP Trace data from the specified GRC address into the specified
 * buffer.
 */
static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 grc_addr, u32 size_in_dwords, u32 *buf)
{
	u32 i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
		   size_in_dwords, grc_addr);
	for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
		buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
}
/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
 * buffer.
 */
static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
					       struct qed_ptt *p_ptt,
					       u32 nvram_offset_in_bytes,
					       u32 size_in_bytes, u32 *buf)
{
	u8 *byte_buf = (u8 *)buf;
	u8 modules_num, i;
	u32 signature;

	/* Read meta data from NVRAM */
	enum dbg_status status = qed_nvram_read(p_hwfn,
						p_ptt,
						nvram_offset_in_bytes,
						size_in_bytes,
						buf);

	if (status != DBG_STATUS_OK)
		return status;

	/* Extract and check first signature */
	signature = qed_read_unaligned_dword(byte_buf);
	byte_buf += sizeof(u32);
	if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Extract number of modules */
	modules_num = *(byte_buf++);

	/* Skip all modules */
	for (i = 0; i < modules_num; i++) {
		u8 module_len = *(byte_buf++);

		byte_buf += module_len;
	}

	/* Extract and check second signature */
	signature = qed_read_unaligned_dword(byte_buf);
	byte_buf += sizeof(u32);
	if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
	return DBG_STATUS_OK;
}
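
/* Expected meta image layout (as parsed above): a 32-bit signature, a module
 * count byte, the length-prefixed module names, and a second 32-bit signature.
 * Only the signatures and the module list are checked/skipped here; the
 * remainder of the image is left as-is in the buffer for user-space parsing.
 */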
/* Dump MCP Trace */
static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u32 *dump_buf,
					  bool dump, u32 *num_dumped_dwords)
{
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
	u32 trace_meta_offset_bytes, trace_meta_size_bytes;
	enum dbg_status status;
	bool halted = false;

	*num_dumped_dwords = 0;

	/* Get trace data info */
	status = qed_mcp_trace_get_data_info(p_hwfn,
					     p_ptt,
					     &trace_data_grc_addr,
					     &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. If halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Find trace data size */
	trace_data_size_dwords =
		DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
			     BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_data", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	if (dump)
		qed_mcp_trace_read_data(p_hwfn,
					p_ptt,
					trace_data_grc_addr,
					trace_data_size_dwords,
					dump_buf + offset);
	offset += trace_data_size_dwords;

	/* Resume MCP (only if halt succeeded) */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_meta", 1);

	/* Read trace meta info */
	status = qed_mcp_trace_get_meta_info(p_hwfn,
					     p_ptt,
					     trace_data_size_bytes,
					     &running_bundle_id,
					     &trace_meta_offset_bytes,
					     &trace_meta_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump trace meta size param (trace_meta_size_bytes is always
	 * dword-aligned).
	 */
	trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
	offset += qed_dump_num_param(dump_buf + offset, dump, "size",
				     trace_meta_size_dwords);

	/* Read trace meta image into dump buffer */
	if (dump) {
		status = qed_mcp_trace_read_meta(p_hwfn,
						 p_ptt,
						 trace_meta_offset_bytes,
						 trace_meta_size_bytes,
						 dump_buf + offset);
		if (status != DBG_STATUS_OK)
			return status;
	}

	offset += trace_meta_size_dwords;

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump, u32 *num_dumped_dwords)
{
	u32 offset = 0, dwords_read, size_param_offset;
	bool fifo_has_data;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "reg-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "reg_fifo_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
		 * test how much data is available, except for reading it.
		 */
		offset += REG_FIFO_DEPTH_DWORDS;
		*num_dumped_dwords = offset;
		return DBG_STATUS_OK;
	}

	fifo_has_data = qed_rd(p_hwfn, p_ptt,
			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;

	/* Pull available data from fifo. Use DMAE since this is widebus memory
	 * and must be accessed atomically. Test for dwords_read not passing
	 * buffer size since more entries could be added to the buffer as we
	 * are emptying it.
	 */
	for (dwords_read = 0;
	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
	     dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
	     REG_FIFO_ELEMENT_DWORDS) {
		if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
				      (u64)(uintptr_t)(&dump_buf[offset]),
				      REG_FIFO_ELEMENT_DWORDS, 0))
			return DBG_STATUS_DMAE_FAILED;
		fifo_has_data = qed_rd(p_hwfn, p_ptt,
				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
	}

	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
			   dwords_read);

	*num_dumped_dwords = offset;
	return DBG_STATUS_OK;
}
static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump, u32 *num_dumped_dwords)
{
	u32 offset = 0, dwords_read, size_param_offset;
	bool fifo_has_data;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "igu-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "igu_fifo_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
		 * test how much data is available, except for reading it.
		 */
		offset += IGU_FIFO_DEPTH_DWORDS;
		*num_dumped_dwords = offset;
		return DBG_STATUS_OK;
	}

	fifo_has_data = qed_rd(p_hwfn, p_ptt,
			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;

	/* Pull available data from fifo. Use DMAE since this is widebus memory
	 * and must be accessed atomically. Test for dwords_read not passing
	 * buffer size since more entries could be added to the buffer as we
	 * are emptying it.
	 */
	for (dwords_read = 0;
	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
	     dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
	     IGU_FIFO_ELEMENT_DWORDS) {
		if (qed_dmae_grc2host(p_hwfn, p_ptt,
				      IGU_REG_ERROR_HANDLING_MEMORY,
				      (u64)(uintptr_t)(&dump_buf[offset]),
				      IGU_FIFO_ELEMENT_DWORDS, 0))
			return DBG_STATUS_DMAE_FAILED;
		fifo_has_data = qed_rd(p_hwfn, p_ptt,
				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
	}

	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
			   dwords_read);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
/* Protection Override dump */
static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
						    struct qed_ptt *p_ptt,
						    u32 *dump_buf,
						    bool dump,
						    u32 *num_dumped_dwords)
{
	u32 offset = 0, size_param_offset, override_window_dwords;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "protection-override");

	/* Dump data section header and param. The size param is 0 for now,
	 * and is overwritten after reading the data.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "protection_override_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
		*num_dumped_dwords = offset;
		return DBG_STATUS_OK;
	}

	/* Add override window info to buffer */
	override_window_dwords =
		qed_rd(p_hwfn, p_ptt,
		       GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
	if (qed_dmae_grc2host(p_hwfn, p_ptt,
			      GRC_REG_PROTECTION_OVERRIDE_WINDOW,
			      (u64)(uintptr_t)(dump_buf + offset),
			      override_window_dwords, 0))
		return DBG_STATUS_DMAE_FAILED;
	offset += override_window_dwords;
	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
			   override_window_dwords);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
/* Performs FW Asserts Dump to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;
	u32 offset = 0, i;
	u8 storm_id;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "fw-asserts");
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
		    last_list_idx, element_addr;

		if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
			continue;

		/* Read FW info for the current Storm */
		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = s_storm_defs[storm_id].letter;
		offset += qed_dump_section_hdr(dump_buf + offset, dump,
					       "fw_asserts", 2);
		offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
					     storm_letter_str);
		offset += qed_dump_num_param(dump_buf + offset, dump, "size",
					     fw_info.fw_asserts_section.
					     list_element_dword_size);

		if (!dump) {
			offset += fw_info.fw_asserts_section.
				  list_element_dword_size;
			continue;
		}

		/* Read and dump FW Asserts data */
		fw_asserts_section_addr =
			s_storm_defs[storm_id].sem_fast_mem_addr +
			SEM_FAST_REG_INT_RAM +
			RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
					   section_ram_line_offset);
		next_list_idx_addr =
			fw_asserts_section_addr +
			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
					list_next_index_dword_offset);
		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
		last_list_idx = (next_list_idx > 0
				 ? next_list_idx
				 : fw_info.fw_asserts_section.list_num_elements)
				- 1;
		element_addr =
			fw_asserts_section_addr +
			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
					list_dword_offset) +
			last_list_idx *
			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
					list_element_dword_size);
		for (i = 0;
		     i < fw_info.fw_asserts_section.list_element_dword_size;
		     i++, offset++, element_addr += BYTES_IN_DWORD)
			dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
	}

	/* Dump last section */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);

	return offset;
}
/***************************** Public Functions *******************************/

enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
{
	/* Convert binary data to debug arrays */
	u32 num_of_buffers = *(u32 *)bin_ptr;
	struct bin_buffer_hdr *buf_array;
	u8 buf_id;

	buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);

	for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
		s_dbg_arrays[buf_id].ptr =
		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
		s_dbg_arrays[buf_id].size_in_dwords =
		    BYTES_TO_DWORDS(buf_array[buf_id].length);
	}

	return DBG_STATUS_OK;
}
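
/* Illustrative sketch (not part of the driver): the binary image handed to
 * qed_dbg_set_bin_ptr() is assumed to start with a dword holding the number
 * of buffers, immediately followed by an array of struct bin_buffer_hdr
 * entries whose offset/length fields (used above) locate each payload inside
 * the same image. The hypothetical helper below only demonstrates that
 * layout.
 */
static const u32 * __maybe_unused
qed_dbg_example_get_buf(const u8 * const bin_ptr, u8 buf_id)
{
	u32 num_of_buffers = *(u32 *)bin_ptr;
	struct bin_buffer_hdr *buf_array =
	    (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);

	if (buf_id >= num_of_buffers)
		return NULL;

	/* The payload of buffer buf_id starts offset bytes into the image */
	return (const u32 *)(bin_ptr + buf_array[buf_id].offset);
}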
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt,
					      u32 *buf_size)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;
	if (status != DBG_STATUS_OK)
		return status;
	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;
	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf,
				 u32 buf_size_in_dwords,
				 u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
					       &needed_buf_size_in_dwords);

	*num_dumped_dwords = 0;
	if (status != DBG_STATUS_OK)
		return status;
	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* GRC Dump */
	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);

	/* Clear all GRC params */
	qed_dbg_grc_clear_params(p_hwfn);

	return status;
}
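
/* Illustrative sketch (not part of the driver): the qed_dbg_*_dump() entry
 * points in this file all follow the same two-step contract - query the
 * required size first, then dump into a buffer of at least that many dwords.
 * The caller below is hypothetical; buffer allocation is left to the caller's
 * own context.
 */
static enum dbg_status __maybe_unused
qed_dbg_example_grc_dump(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u32 *dump_buf, u32 buf_size_in_dwords)
{
	u32 needed_dwords, num_dumped_dwords;
	enum dbg_status status;

	status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_dwords);
	if (status != DBG_STATUS_OK)
		return status;
	if (buf_size_in_dwords < needed_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* The dump call repeats the size check internally as well */
	return qed_dbg_grc_dump(p_hwfn, p_ptt, dump_buf,
				buf_size_in_dwords, &num_dumped_dwords);
}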
enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 *buf_size)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	*buf_size = 0;
	if (status != DBG_STATUS_OK)
		return status;
	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;
	if (!dev_data->idle_chk.buf_size_set) {
		dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
								p_ptt,
								NULL, false);
		dev_data->idle_chk.buf_size_set = true;
	}

	*buf_size = dev_data->idle_chk.buf_size;

	return DBG_STATUS_OK;
}

enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
						    &needed_buf_size_in_dwords);

	*num_dumped_dwords = 0;
	if (status != DBG_STATUS_OK)
		return status;
	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Idle Check Dump */
	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);

	return DBG_STATUS_OK;
}

enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						    struct qed_ptt *p_ptt,
						    u32 *buf_size)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;
	if (status != DBG_STATUS_OK)
		return status;
	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}

enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 *dump_buf,
				       u32 buf_size_in_dwords,
				       u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
						     &needed_buf_size_in_dwords);

	if (status != DBG_STATUS_OK)
		return status;
	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Perform dump */
	return qed_mcp_trace_dump(p_hwfn,
				  p_ptt, dump_buf, true, num_dumped_dwords);
}

enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 *buf_size)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;
	if (status != DBG_STATUS_OK)
		return status;
	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}

enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						    &needed_buf_size_in_dwords);

	*num_dumped_dwords = 0;
	if (status != DBG_STATUS_OK)
		return status;
	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);
	return qed_reg_fifo_dump(p_hwfn,
				 p_ptt, dump_buf, true, num_dumped_dwords);
}

enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 *buf_size)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;
	if (status != DBG_STATUS_OK)
		return status;
	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}

enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						    &needed_buf_size_in_dwords);

	*num_dumped_dwords = 0;
	if (status != DBG_STATUS_OK)
		return status;
	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);
	return qed_igu_fifo_dump(p_hwfn,
				 p_ptt, dump_buf, true, num_dumped_dwords);
}

enum dbg_status
qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt,
					      u32 *buf_size)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;
	if (status != DBG_STATUS_OK)
		return status;
	return qed_protection_override_dump(p_hwfn,
					    p_ptt, NULL, false, buf_size);
}

enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 *dump_buf,
						 u32 buf_size_in_dwords,
						 u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
						&needed_buf_size_in_dwords);

	*num_dumped_dwords = 0;
	if (status != DBG_STATUS_OK)
		return status;
	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);
	return qed_protection_override_dump(p_hwfn,
					    p_ptt,
					    dump_buf, true, num_dumped_dwords);
}

enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						     struct qed_ptt *p_ptt,
						     u32 *buf_size)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;
	if (status != DBG_STATUS_OK)
		return status;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);
	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
	return DBG_STATUS_OK;
}

enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
						&needed_buf_size_in_dwords);

	*num_dumped_dwords = 0;
	if (status != DBG_STATUS_OK)
		return status;
	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
	return DBG_STATUS_OK;
}
/******************************* Data Types **********************************/

struct mcp_trace_format {
	u32 data;
#define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
#define MCP_TRACE_FORMAT_MODULE_SHIFT	0
#define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
#define MCP_TRACE_FORMAT_LEVEL_SHIFT	16
#define MCP_TRACE_FORMAT_P1_SIZE_MASK	0x000c0000
#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT	18
#define MCP_TRACE_FORMAT_P2_SIZE_MASK	0x00300000
#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT	20
#define MCP_TRACE_FORMAT_P3_SIZE_MASK	0x00c00000
#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT	22
#define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
#define MCP_TRACE_FORMAT_LEN_SHIFT	24
	char *format_str;
};

struct mcp_trace_meta {
	u32 modules_num;
	char **modules;
	u32 formats_num;
	struct mcp_trace_format *formats;
};

/* Reg fifo element */
struct reg_fifo_element {
	u64 data;
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
#define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
#define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
#define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
#define REG_FIFO_ELEMENT_PF_SHIFT		24
#define REG_FIFO_ELEMENT_PF_MASK		0xf
#define REG_FIFO_ELEMENT_VF_SHIFT		28
#define REG_FIFO_ELEMENT_VF_MASK		0xff
#define REG_FIFO_ELEMENT_PORT_SHIFT		36
#define REG_FIFO_ELEMENT_PORT_MASK		0x3
#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
#define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
#define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
#define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
#define REG_FIFO_ELEMENT_MASTER_SHIFT		43
#define REG_FIFO_ELEMENT_MASTER_MASK		0xf
#define REG_FIFO_ELEMENT_ERROR_SHIFT		47
#define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
};

/* IGU fifo element */
struct igu_fifo_element {
	u32 dword0;
#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT	0
#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK	0xff
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT	8
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK	0x1
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT	9
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK	0xf
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT	13
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK	0xf
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT	17
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK	0x7fff
	u32 dword1;
	u32 dword2;
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
	u32 reserved;
};

struct igu_fifo_wr_data {
	u32 data;
#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
#define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
#define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
};

struct igu_fifo_cleanup_wr_data {
	u32 data;
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
};

/* Protection override element */
struct protection_override_element {
	u64 data;
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
};

enum igu_fifo_sources {
	IGU_SRC_PXP0,
	IGU_SRC_PXP1,
	IGU_SRC_PXP2,
	IGU_SRC_PXP3,
	IGU_SRC_PXP4,
	IGU_SRC_PXP5,
	IGU_SRC_PXP6,
	IGU_SRC_PXP7,
	IGU_SRC_CAU,
	IGU_SRC_ATTN,
	IGU_SRC_GRC
};

enum igu_fifo_addr_types {
	IGU_ADDR_TYPE_MSIX_MEM,
	IGU_ADDR_TYPE_WRITE_PBA,
	IGU_ADDR_TYPE_WRITE_INT_ACK,
	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
	IGU_ADDR_TYPE_READ_INT,
	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
	IGU_ADDR_TYPE_RESERVED
};

struct igu_fifo_addr_data {
	u16 start_addr;
	u16 end_addr;
	char *desc;
	char *vf_desc;
	enum igu_fifo_addr_types type;
};
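
/* Illustrative sketch (not part of the driver): the SHIFT/MASK pairs attached
 * to struct reg_fifo_element describe field-relative masks, i.e. a field is
 * extracted by shifting first and masking afterwards, which is what the
 * GET_FIELD() macro used later in this file expands to. The hypothetical
 * helper below is only a worked example of that extraction.
 */
static u8 __maybe_unused
qed_example_reg_fifo_vf(const struct reg_fifo_element *elem)
{
	/* Bits 28..35 of the element hold the VF number (0xff means PF) */
	return (u8)((elem->data >> REG_FIFO_ELEMENT_VF_SHIFT) &
		    REG_FIFO_ELEMENT_VF_MASK);
}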
/******************************** Constants **********************************/

#define MAX_MSG_LEN				1024
#define MCP_TRACE_MAX_MODULE_LEN		8
#define MCP_TRACE_FORMAT_MAX_PARAMS		3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
#define REG_FIFO_ELEMENT_ADDR_FACTOR		4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4

/********************************* Macros ************************************/

#define BYTES_TO_DWORDS(bytes)			((bytes) / BYTES_IN_DWORD)
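
/* Illustrative sketch (not part of the driver): MCP_TRACE_FORMAT_PARAM_WIDTH
 * evaluates to 20 - 18 = 2, i.e. each of the three parameter-size fields in
 * mcp_trace_format.data is 2 bits wide, so the MCP trace parser below can
 * step from P1 to P2 to P3 by shifting the mask left by this width. The
 * hypothetical helper only demonstrates extracting the P1 size the same way.
 */
static u8 __maybe_unused
qed_example_mcp_trace_p1_size(const struct mcp_trace_format *fmt)
{
	return (u8)((fmt->data & MCP_TRACE_FORMAT_P1_SIZE_MASK) >>
		    MCP_TRACE_FORMAT_P1_SIZE_SHIFT);
}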
/***************************** Constant Arrays *******************************/

/* Status string array */
static const char * const s_status_str[] = {
	"Operation completed successfully",
	"Debug application version wasn't set",
	"Unsupported debug application version",
	"The debug block wasn't reset since the last recording",
	"Invalid arguments",
	"The debug output was already set",
	"Invalid PCI buffer size",
	"PCI buffer allocation failed",
	"A PCI buffer wasn't allocated",
	"Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
	"GRC/Timestamp input overlap in cycle dword 0",
	"Cannot record Storm data since the entire recording cycle is used by HW",
	"The Storm was already enabled",
	"The specified Storm wasn't enabled",
	"The block was already enabled",
	"The specified block wasn't enabled",
	"No input was enabled for recording",
	"Filters and triggers are not allowed when recording in 64b units",
	"The filter was already enabled",
	"The trigger was already enabled",
	"The trigger wasn't enabled",
	"A constraint can be added only after a filter was enabled or a trigger state was added",
	"Cannot add more than 3 trigger states",
	"Cannot add more than 4 constraints per filter or trigger state",
	"The recording wasn't started",
	"A trigger was configured, but it didn't trigger",
	"No data was recorded",
	"Dump buffer is too small",
	"Dumped data is not aligned to chunks",
	"Unknown chip",
	"Failed allocating virtual memory",
	"The input block is in reset",
	"Invalid MCP trace signature found in NVRAM",
	"Invalid bundle ID found in NVRAM",
	"Failed getting NVRAM image",
	"NVRAM image is not dword-aligned",
	"Failed reading from NVRAM",
	"Idle check parsing failed",
	"MCP Trace data is corrupt",
	"Dump doesn't contain meta data - it must be provided in an image file",
	"Failed to halt MCP",
	"Failed to resume MCP after halt",
	"DMAE transaction failed",
	"Failed to empty SEMI sync FIFO",
	"IGU FIFO data is corrupt",
	"MCP failed to mask parities",
	"FW Asserts parsing failed",
	"GRC FIFO data is corrupt",
	"Protection Override data is corrupt",
	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
	"When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
};

/* Idle check severity names array */
static const char * const s_idle_chk_severity_str[] = {
	"Error",
	"Error if no traffic",
	"Warning"
};

/* MCP Trace level names array */
static const char * const s_mcp_trace_level_str[] = {
	"ERROR",
	"TRACE",
	"DEBUG"
};

/* Parsing strings */
static const char * const s_access_strs[] = {
	"read",
	"write"
};

static const char * const s_privilege_strs[] = {
	"VF",
	"PDA",
	"HV",
	"UA"
};

static const char * const s_protection_strs[] = {
	"(default)",
	"(default)",
	"(default)",
	"(default)",
	"override VF",
	"override PDA",
	"override HV",
	"override UA"
};

static const char * const s_master_strs[] = {
	"???",
	"pxp",
	"mcp",
	"msdm",
	"psdm",
	"ysdm",
	"usdm",
	"tsdm",
	"xsdm",
	"dbu",
	"dmae",
	"???",
	"???",
	"???",
	"???",
	"???"
};

static const char * const s_reg_fifo_error_strs[] = {
	"grc timeout",
	"address doesn't belong to any block",
	"reserved address in block or write to read-only address",
	"privilege/protection mismatch",
	"path isolation error"
};

static const char * const s_igu_fifo_source_strs[] = {
	"TSTORM",
	"MSTORM",
	"USTORM",
	"XSTORM",
	"YSTORM",
	"PSTORM",
	"PCIE",
	"NIG_QM_PBF",
	"CAU",
	"ATTN",
	"GRC",
};

static const char * const s_igu_fifo_error_strs[] = {
	"no error",
	"length error",
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"unknown error",
	"SB index not valid",
	"SB relative index and FID not found",
	"FID not match",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
};

/* IGU FIFO address data */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
	{0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
	{0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
	{0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
	{0x201, 0x201, "Write PBA[64:127]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
	{0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
	 IGU_ADDR_TYPE_WRITE_INT_ACK},
	{0x5f0, 0x5f0, "Attention bits update", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f1, 0x5f1, "Attention bits set", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f2, 0x5f2, "Attention bits clear", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
	{0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};

/******************************** Variables **********************************/

/* MCP Trace meta data - used in case the dump doesn't contain the meta data
 * (e.g. due to no NVRAM access).
 */
static struct dbg_array s_mcp_trace_meta = { NULL, 0 };

/* Temporary buffer, used for print size calculations */
static char s_temp_buf[MAX_MSG_LEN];
/***************************** Public Functions *******************************/

enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
{
	/* Convert binary data to debug arrays */
	u32 num_of_buffers = *(u32 *)bin_ptr;
	struct bin_buffer_hdr *buf_array;
	u8 buf_id;

	buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);

	for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
		s_dbg_arrays[buf_id].ptr =
		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
		s_dbg_arrays[buf_id].size_in_dwords =
		    BYTES_TO_DWORDS(buf_array[buf_id].length);
	}

	return DBG_STATUS_OK;
}
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
	return (a + b) % size;
}

static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
{
	return (size + a - b) % size;
}
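
/* Illustrative sketch (not part of the driver): with a 16-byte cyclic buffer,
 * qed_cyclic_add(14, 4, 16) wraps to 2, and qed_cyclic_sub(2, 14, 16) gives
 * back the 4 bytes separating the two offsets. This is how the MCP trace
 * parser below measures the distance between trace_oldest and trace_prod.
 */
static u32 __maybe_unused qed_example_cyclic_distance(void)
{
	u32 prod = qed_cyclic_add(14, 4, 16);	/* == 2 */

	return qed_cyclic_sub(prod, 14, 16);	/* == 4 */
}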
/* Reads the specified number of bytes from the specified cyclic buffer (up to
 * 4 bytes) and returns them as a dword value. The specified buffer offset is
 * updated.
 */
static u32 qed_read_from_cyclic_buf(void *buf,
				    u32 *offset,
				    u32 buf_size, u8 num_bytes_to_read)
{
	u8 *bytes_buf = (u8 *)buf;
	u8 *val_ptr;
	u32 val = 0;
	u8 i;

	val_ptr = (u8 *)&val;

	for (i = 0; i < num_bytes_to_read; i++) {
		val_ptr[i] = bytes_buf[*offset];
		*offset = qed_cyclic_add(*offset, 1, buf_size);
	}

	return val;
}

/* Reads and returns the next byte from the specified buffer.
 * The specified buffer offset is updated.
 */
static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
{
	return ((u8 *)buf)[(*offset)++];
}

/* Reads and returns the next dword from the specified buffer.
 * The specified buffer offset is updated.
 */
static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
{
	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];

	*offset += 4;
	return dword_val;
}

/* Reads the next string from the specified buffer, and copies it to the
 * specified pointer. The specified buffer offset is updated.
 */
static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
{
	const char *source_str = &((const char *)buf)[*offset];

	strncpy(dest, source_str, size);
	dest[size - 1] = '\0';
	*offset += size;
}

/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
 * If the specified buffer is NULL, a temporary buffer pointer is returned.
 */
static char *qed_get_buf_ptr(void *buf, u32 offset)
{
	return buf ? (char *)buf + offset : s_temp_buf;
}
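
/* Illustrative sketch (not part of the driver): qed_get_buf_ptr() is what
 * lets every parser below run in two passes with the same code path - when
 * called with a NULL results buffer, the sprintf() output lands in s_temp_buf
 * and only the accumulated offset (i.e. the required size) is kept. The
 * hypothetical helper below shows that pattern in isolation.
 */
static u32 __maybe_unused qed_example_print_size(char *results_buf)
{
	u32 results_offset = 0;

	/* Works both for sizing (results_buf == NULL) and for printing */
	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
				  "example line\n");
	return results_offset;
}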
/* Reads a param from the specified buffer. Returns the number of dwords read.
 * If the returned str_param is NULL, the param is numeric and its value is
 * returned in num_param.
 * Otherwise, the param is a string and its pointer is returned in str_param.
 */
static u32 qed_read_param(u32 *dump_buf,
			  const char **param_name,
			  const char **param_str_val, u32 *param_num_val)
{
	char *char_buf = (char *)dump_buf;
	u32 offset = 0;	/* In bytes */

	/* Extract param name */
	*param_name = char_buf;
	offset += strlen(*param_name) + 1;

	/* Check param type */
	if (*(char_buf + offset++)) {
		/* String param */
		*param_str_val = char_buf + offset;
		offset += strlen(*param_str_val) + 1;
		if (offset & 0x3)
			offset += (4 - (offset & 0x3));
	} else {
		/* Numeric param */
		*param_str_val = NULL;
		if (offset & 0x3)
			offset += (4 - (offset & 0x3));
		*param_num_val = *(u32 *)(char_buf + offset);
		offset += 4;
	}

	return offset / 4;
}
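
/* Illustrative sketch (not part of the driver): a dumped param is laid out as
 * a NUL-terminated name, a one-byte type flag (non-zero means string), and
 * then either a NUL-terminated string value or a dword-aligned numeric value,
 * padded so the whole param ends on a dword boundary. The hypothetical buffer
 * below encodes "size" = 16 and is only meant to show how qed_read_param()
 * walks that layout.
 */
static u32 __maybe_unused qed_example_read_num_param(void)
{
	/* "size" + '\0', type byte 0, 2 padding bytes, u32 value (LE) */
	static u8 raw[] __aligned(4) = {
		's', 'i', 'z', 'e', 0, 0, 0, 0, 16, 0, 0, 0
	};
	const char *name, *str_val;
	u32 num_val = 0;

	qed_read_param((u32 *)raw, &name, &str_val, &num_val);

	return num_val;	/* == 16, and str_val == NULL (numeric param) */
}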
/* Reads a section header from the specified buffer.
 * Returns the number of dwords read.
 */
static u32 qed_read_section_hdr(u32 *dump_buf,
				const char **section_name,
				u32 *num_section_params)
{
	const char *param_str_val;

	return qed_read_param(dump_buf,
			      section_name, &param_str_val, num_section_params);
}

/* Reads section params from the specified buffer and prints them to the
 * results buffer. Returns the number of dwords read.
 */
static u32 qed_print_section_params(u32 *dump_buf,
				    u32 num_section_params,
				    char *results_buf, u32 *num_chars_printed)
{
	u32 i, dump_offset = 0, results_offset = 0;

	for (i = 0; i < num_section_params; i++) {
		const char *param_name;
		const char *param_str_val;
		u32 param_num_val = 0;

		dump_offset += qed_read_param(dump_buf + dump_offset,
					      &param_name,
					      &param_str_val, &param_num_val);
		if (param_str_val)
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset),
				    "%s: %s\n", param_name, param_str_val);
		else if (strcmp(param_name, "fw-timestamp"))
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset),
				    "%s: %d\n", param_name, param_num_val);
	}

	results_offset +=
	    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
	*num_chars_printed = results_offset;
	return dump_offset;
}

const char *qed_dbg_get_status_str(enum dbg_status status)
{
	return (status <
		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
}
/* Parses the idle check rules and returns the number of characters printed.
 * In case of parsing error, returns 0.
 */
static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
					 u32 *dump_buf,
					 u32 *dump_buf_end,
					 u32 num_rules,
					 bool print_fw_idle_chk,
					 char *results_buf,
					 u32 *num_errors, u32 *num_warnings)
{
	u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
	u16 i, j;

	*num_errors = 0;
	*num_warnings = 0;

	/* Go over dumped results */
	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
	     rule_idx++) {
		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
		struct dbg_idle_chk_result_hdr *hdr;
		const char *parsing_str;
		u32 parsing_str_offset;
		const char *lsi_msg;
		u8 curr_reg_id = 0;
		bool has_fw_msg;

		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
		rule_parsing_data =
			(const struct dbg_idle_chk_rule_parsing_data *)
			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
			ptr[hdr->rule_id];
		parsing_str_offset =
			GET_FIELD(rule_parsing_data->data,
				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
		has_fw_msg =
			GET_FIELD(rule_parsing_data->data,
				  DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
		parsing_str = &((const char *)
				s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
				[parsing_str_offset];
		lsi_msg = parsing_str;

		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
			return 0;

		/* Skip rule header */
		dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);

		/* Update errors/warnings count */
		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
			(*num_errors)++;
		else
			(*num_warnings)++;

		/* Print rule severity */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s: ",
			    s_idle_chk_severity_str[hdr->severity]);

		/* Print rule message */
		if (has_fw_msg)
			parsing_str += strlen(parsing_str) + 1;
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s.",
			    has_fw_msg &&
			    print_fw_idle_chk ? parsing_str : lsi_msg);
		parsing_str += strlen(parsing_str) + 1;

		/* Print register values */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), " Registers:");
		for (i = 0;
		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
		     i++) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr
			    = (struct dbg_idle_chk_result_reg_hdr *)
			    dump_buf;
			bool is_mem =
				GET_FIELD(reg_hdr->data,
					  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
			u8 reg_id =
				GET_FIELD(reg_hdr->data,
					  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);

			/* Skip reg header */
			dump_buf +=
			    (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);

			/* Skip register names until the required reg_id is
			 * reached.
			 */
			for (; reg_id > curr_reg_id;
			     curr_reg_id++,
			     parsing_str += strlen(parsing_str) + 1);

			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), " %s",
				    parsing_str);
			if (i < hdr->num_dumped_cond_regs && is_mem)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "[%d]", hdr->mem_entry_id +
					    reg_hdr->start_entry);
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), "=");
			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "0x%x", *dump_buf);
				if (j < reg_hdr->size - 1)
					results_offset +=
					    sprintf(qed_get_buf_ptr
						    (results_buf,
						     results_offset), ",");
			}
		}

		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
	}

	/* Check if end of dump buffer was exceeded */
	if (dump_buf > dump_buf_end)
		return 0;
	return results_offset;
}

/* Parses an idle check dump buffer.
 * If result_buf is not NULL, the idle check results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */
static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
					       u32 *dump_buf,
					       u32 num_dumped_dwords,
					       char *results_buf,
					       u32 *parsed_results_bytes,
					       u32 *num_errors,
					       u32 *num_warnings)
{
	const char *section_name, *param_name, *param_str_val;
	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
	u32 num_section_params = 0, num_rules;
	u32 results_offset = 0;	/* Offset in results_buf in bytes */

	*parsed_results_bytes = 0;
	*num_errors = 0;
	*num_warnings = 0;
	if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read idle_chk section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &num_rules);
	if (strcmp(param_name, "num_rules") != 0)
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	if (num_rules) {
		u32 rules_print_size;

		/* Print FW output */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "FW_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
						      dump_buf_end, num_rules,
						      true,
						      results_buf ?
						      results_buf +
						      results_offset : NULL,
						      num_errors, num_warnings);
		results_offset += rules_print_size;
		if (rules_print_size == 0)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

		/* Print LSI output */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nLSI_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
						      dump_buf_end, num_rules,
						      false,
						      results_buf ?
						      results_buf +
						      results_offset : NULL,
						      num_errors, num_warnings);
		results_offset += rules_print_size;
		if (rules_print_size == 0)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	}

	/* Print errors/warnings count */
	if (*num_errors) {
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
			    *num_errors, *num_warnings);
	} else if (*num_warnings) {
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully (with %d warnings)\n",
			    *num_warnings);
	} else {
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully\n");
	}

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;
	return DBG_STATUS_OK;
}

enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords,
						  u32 *results_buf_size)
{
	u32 num_errors, num_warnings;

	return qed_parse_idle_chk_dump(p_hwfn,
				       dump_buf,
				       num_dumped_dwords,
				       NULL,
				       results_buf_size,
				       &num_errors, &num_warnings);
}

enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
					   u32 *dump_buf,
					   u32 num_dumped_dwords,
					   char *results_buf,
					   u32 *num_errors, u32 *num_warnings)
{
	u32 parsed_buf_size;

	return qed_parse_idle_chk_dump(p_hwfn,
				       dump_buf,
				       num_dumped_dwords,
				       results_buf,
				       &parsed_buf_size,
				       num_errors, num_warnings);
}
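
/* Illustrative sketch (not part of the driver): parsing an idle check dump is
 * the same two-pass pattern as dumping - ask for the results-buffer size,
 * have the caller allocate that many bytes, then print into it. Allocation
 * is deliberately left out here since it depends on the caller's context.
 */
static enum dbg_status __maybe_unused
qed_example_parse_idle_chk(struct qed_hwfn *p_hwfn,
			   u32 *dump_buf, u32 num_dumped_dwords,
			   char *results_buf, u32 results_buf_size)
{
	u32 needed_bytes, num_errors, num_warnings;
	enum dbg_status status;

	status = qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
						   num_dumped_dwords,
						   &needed_bytes);
	if (status != DBG_STATUS_OK)
		return status;
	if (results_buf_size < needed_bytes)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
					  results_buf,
					  &num_errors, &num_warnings);
}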
/* Frees the specified MCP Trace meta data */
static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
				    struct mcp_trace_meta *meta)
{
	u32 i;

	/* Release modules */
	if (meta->modules) {
		for (i = 0; i < meta->modules_num; i++)
			kfree(meta->modules[i]);
		kfree(meta->modules);
	}

	/* Release formats */
	if (meta->formats) {
		for (i = 0; i < meta->formats_num; i++)
			kfree(meta->formats[i].format_str);
		kfree(meta->formats);
	}
}

/* Allocates and fills MCP Trace meta data based on the specified meta data
 * dump buffer.
 * Returns debug status code.
 */
static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
						const u32 *meta_buf,
						struct mcp_trace_meta *meta)
{
	u8 *meta_buf_bytes = (u8 *)meta_buf;
	u32 offset = 0, signature, i;

	memset(meta, 0, sizeof(*meta));

	/* Read first signature */
	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
	if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Read number of modules and allocate memory for all the module
	 * pointers.
	 */
	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
	meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
	if (!meta->modules)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Allocate and read all module strings */
	for (i = 0; i < meta->modules_num; i++) {
		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);

		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
		if (!(*(meta->modules + i))) {
			/* Update number of modules to be released */
			meta->modules_num = i ? i - 1 : 0;
			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
		}

		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
				      *(meta->modules + i));
		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
	}

	/* Read second signature */
	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
	if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Read number of formats and allocate memory for all formats */
	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
	meta->formats = kzalloc(meta->formats_num *
				sizeof(struct mcp_trace_format),
				GFP_KERNEL);
	if (!meta->formats)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Allocate and read all format strings */
	for (i = 0; i < meta->formats_num; i++) {
		struct mcp_trace_format *format_ptr = &meta->formats[i];
		u8 format_len;

		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
							   &offset);
		format_len =
		    (u8)((format_ptr->data &
			  MCP_TRACE_FORMAT_LEN_MASK) >>
			 MCP_TRACE_FORMAT_LEN_SHIFT);
		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
		if (!format_ptr->format_str) {
			/* Update number of formats to be released */
			meta->formats_num = i ? i - 1 : 0;
			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
		}

		qed_read_str_from_buf(meta_buf_bytes,
				      &offset,
				      format_len, format_ptr->format_str);
	}

	return DBG_STATUS_OK;
}
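
/* Illustrative sketch (not part of the driver): the meta image parsed above
 * is assumed to be laid out as
 *   signature dword | module count byte | {len byte, string} per module |
 *   signature dword | format count dword | {data dword, string} per format
 * The hypothetical helper below only checks the leading signature the same
 * way qed_mcp_trace_alloc_meta() does.
 */
static bool __maybe_unused qed_example_meta_signature_ok(const u32 *meta_buf)
{
	u32 offset = 0;

	return qed_read_dword_from_buf((void *)meta_buf, &offset) ==
	       MCP_TRACE_META_IMAGE_SIGNATURE;
}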
/* Parses an MCP Trace dump buffer.
 * If result_buf is not NULL, the MCP Trace results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
						u32 *dump_buf,
						u32 num_dumped_dwords,
						char *results_buf,
						u32 *parsed_results_bytes)
{
	u32 results_offset = 0, param_mask, param_shift, param_num_val;
	u32 num_section_params, offset, end_offset, bytes_left;
	const char *section_name, *param_name, *param_str_val;
	u32 trace_data_dwords, trace_meta_dwords;
	struct mcp_trace_meta meta;
	struct mcp_trace *trace;
	enum dbg_status status;
	const u32 *meta_buf;
	u8 *trace_buf;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read trace_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_data_dwords = param_num_val;

	/* Prepare trace info */
	trace = (struct mcp_trace *)dump_buf;
	trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
	offset = trace->trace_oldest;
	end_offset = trace->trace_prod;
	bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
	dump_buf += trace_data_dwords;

	/* Read meta_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_meta"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size") != 0)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_meta_dwords = param_num_val;

	/* Choose meta data buffer */
	if (!trace_meta_dwords) {
		/* Dump doesn't include meta data */
		if (!s_mcp_trace_meta.ptr)
			return DBG_STATUS_MCP_TRACE_NO_META;
		meta_buf = s_mcp_trace_meta.ptr;
	} else {
		/* Dump includes meta data */
		meta_buf = dump_buf;
	}

	/* Allocate meta data memory */
	status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
	if (status != DBG_STATUS_OK)
		goto free_mem;

	/* Ignore the level and modules masks - just print everything that is
	 * already in the buffer.
	 */
	while (bytes_left) {
		struct mcp_trace_format *format_ptr;
		u8 format_level, format_module;
		u32 params[3] = { 0, 0, 0 };
		u32 header, format_idx, i;

		if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
			goto free_mem;
		}

		header = qed_read_from_cyclic_buf(trace_buf,
						  &offset, trace->size,
						  MFW_TRACE_ENTRY_SIZE);
		bytes_left -= MFW_TRACE_ENTRY_SIZE;
		format_idx = header & MFW_TRACE_EVENTID_MASK;

		/* Skip message if its index doesn't exist in the meta data */
		if (format_idx > meta.formats_num) {
			u8 format_size =
			    (u8)((header &
				  MFW_TRACE_PRM_SIZE_MASK) >>
				 MFW_TRACE_PRM_SIZE_SHIFT);

			if (bytes_left < format_size) {
				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
				goto free_mem;
			}

			offset = qed_cyclic_add(offset,
						format_size, trace->size);
			bytes_left -= format_size;
			continue;
		}

		format_ptr = &meta.formats[format_idx];
		for (i = 0,
		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
		     MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
			/* Extract param size (0..3) */
			u8 param_size =
			    (u8)((format_ptr->data &
				  param_mask) >> param_shift);

			/* If the param size is zero, there are no other
			 * parameters.
			 */
			if (!param_size)
				break;

			/* Size is encoded using 2 bits, where 3 is used to
			 * encode 4.
			 */
			if (param_size == 3)
				param_size = 4;
			if (bytes_left < param_size) {
				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
				goto free_mem;
			}

			params[i] = qed_read_from_cyclic_buf(trace_buf,
							     &offset,
							     trace->size,
							     param_size);
			bytes_left -= param_size;
		}

		format_level =
		    (u8)((format_ptr->data &
			  MCP_TRACE_FORMAT_LEVEL_MASK) >>
			 MCP_TRACE_FORMAT_LEVEL_SHIFT);
		format_module =
		    (u8)((format_ptr->data &
			  MCP_TRACE_FORMAT_MODULE_MASK) >>
			 MCP_TRACE_FORMAT_MODULE_SHIFT);
		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
			goto free_mem;
		}

		/* Print current message to results buffer */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s %-8s: ",
			    s_mcp_trace_level_str[format_level],
			    meta.modules[format_module]);
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    format_ptr->format_str, params[0], params[1],
			    params[2]);
	}

free_mem:
	*parsed_results_bytes = results_offset + 1;
	qed_mcp_trace_free_meta(p_hwfn, &meta);
	return status;
}

enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
						   u32 *dump_buf,
						   u32 num_dumped_dwords,
						   u32 *results_buf_size)
{
	return qed_parse_mcp_trace_dump(p_hwfn,
					dump_buf,
					num_dumped_dwords,
					NULL, results_buf_size);
}

enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
					    u32 *dump_buf,
					    u32 num_dumped_dwords,
					    char *results_buf)
{
	u32 parsed_buf_size;

	return qed_parse_mcp_trace_dump(p_hwfn,
					dump_buf,
					num_dumped_dwords,
					results_buf, &parsed_buf_size);
}
/* Parses a Reg FIFO dump buffer.
 * If result_buf is not NULL, the Reg FIFO results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */
static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
					       u32 *dump_buf,
					       u32 num_dumped_dwords,
					       char *results_buf,
					       u32 *parsed_results_bytes)
{
	u32 results_offset = 0, param_num_val, num_section_params, num_elements;
	const char *section_name, *param_name, *param_str_val;
	struct reg_fifo_element *elements;
	u8 i, j, err_val, vf_val;
	char vf_str[4];

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read reg_fifo_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "reg_fifo_data"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
	elements = (struct reg_fifo_element *)dump_buf;

	/* Decode elements */
	for (i = 0; i < num_elements; i++) {
		bool err_printed = false;

		/* Discover if element belongs to a VF or a PF */
		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
			sprintf(vf_str, "%s", "N/A");
		else
			sprintf(vf_str, "%d", vf_val);

		/* Add parsed element to parsed buffer */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
			    elements[i].data,
			    GET_FIELD(elements[i].data,
				      REG_FIFO_ELEMENT_ADDRESS) *
			    REG_FIFO_ELEMENT_ADDR_FACTOR,
			    s_access_strs[GET_FIELD(elements[i].data,
						    REG_FIFO_ELEMENT_ACCESS)],
			    GET_FIELD(elements[i].data,
				      REG_FIFO_ELEMENT_PF), vf_str,
			    GET_FIELD(elements[i].data,
				      REG_FIFO_ELEMENT_PORT),
			    s_privilege_strs[GET_FIELD(elements[i].
						       data,
						       REG_FIFO_ELEMENT_PRIVILEGE)],
			    s_protection_strs[GET_FIELD(elements[i].data,
							REG_FIFO_ELEMENT_PROTECTION)],
			    s_master_strs[GET_FIELD(elements[i].data,
						    REG_FIFO_ELEMENT_MASTER)]);

		/* Print errors */
		for (j = 0,
		     err_val = GET_FIELD(elements[i].data,
					 REG_FIFO_ELEMENT_ERROR);
		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
		     j++, err_val >>= 1) {
			if (!(err_val & 0x1))
				continue;
			if (err_printed)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    ", ");
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), "%s",
				    s_reg_fifo_error_strs[j]);
			err_printed = true;
		}

		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
	}

	results_offset += sprintf(qed_get_buf_ptr(results_buf,
						  results_offset),
				  "fifo contained %d elements", num_elements);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;
	return DBG_STATUS_OK;
}

enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords,
						  u32 *results_buf_size)
{
	return qed_parse_reg_fifo_dump(p_hwfn,
				       dump_buf,
				       num_dumped_dwords,
				       NULL, results_buf_size);
}

enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
					   u32 *dump_buf,
					   u32 num_dumped_dwords,
					   char *results_buf)
{
	u32 parsed_buf_size;

	return qed_parse_reg_fifo_dump(p_hwfn,
				       dump_buf,
				       num_dumped_dwords,
				       results_buf, &parsed_buf_size);
}
/* Parses an IGU FIFO dump buffer.
 * If result_buf is not NULL, the IGU FIFO results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */
static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
					       u32 *dump_buf,
					       u32 num_dumped_dwords,
					       char *results_buf,
					       u32 *parsed_results_bytes)
{
	u32 results_offset = 0, param_num_val, num_section_params, num_elements;
	const char *section_name, *param_name, *param_str_val;
	struct igu_fifo_element *elements;
	char parsed_addr_data[32];
	char parsed_wr_data[256];
	u8 i, j;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read igu_fifo_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "igu_fifo_data"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
	elements = (struct igu_fifo_element *)dump_buf;

	/* Decode elements */
	for (i = 0; i < num_elements; i++) {
		/* dword12 (dword index 1 and 2) contains bits 32..95 of the
		 * FIFO element.
		 */
		u64 dword12 =
		    ((u64)elements[i].dword2 << 32) | elements[i].dword1;
		bool is_wr_cmd = GET_FIELD(dword12,
					   IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
		bool is_pf = GET_FIELD(elements[i].dword0,
				       IGU_FIFO_ELEMENT_DWORD0_IS_PF);
		u16 cmd_addr = GET_FIELD(elements[i].dword0,
					 IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
		u8 source = GET_FIELD(elements[i].dword0,
				      IGU_FIFO_ELEMENT_DWORD0_SOURCE);
		u8 err_type = GET_FIELD(elements[i].dword0,
					IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
		const struct igu_fifo_addr_data *addr_data = NULL;

		if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
			return DBG_STATUS_IGU_FIFO_BAD_DATA;
		if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
			return DBG_STATUS_IGU_FIFO_BAD_DATA;

		/* Find address data */
		for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
		     j++)
			if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
			    cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
				addr_data = &s_igu_fifo_addr_data[j];
		if (!addr_data)
			return DBG_STATUS_IGU_FIFO_BAD_DATA;

		/* Prepare parsed address data */
		switch (addr_data->type) {
		case IGU_ADDR_TYPE_MSIX_MEM:
			sprintf(parsed_addr_data,
				" vector_num=0x%x", cmd_addr / 2);
			break;
		case IGU_ADDR_TYPE_WRITE_INT_ACK:
		case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
			sprintf(parsed_addr_data,
				" SB=0x%x", cmd_addr - addr_data->start_addr);
			break;
		default:
			parsed_addr_data[0] = '\0';
		}

		/* Prepare parsed write data */
		if (is_wr_cmd) {
			u32 wr_data = GET_FIELD(dword12,
					IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
			u32 prod_cons = GET_FIELD(wr_data,
						  IGU_FIFO_WR_DATA_PROD_CONS);
			u8 is_cleanup = GET_FIELD(wr_data,
						  IGU_FIFO_WR_DATA_CMD_TYPE);

			if (source == IGU_SRC_ATTN) {
				sprintf(parsed_wr_data,
					"prod: 0x%x, ", prod_cons);
			} else {
				if (is_cleanup) {
					u8 cleanup_val = GET_FIELD(wr_data,
						IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
					u8 cleanup_type = GET_FIELD(wr_data,
						IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);

					sprintf(parsed_wr_data,
						"cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
						cleanup_val ? "set" : "clear",
						cleanup_type);
				} else {
					u8 update_flag = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_UPDATE_FLAG);
					u8 en_dis_int_for_sb =
					    GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
					u8 segment = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_SEGMENT);
					u8 timer_mask = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_TIMER_MASK);

					sprintf(parsed_wr_data,
						"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
						prod_cons,
						update_flag ? "update" : "nop",
						en_dis_int_for_sb
						? (en_dis_int_for_sb ==
						   1 ? "disable" : "nop") :
						"enable",
						segment ? "attn" : "regular",
						timer_mask);
				}
			}
		} else {
			parsed_wr_data[0] = '\0';
		}

		/* Add parsed element to parsed buffer */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
			    elements[i].dword2, elements[i].dword1,
			    elements[i].dword0,
			    is_pf ? "pf" : "vf",
			    GET_FIELD(elements[i].dword0,
				      IGU_FIFO_ELEMENT_DWORD0_FID),
			    s_igu_fifo_source_strs[source],
			    is_wr_cmd ? "wr" : "rd", cmd_addr,
			    (!is_pf && addr_data->vf_desc)
			    ? addr_data->vf_desc : addr_data->desc,
			    parsed_addr_data, parsed_wr_data,
			    s_igu_fifo_error_strs[err_type]);
	}

	results_offset += sprintf(qed_get_buf_ptr(results_buf,
						  results_offset),
				  "fifo contained %d elements", num_elements);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;
	return DBG_STATUS_OK;
}

enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords,
						  u32 *results_buf_size)
{
	return qed_parse_igu_fifo_dump(p_hwfn,
				       dump_buf,
				       num_dumped_dwords,
				       NULL, results_buf_size);
}

enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
					   u32 *dump_buf,
					   u32 num_dumped_dwords,
					   char *results_buf)
{
	u32 parsed_buf_size;

	return qed_parse_igu_fifo_dump(p_hwfn,
				       dump_buf,
				       num_dumped_dwords,
				       results_buf, &parsed_buf_size);
}
static enum dbg_status
qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
				   u32 *dump_buf,
				   u32 num_dumped_dwords,
				   char *results_buf,
				   u32 *parsed_results_bytes)
{
	u32 results_offset = 0, param_num_val, num_section_params, num_elements;
	const char *section_name, *param_name, *param_str_val;
	struct protection_override_element *elements;
	u8 i;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read protection_override_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "protection_override_data"))
		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
	elements = (struct protection_override_element *)dump_buf;

	/* Decode elements */
	for (i = 0; i < num_elements; i++) {
		u32 address = GET_FIELD(elements[i].data,
					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;

		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
			    i, address,
			    GET_FIELD(elements[i].data,
				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
			    GET_FIELD(elements[i].data,
				      PROTECTION_OVERRIDE_ELEMENT_READ),
			    GET_FIELD(elements[i].data,
				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
			    s_protection_strs[GET_FIELD(elements[i].data,
				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
			    s_protection_strs[GET_FIELD(elements[i].data,
				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
	}

	results_offset += sprintf(qed_get_buf_ptr(results_buf,
						  results_offset),
				  "protection override contained %d elements",
				  num_elements);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;
	return DBG_STATUS_OK;
}

enum dbg_status
qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
					     u32 *dump_buf,
					     u32 num_dumped_dwords,
					     u32 *results_buf_size)
{
	return qed_parse_protection_override_dump(p_hwfn,
						  dump_buf,
						  num_dumped_dwords,
						  NULL, results_buf_size);
}

enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
						       u32 *dump_buf,
						       u32 num_dumped_dwords,
						       char *results_buf)
{
	u32 parsed_buf_size;

	return qed_parse_protection_override_dump(p_hwfn,
						  dump_buf,
						  num_dumped_dwords,
						  results_buf,
						  &parsed_buf_size);
}

/* Parses a FW Asserts dump buffer.
 * If result_buf is not NULL, the FW Asserts results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */
static enum dbg_status
qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			  u32 *dump_buf,
			  u32 num_dumped_dwords,
			  char *results_buf, u32 *parsed_results_bytes)
{
	u32 results_offset = 0, num_section_params, param_num_val, i;
	const char *param_name, *param_str_val, *section_name;
	bool last_section_found = false;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);
	while (!last_section_found) {
		const char *storm_letter = NULL;
		u32 storm_dump_size = 0;

		dump_buf += qed_read_section_hdr(dump_buf,
						 &section_name,
						 &num_section_params);
		if (!strcmp(section_name, "last")) {
			last_section_found = true;
			continue;
		} else if (strcmp(section_name, "fw_asserts")) {
			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
		}

		/* Extract params */
		for (i = 0; i < num_section_params; i++) {
			dump_buf += qed_read_param(dump_buf,
						   &param_name,
						   &param_str_val,
						   &param_num_val);
			if (!strcmp(param_name, "storm"))
				storm_letter = param_str_val;
			else if (!strcmp(param_name, "size"))
				storm_dump_size = param_num_val;
			else
				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
		}

		if (!storm_letter || !storm_dump_size)
			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

		/* Print data */
		results_offset += sprintf(qed_get_buf_ptr(results_buf,
							  results_offset),
					  "\n%sSTORM_ASSERT: size=%d\n",
					  storm_letter, storm_dump_size);
		for (i = 0; i < storm_dump_size; i++, dump_buf++)
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset),
				    "%08x\n", *dump_buf);
	}

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;
	return DBG_STATUS_OK;
}

enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
						    u32 *dump_buf,
						    u32 num_dumped_dwords,
						    u32 *results_buf_size)
{
	return qed_parse_fw_asserts_dump(p_hwfn,
					 dump_buf,
					 num_dumped_dwords,
					 NULL, results_buf_size);
}

enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
					     u32 *dump_buf,
					     u32 num_dumped_dwords,
					     char *results_buf)
{
	u32 parsed_buf_size;

	return qed_parse_fw_asserts_dump(p_hwfn,
					 dump_buf,
					 num_dumped_dwords,
					 results_buf, &parsed_buf_size);
}

/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
				   u32 *dump_buf,
				   u32 num_dumped_dwords,
				   char *results_buf)
{
	u32 num_errors, num_warnnings;

	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
					  results_buf, &num_errors,
					  &num_warnnings);
}

/* Feature meta data lookup table */
static struct {
	char *name;
	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, u32 *size);
	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt, u32 *dump_buf,
					u32 buf_size, u32 *dumped_dwords);
	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
					 u32 *dump_buf, u32 num_dumped_dwords,
					 char *results_buf);
	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
					    u32 *dump_buf,
					    u32 num_dumped_dwords,
					    u32 *results_buf_size);
} qed_features_lookup[] = {
	{"grc", qed_dbg_grc_get_dump_buf_size,
	 qed_dbg_grc_dump, NULL, NULL},
	{"idle_chk", qed_dbg_idle_chk_get_dump_buf_size,
	 qed_dbg_idle_chk_dump,
	 qed_print_idle_chk_results_wrapper,
	 qed_get_idle_chk_results_buf_size},
	{"mcp_trace", qed_dbg_mcp_trace_get_dump_buf_size,
	 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
	 qed_get_mcp_trace_results_buf_size},
	{"reg_fifo", qed_dbg_reg_fifo_get_dump_buf_size,
	 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
	 qed_get_reg_fifo_results_buf_size},
	{"igu_fifo", qed_dbg_igu_fifo_get_dump_buf_size,
	 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
	 qed_get_igu_fifo_results_buf_size},
	{"protection_override",
	 qed_dbg_protection_override_get_dump_buf_size,
	 qed_dbg_protection_override_dump,
	 qed_print_protection_override_results,
	 qed_get_protection_override_results_buf_size},
	{"fw_asserts", qed_dbg_fw_asserts_get_dump_buf_size,
	 qed_dbg_fw_asserts_dump,
	 qed_print_fw_asserts_results,
	 qed_get_fw_asserts_results_buf_size},
};

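/* Usage sketch (illustration only, not part of the original flow): a debug
 * feature is collected by calling the lookup entry's callbacks in order:
 * get_size() to learn the required dword count, perform_dump() to fill a
 * buffer of that size, and results_buf_size()/print_results() to format the
 * binary dump as text. A minimal sketch, assuming a pre-acquired ptt and an
 * already-allocated dump_buf of at least size_dwords dwords:
 *
 *	enum dbg_status rc;
 *	u32 size_dwords, dumped_dwords;
 *
 *	rc = qed_features_lookup[DBG_FEATURE_REG_FIFO].get_size(p_hwfn, p_ptt,
 *								&size_dwords);
 *	if (rc == DBG_STATUS_OK)
 *		rc = qed_features_lookup[DBG_FEATURE_REG_FIFO].perform_dump(
 *			p_hwfn, p_ptt, dump_buf, size_dwords, &dumped_dwords);
 *
 * qed_dbg_dump() below follows this sequence and then formats the result via
 * format_feature().
 */
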
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
	u32 i, precision = 80;

	if (!p_text_buf)
		return;

	pr_notice("\n%.*s", precision, p_text_buf);
	for (i = precision; i < text_size; i += precision)
		pr_cont("%.*s", precision, p_text_buf + i);
}

#define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info */
static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
				      enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_params.features[feature_idx];
	u32 text_size_bytes, null_char_pos, i;
	enum dbg_status rc;
	char *text_buf;

	/* Check if feature supports formatting capability */
	if (!qed_features_lookup[feature_idx].results_buf_size)
		return DBG_STATUS_OK;

	/* Obtain size of formatted output */
	rc = qed_features_lookup[feature_idx].
	    results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
			     feature->dumped_dwords, &text_size_bytes);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Make sure that the allocated size is a multiple of dword (4 bytes) */
	null_char_pos = text_size_bytes - 1;
	text_size_bytes = (text_size_bytes + 3) & ~0x3;

	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
		DP_NOTICE(p_hwfn->cdev,
			  "formatted size of feature was too small %d. Aborting\n",
			  text_size_bytes);
		return DBG_STATUS_INVALID_ARGS;
	}

	/* Allocate temp text buf */
	text_buf = vzalloc(text_size_bytes);
	if (!text_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Decode feature opcodes to string on temp buf */
	rc = qed_features_lookup[feature_idx].
	    print_results(p_hwfn, (u32 *)feature->dump_buf,
			  feature->dumped_dwords, text_buf);
	if (rc != DBG_STATUS_OK) {
		vfree(text_buf);
		return rc;
	}

	/* Replace the original null character with a '\n' character.
	 * The bytes that were added as a result of the dword alignment are
	 * also padded with '\n' characters.
	 */
	for (i = null_char_pos; i < text_size_bytes; i++)
		text_buf[i] = '\n';

	/* Dump printable feature to log */
	if (p_hwfn->cdev->dbg_params.print_data)
		qed_dbg_print_feature(text_buf, text_size_bytes);

	/* Free the old dump_buf and point the dump_buf to the newly allocated
	 * and formatted text buffer.
	 */
	vfree(feature->dump_buf);
	feature->dump_buf = text_buf;
	feature->buf_size = text_size_bytes;
	feature->dumped_dwords = text_size_bytes / 4;
	return rc;
}

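/* Worked example of the alignment logic above (illustration only): if the
 * formatted text needs text_size_bytes = 103, including the terminating NULL
 * at offset 102, then null_char_pos = 102 and the size is rounded up to
 * 104 bytes. After print_results() fills the buffer, offsets 102..103 are
 * overwritten with '\n', so the feature buffer ends on a dword boundary
 * (dumped_dwords = 26) and contains no NULL characters.
 */
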
/* Generic function for performing the dump of a debug feature. */
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_params.features[feature_idx];
	u32 buf_size_dwords;
	enum dbg_status rc;

	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
		  qed_features_lookup[feature_idx].name);

	/* Dump_buf was already allocated need to free (this can happen if dump
	 * was called but file was never read).
	 * We can't use the buffer as is since size may have changed.
	 */
	if (feature->dump_buf) {
		vfree(feature->dump_buf);
		feature->dump_buf = NULL;
	}

	/* Get buffer size from hsi, allocate accordingly, and perform the
	 * dump.
	 */
	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
						       &buf_size_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;
	feature->buf_size = buf_size_dwords * sizeof(u32);
	feature->dump_buf = vmalloc(feature->buf_size);
	if (!feature->dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	rc = qed_features_lookup[feature_idx].
	    perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
			 feature->buf_size / sizeof(u32),
			 &feature->dumped_dwords);

	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
	 * In this case the buffer holds valid binary data, but we won't be
	 * able to parse it (since parsing relies on data in NVRAM which is
	 * only accessible when MFW is responsive). Skip the formatting but
	 * return success so that binary data is provided.
	 */
	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return DBG_STATUS_OK;

	if (rc != DBG_STATUS_OK)
		return rc;

	/* Format output */
	rc = format_feature(p_hwfn, feature_idx);
	return rc;
}

int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
}

int qed_dbg_grc_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
}

int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
			       num_dumped_bytes);
}

int qed_dbg_idle_chk_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
}

int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
			       num_dumped_bytes);
}

int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
}

int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
			       num_dumped_bytes);
}

int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}

int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
				u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
			       num_dumped_bytes);
}

int qed_dbg_protection_override_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
}

int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
		       u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
			       num_dumped_bytes);
}

int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
}

int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
		      u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
			       num_dumped_bytes);
}

int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
}

/* Defines the amount of bytes allocated for recording the length of debugfs
 * feature buffer.
 */
#define REGDUMP_HEADER_SIZE			sizeof(u32)
#define REGDUMP_HEADER_FEATURE_SHIFT		24
#define REGDUMP_HEADER_ENGINE_SHIFT		31
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
enum debug_print_features {
	OLD_MODE = 0,
	IDLE_CHK = 1,
	GRC_DUMP = 2,
	MCP_TRACE = 3,
	REG_FIFO = 4,
	PROTECTION_OVERRIDE = 5,
	IGU_FIFO = 6,
	PHY = 7,
	FW_ASSERTS = 8,
};

static u32 qed_calc_regdump_header(enum debug_print_features feature,
				   int engine, u32 feature_size, u8 omit_engine)
{
	/* Insert the engine, feature and mode inside the header and combine it
	 * with feature size.
	 */
	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
}

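/* Header decoding sketch (illustration only; the variable names are
 * hypothetical and not part of the driver): the header above packs the
 * feature size into the low 24 bits (assuming it fits), the feature id
 * starting at bit REGDUMP_HEADER_FEATURE_SHIFT, the omit-engine flag at bit
 * REGDUMP_HEADER_OMIT_ENGINE_SHIFT and the engine at bit
 * REGDUMP_HEADER_ENGINE_SHIFT, so a consumer of the dump can walk it like:
 *
 *	u32 hdr = *(u32 *)dump_ptr;
 *	u32 feature_size = hdr & 0xffffff;
 *	u8 feature_id = (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f;
 *	u8 omit_engine = (hdr >> REGDUMP_HEADER_OMIT_ENGINE_SHIFT) & 1;
 *	u8 engine = (hdr >> REGDUMP_HEADER_ENGINE_SHIFT) & 1;
 */
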
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
	u8 cur_engine, omit_engine = 0, org_engine;
	u32 offset = 0, feature_size;
	int rc;

	if (cdev->num_hwfns == 1)
		omit_engine = 1;

	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Collect idle_chks and grcDump for each hw function */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "obtaining idle_chk and grcdump for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);

		/* First idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* Second idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* reg_fifo dump */
		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(REG_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
		}

		/* igu_fifo dump */
		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
		}

		/* protection_override dump */
		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
						 REGDUMP_HEADER_SIZE,
						 &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
						    cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev,
			       "qed_dbg_protection_override failed. rc = %d\n",
			       rc);
		}

		/* fw_asserts dump */
		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
					REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
			       rc);
		}

		/* GRC dump - must be last because when mcp stuck it will
		 * clutter idle_chk, reg_fifo, ...
		 */
		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
				 REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
		}
	}

	/* mcp_trace */
	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else {
		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
	}

	qed_set_debug_engine(cdev, org_engine);

	return 0;
}

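/* Resulting buffer layout (illustration only): qed_dbg_all_data() emits a
 * sequence of records, each prefixed by a u32 header (REGDUMP_HEADER_SIZE
 * bytes) built by qed_calc_regdump_header(), e.g. for a two-engine device:
 *
 *	[hdr IDLE_CHK e0][data][hdr IDLE_CHK e0][data][hdr REG_FIFO e0][data]
 *	... [hdr GRC_DUMP e0][data][hdr IDLE_CHK e1][data] ...
 *	[hdr MCP_TRACE][data]
 *
 * Features that fail to dump are simply skipped (their offset is not
 * advanced), so consumers must rely on the per-record headers rather than a
 * fixed layout.
 */
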
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
	u8 cur_engine, org_engine;
	u32 regs_len = 0;

	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Engine specific */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "calculating idle_chk and grcdump register length for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);
		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
			    REGDUMP_HEADER_SIZE +
			    qed_dbg_protection_override_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
	}

	/* Engine common */
	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
	qed_set_debug_engine(cdev, org_engine);

	return regs_len;
}

int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
	struct qed_dbg_feature *qed_feature =
		&cdev->dbg_params.features[feature];
	enum dbg_status dbg_rc;
	struct qed_ptt *p_ptt;
	int rc = 0;

	/* Acquire ptt */
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EINVAL;

	/* Get dump */
	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
	if (dbg_rc != DBG_STATUS_OK) {
		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
			   qed_dbg_get_status_str(dbg_rc));
		*num_dumped_bytes = 0;
		rc = -EINVAL;
		goto out;
	}

	DP_VERBOSE(cdev, QED_MSG_DEBUG,
		   "copying debugfs feature to external buffer\n");
	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
			    4;

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	struct qed_dbg_feature *qed_feature =
		&cdev->dbg_params.features[feature];
	u32 buf_size_dwords;
	enum dbg_status rc;

	if (!p_ptt)
		return -EINVAL;

	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
						   &buf_size_dwords);
	if (rc != DBG_STATUS_OK)
		buf_size_dwords = 0;

	qed_ptt_release(p_hwfn, p_ptt);
	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
	return qed_feature->buf_size;
}

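/* Typical caller pattern (illustration only; the buffer handling shown here
 * is an assumption about the debugfs glue, not code from this file): the
 * caller is expected to size the buffer first and then request the dump:
 *
 *	int size = qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
 *	void *buf = size > 0 ? vmalloc(size) : NULL;
 *	u32 dumped_bytes;
 *
 *	if (buf && !qed_dbg_feature(cdev, buf, DBG_FEATURE_GRC, &dumped_bytes))
 *		... hand dumped_bytes of buf to user space, then vfree(buf) ...
 */
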
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
	return cdev->dbg_params.engine_for_debug;
}

void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
		   engine_number);
	cdev->dbg_params.engine_for_debug = engine_number;
}

void qed_dbg_pf_init(struct qed_dev *cdev)
{
	const u8 *dbg_values;

	/* Debug values are after init values.
	 * The offset is the first dword of the file.
	 */
	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
	qed_dbg_set_bin_ptr((u8 *)dbg_values);
	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
}

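/* Firmware layout assumed by qed_dbg_pf_init() above (illustration only):
 * the first dword of the firmware image is a byte offset from the start of
 * the image to the debug-values blob. For example, if the first dword reads
 * 0x00012340, then dbg_values points at cdev->firmware->data + 0x12340.
 */
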
void qed_dbg_pf_exit(struct qed_dev *cdev)
{
	struct qed_dbg_feature *feature = NULL;
	enum qed_dbg_features feature_idx;

	/* Debug features' buffers may be allocated if debug feature was used
	 * but dump wasn't called.
	 */
	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
		feature = &cdev->dbg_params.features[feature_idx];
		if (feature->dump_buf) {
			vfree(feature->dump_buf);
			feature->dump_buf = NULL;