// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_zlib.h"
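
/* Collection entity tables: each entry pairs a CUDBG entity code with the
 * callback that dumps it.  The tables split the entities into memory dumps
 * (EDC/MC/HMA contents) and hardware state dumps (registers, logs, queues
 * and TCAMs), so callers can request either class independently.
 */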
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
	{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
	{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
	{ CUDBG_MC0, cudbg_collect_mc0_meminfo },
	{ CUDBG_MC1, cudbg_collect_mc1_meminfo },
	{ CUDBG_HMA, cudbg_collect_hma_meminfo },
};

static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
	{ CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
	{ CUDBG_QDESC, cudbg_collect_qdesc },
	{ CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
	{ CUDBG_REG_DUMP, cudbg_collect_reg_dump },
	{ CUDBG_CIM_LA, cudbg_collect_cim_la },
	{ CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
	{ CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
	{ CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
	{ CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
	{ CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
	{ CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
	{ CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
	{ CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
	{ CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
	{ CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
	{ CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
	{ CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
	{ CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
	{ CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
	{ CUDBG_RSS, cudbg_collect_rss },
	{ CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
	{ CUDBG_PATH_MTU, cudbg_collect_path_mtu },
	{ CUDBG_PM_STATS, cudbg_collect_pm_stats },
	{ CUDBG_HW_SCHED, cudbg_collect_hw_sched },
	{ CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
	{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
	{ CUDBG_TP_LA, cudbg_collect_tp_la },
	{ CUDBG_MEMINFO, cudbg_collect_meminfo },
	{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
	{ CUDBG_CLK, cudbg_collect_clk_info },
	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
	{ CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
	{ CUDBG_TID_INFO, cudbg_collect_tid },
	{ CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
	{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
	{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
	{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
	{ CUDBG_LE_TCAM, cudbg_collect_le_tcam },
	{ CUDBG_CCTRL, cudbg_collect_cctrl },
	{ CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
	{ CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
	{ CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
	{ CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
	{ CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
};
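
/* Compute the expected payload size, in bytes, of a single debug entity.
 * The sizes mirror what each cudbg_collect_*() callback will write, so
 * cxgb4_get_dump_length() can pre-size the destination buffer.
 */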
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1.  So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_QDESC:
		cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
		break;
	default:
		break;
	}

	return len;
}
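
/* Sum the sizes of all entities selected by @flag, and cap the result at
 * CUDBG_DUMP_BUFF_SIZE when zlib compression is available, since the
 * compressed dump is staged through a bounce buffer of that size.
 */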
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
	u32 i, entity;
	u32 len = 0;
	u32 wsize;

	if (flag & CXGB4_ETH_DUMP_HW) {
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
			entity = cxgb4_collect_hw_dump[i].entity;
			len += cxgb4_get_entity_length(adap, entity);
		}
	}

	if (flag & CXGB4_ETH_DUMP_MEM) {
		for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
			entity = cxgb4_collect_mem_dump[i].entity;
			len += cxgb4_get_entity_length(adap, entity);
		}
	}

	/* If compression is enabled, a smaller destination buffer is enough */
	wsize = cudbg_get_workspace_size();
	if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
		len = CUDBG_DUMP_BUFF_SIZE;

	return len;
}
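
/* Run every collect callback in @e_arr, recording each entity's offset,
 * size and status in its cudbg_entity_hdr.  A failing entity has its data
 * rolled back (size 0) and its error noted in the header, and collection
 * continues with the next entity.
 */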
static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
				       struct cudbg_buffer *dbg_buff,
				       const struct cxgb4_collect_entity *e_arr,
				       u32 arr_size, void *buf, u32 *tot_size)
{
	struct cudbg_error cudbg_err = { 0 };
	struct cudbg_entity_hdr *entity_hdr;
	u32 i, total_size = 0;
	int ret;

	for (i = 0; i < arr_size; i++) {
		const struct cxgb4_collect_entity *e = &e_arr[i];

		entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
		entity_hdr->entity_type = e->entity;
		entity_hdr->start_offset = dbg_buff->offset;
		memset(&cudbg_err, 0, sizeof(struct cudbg_error));
		ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
		if (ret) {
			entity_hdr->size = 0;
			dbg_buff->offset = entity_hdr->start_offset;
		} else {
			cudbg_align_debug_buffer(dbg_buff, entity_hdr);
		}

		/* Log error and continue with next entity */
		if (cudbg_err.sys_err)
			ret = CUDBG_SYSTEM_ERROR;

		entity_hdr->hdr_flags = ret;
		entity_hdr->sys_err = cudbg_err.sys_err;
		entity_hdr->sys_warn = cudbg_err.sys_warn;
		total_size += entity_hdr->size;
	}

	*tot_size += total_size;
}
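
/* Allocate one buffer holding both the compression bounce buffer and the
 * zlib workspace: the workspace occupies the tail of the allocation, so a
 * single vzalloc()/vfree() pair manages both.
 */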
static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
{
	u32 workspace_size;

	workspace_size = cudbg_get_workspace_size();
	pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
					   workspace_size);
	if (!pdbg_init->compress_buff)
		return -ENOMEM;

	pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
	pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
			       CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
	return 0;
}
static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
{
	if (pdbg_init->compress_buff)
		vfree(pdbg_init->compress_buff);
}
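
/* Collect the dump selected by @flag into @buf.  The resulting layout is a
 * struct cudbg_hdr, then CUDBG_MAX_ENTITY entity headers, then the entity
 * payloads.  On return, *buf_size holds the number of bytes actually used.
 *
 * A minimal caller sketch (assuming an adapter 'adap' and a dump 'flag';
 * error handling elided):
 *
 *	u32 len = cxgb4_get_dump_length(adap, flag);
 *	void *dump = vzalloc(len);
 *
 *	if (dump)
 *		rc = cxgb4_cudbg_collect(adap, dump, &len, flag);
 */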
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
			u32 flag)
{
	struct cudbg_buffer dbg_buff = { 0 };
	u32 size, min_size, total_size = 0;
	struct cudbg_init cudbg_init;
	struct cudbg_hdr *cudbg_hdr;
	int rc;

	size = *buf_size;

	memset(&cudbg_init, 0, sizeof(struct cudbg_init));
	cudbg_init.adap = adap;
	cudbg_init.outbuf = buf;
	cudbg_init.outbuf_size = size;

	dbg_buff.data = buf;
	dbg_buff.size = size;
	dbg_buff.offset = 0;

	cudbg_hdr = (struct cudbg_hdr *)buf;
	cudbg_hdr->signature = CUDBG_SIGNATURE;
	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
	cudbg_hdr->chip_ver = adap->params.chip;
	cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;

	min_size = sizeof(struct cudbg_hdr) +
		   sizeof(struct cudbg_entity_hdr) *
		   cudbg_hdr->max_entities;
	if (size < min_size)
		return -ENOMEM;

	rc = cudbg_get_workspace_size();
	if (rc) {
		/* Zlib available. So, use zlib deflate */
		cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
		rc = cudbg_alloc_compress_buff(&cudbg_init);
		if (rc) {
			/* Ignore error and continue without compression. */
			dev_warn(adap->pdev_dev,
				 "Fail allocating compression buffer ret: %d. Continuing without compression.\n",
				 rc);
			cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
			rc = 0;
		}
	} else {
		cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
	}

	cudbg_hdr->compress_type = cudbg_init.compress_type;
	dbg_buff.offset += min_size;
	total_size = dbg_buff.offset;

	if (flag & CXGB4_ETH_DUMP_HW)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_hw_dump,
					   ARRAY_SIZE(cxgb4_collect_hw_dump),
					   buf,
					   &total_size);

	if (flag & CXGB4_ETH_DUMP_MEM)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_mem_dump,
					   ARRAY_SIZE(cxgb4_collect_mem_dump),
					   buf,
					   &total_size);

	cudbg_free_compress_buff(&cudbg_init);
	cudbg_hdr->data_len = total_size;
	if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
		total_size = size;

	*buf_size = total_size;
	return 0;
}
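
/* Reset the ethtool dump state: no entities selected, length zero, and the
 * dump version pinned to the running firmware version.
 */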
void cxgb4_init_ethtool_dump(struct adapter *adapter)
{
	adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
	adapter->eth_dump.version = adapter->params.fw_vers;
	adapter->eth_dump.len = 0;
}
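
/* vmcoredd callback: invoked at crash-dump time to snapshot the full
 * device dump into the buffer reserved by cxgb4_cudbg_vmcore_add_dump().
 */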
static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
{
	struct adapter *adap = container_of(data, struct adapter, vmcoredd);
	u32 len = data->size;

	return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL);
}
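
/* Register a device dump with the vmcore infrastructure.  The reserved
 * size covers the dump header, all entity headers and a compressed data
 * area of CUDBG_DUMP_BUFF_SIZE.
 */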
int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap)
{
	struct vmcoredd_data *data = &adap->vmcoredd;
	u32 len;

	len = sizeof(struct cudbg_hdr) +
	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
	len += CUDBG_DUMP_BUFF_SIZE;

	data->size = len;
	snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s",
		 cxgb4_driver_name, adap->name);
	data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect;

	return vmcore_add_device_dump(data);
}