// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
 */

#include <linux/sort.h>
#include <linux/string.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"

static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
	{0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */
	{0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */
	{0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */
	{0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */
	{0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */
	{0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */
	{0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */
	{0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */
	{0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */
	{0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */
	{0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */
};

static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */
	{0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */
	{0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */
	{0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */
	{0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */
	{0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */
	{0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */
	{0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */
	{0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */
	{0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */
	{0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */
};

static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = {
	{0x7e18, 0x7e1c, 0x0, 12}
};

static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 21},
	{0x7e50, 0x7e54, 0x30, 32},
	{0x7e50, 0x7e54, 0x50, 22},
	{0x7e50, 0x7e54, 0x68, 12}
};

static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = {
	{0x7e50, 0x7e54, 0x0, 13},
	{0x7e50, 0x7e54, 0x10, 6},
	{0x7e50, 0x7e54, 0x18, 8},
	{0x7e50, 0x7e54, 0x20, 13},
	{0x7e50, 0x7e54, 0x30, 16},
	{0x7e50, 0x7e54, 0x40, 16},
	{0x7e50, 0x7e54, 0x50, 16},
	{0x7e50, 0x7e54, 0x60, 6},
	{0x7e50, 0x7e54, 0x68, 4}
};

static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
	{0x10cc, 0x10d0, 0x0, 16},
	{0x10cc, 0x10d4, 0x0, 16},
};

static const u32 t6_sge_qbase_index_array[] = {
	/* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */
	0x1250, 0x1240, 0x1244, 0x1248, 0x124c,
};

static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
	{0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
	{0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
	{0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */
};

static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = {
	{0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */
	{0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */
};

static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = {
	{0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */
	{0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */
};

static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
	{0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */
	{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
};

static const u32 t5_pcie_config_array[][2] = {
	/* {start, end} offsets of each PCIe config space range to dump */
};

static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
	{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
	{0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */
};

static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
	{0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */
	{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
};

static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
	{0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
	{0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
	{0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
	{0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
	{0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
	{0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
	{0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
	{0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
	{0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
	{0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
};

static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
	{0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
};

static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
	{0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */
};

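/* Return the size, in bytes, that the dump of @entity will occupy, so the
 * caller can reserve space in the output buffer before the matching
 * cudbg_collect_*() routine runs.
 */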
u32 cudbg_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1. So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_QDESC:
		cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
		break;
	default:
		break;
	}

	return len;
}

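/* Compress @pin_buff into @dbg_buff in CUDBG_CHUNK_SIZE pieces and shrink
 * @pin_buff->size to the total compressed length.
 */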
static int cudbg_do_compression(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *pin_buff,
				struct cudbg_buffer *dbg_buff)
{
	struct cudbg_buffer temp_in_buff = { 0 };
	int bytes_left, bytes_read, bytes;
	u32 offset = dbg_buff->offset;
	int rc;

	temp_in_buff.offset = pin_buff->offset;
	temp_in_buff.data = pin_buff->data;
	temp_in_buff.size = pin_buff->size;

	bytes_left = pin_buff->size;
	bytes_read = 0;
	while (bytes_left > 0) {
		/* Do compression in smaller chunks */
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		temp_in_buff.data = (char *)pin_buff->data + bytes_read;
		temp_in_buff.size = bytes;
		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
		if (rc)
			return rc;
		bytes_left -= bytes;
		bytes_read += bytes;
	}

	pin_buff->size = dbg_buff->offset - offset;
	return 0;
}

static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
					struct cudbg_buffer *pin_buff,
					struct cudbg_buffer *dbg_buff)
{
	int rc = 0;

	if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
		cudbg_update_buff(pin_buff, dbg_buff);
	} else {
		rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
		if (rc)
			goto out;
	}

out:
	cudbg_put_buff(pdbg_init, pin_buff);
	return rc;
}

static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
		return 0;

	return 1;
}

/* This function will add additional padding bytes into debug_buffer to make it
 * 4 byte aligned.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

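/* Entity headers are laid out back-to-back immediately after the main
 * cudbg_hdr; entity numbering is 1-based, hence the (i - 1) below.
 */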
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
			      void *dest)
{
	int vaddr, rc;

	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
	if (vaddr < 0)
		return vaddr;

	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
	if (rc < 0)
		return rc;

	return 0;
}

static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct cudbg_mem_desc *)a)->base -
	       ((const struct cudbg_mem_desc *)b)->base;
}

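/* Build a snapshot of the adapter memory map: the populated EDC/MC/HMA
 * ranges go into meminfo_buff->avail[] and the hardware region bases and
 * limits into meminfo_buff->mem[], both sorted by base address with
 * cudbg_mem_desc_cmp().
 */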
int cudbg_fill_meminfo(struct adapter *padap,
		       struct cudbg_meminfo *meminfo_buff)
{
	struct cudbg_mem_desc *md;
	u32 lo, hi, used, alloc;
	int n, i;

	memset(meminfo_buff->avail, 0,
	       ARRAY_SIZE(meminfo_buff->avail) *
	       sizeof(struct cudbg_mem_desc));
	memset(meminfo_buff->mem, 0,
	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
	md  = meminfo_buff->mem;

	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
		meminfo_buff->mem[i].limit = 0;
		meminfo_buff->mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (lo & EDRAM0_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 0;
		i++;
	}

	if (lo & EDRAM1_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 1;
		i++;
	}

	if (is_t5(padap->params.chip)) {
		if (lo & EXT_MEM0_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 3;
			i++;
		}

		if (lo & EXT_MEM1_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 4;
			i++;
		}
	} else {
		if (lo & EXT_MEM_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 2;
			i++;
		}

		if (lo & HMA_MUX_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 5;
			i++;
		}
	}

	if (!i) /* no memory available */
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	meminfo_buff->avail_c = i;
	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);
	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(padap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
	}
	md++;

#define ulp_region(reg) do { \
	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region
	md->base = 0;
	md->idx = ARRAY_SIZE(cudbg_region);
	if (!is_t4(padap->params.chip)) {
		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
		u32 size = 0;

		if (is_t5(padap->params.chip)) {
			if (sge_ctrl & VFIFO_ENABLE_F)
				size = DBVFIFO_SIZE_G(fifo_size);
		} else {
			size = T6_DBVFIFO_SIZE_G(fifo_size);
		}

		if (size) {
			md->base = BASEADDR_G(t4_read_reg(padap,
							  SGE_DBVFIFO_BADDR_A));
			md->limit = md->base + (size << 2) - 1;
		}
	}

	md++;

	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
	md->limit = 0;
	md++;

	md->base = padap->vres.ocq.start;
	if (padap->vres.ocq.size)
		md->limit = md->base + padap->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (meminfo_buff->avail[n].limit <
		    meminfo_buff->avail[n + 1].base)
			(md++)->base = meminfo_buff->avail[n].limit;

	if (meminfo_buff->avail[n].limit)
		(md++)->base = meminfo_buff->avail[n].limit;

	n = md - meminfo_buff->mem;
	meminfo_buff->mem_c = n;

	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);

	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_extmem2_lo = lo;
	meminfo_buff->up_extmem2_hi = hi;

	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
	for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
		meminfo_buff->free_rx_cnt +=
			FREERXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_RX_CNT_A));

	meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
	meminfo_buff->rx_pages_data[1] =
		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
	for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
		meminfo_buff->free_tx_cnt +=
			FREETXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_TX_CNT_A));

	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
	meminfo_buff->tx_pages_data[1] =
		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
	meminfo_buff->tx_pages_data[2] =
		hi >= (1 << 20) ? 'M' : 'K';
	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
	meminfo_buff->p_structs_free_cnt =
		FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));

	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->port_used[i] = used;
		meminfo_buff->port_alloc[i] = alloc;
	}

	for (i = 0; i < padap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->loopback_used[i] = used;
		meminfo_buff->loopback_alloc[i] = alloc;
	}

	return 0;
}

int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;

	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 10 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

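/* Thin wrappers binding each CIM inbound queue to its qid:
 * 0 = TP0, 1 = TP1, 2 = ULP, 3 = SGE0, 4 = SGE1, 5 = NCSI.
 */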
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in number of words */
	return value * sizeof(u32);
}

static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

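/* Translate a MEM_* memory type into its index within mem_info->avail[],
 * matching on the idx flag that cudbg_fill_meminfo() assigned to each
 * populated range.
 */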
static int cudbg_meminfo_get_mem_index(struct adapter *padap,
				       struct cudbg_meminfo *mem_info,
				       u8 mem_type, u8 *idx)
{
	u8 i, flag;

	switch (mem_type) {
	case MEM_EDC0:
		flag = EDC0_FLAG;
		break;
	case MEM_EDC1:
		flag = EDC1_FLAG;
		break;
	case MEM_MC0:
		/* Some T5 cards have both MC0 and MC1. */
		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
		break;
	case MEM_MC1:
		flag = MC1_FLAG;
		break;
	case MEM_HMA:
		flag = HMA_FLAG;
		break;
	default:
		return CUDBG_STATUS_ENTITY_NOT_FOUND;
	}

	for (i = 0; i < mem_info->avail_c; i++) {
		if (mem_info->avail[i].idx == flag) {
			*idx = i;
			return 0;
		}
	}

	return CUDBG_STATUS_ENTITY_NOT_FOUND;
}

/* Fetch the @region_name's start and end from @meminfo. */
static int cudbg_get_mem_region(struct adapter *padap,
				struct cudbg_meminfo *meminfo,
				u8 mem_type, const char *region_name,
				struct cudbg_mem_desc *mem_desc)
{
	u8 mc, found = 0;
	u32 idx = 0;
	int rc, i;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
	if (rc)
		return rc;

	i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name);
	if (i < 0)
		return -EINVAL;

	idx = i;
	for (i = 0; i < meminfo->mem_c; i++) {
		if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
			continue; /* Skip holes */

		if (!(meminfo->mem[i].limit))
			meminfo->mem[i].limit =
				i < meminfo->mem_c - 1 ?
				meminfo->mem[i + 1].base - 1 : ~0;

		if (meminfo->mem[i].idx == idx) {
			/* Check if the region exists in @mem_type memory */
			if (meminfo->mem[i].base < meminfo->avail[mc].base &&
			    meminfo->mem[i].limit < meminfo->avail[mc].base)
				return -EINVAL;

			if (meminfo->mem[i].base > meminfo->avail[mc].limit)
				return -EINVAL;

			memcpy(mem_desc, &meminfo->mem[i],
			       sizeof(struct cudbg_mem_desc));
			found = 1;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	return 0;
}

/* Fetch and update the start and end of the requested memory region w.r.t 0
 * in the corresponding EDC/MC/HMA.
 */
static int cudbg_get_mem_relative(struct adapter *padap,
				  struct cudbg_meminfo *meminfo,
				  u8 mem_type, u32 *out_base, u32 *out_end)
{
	u8 mc_idx;
	int rc;

	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
	if (rc)
		return rc;

	if (*out_base < meminfo->avail[mc_idx].base)
		*out_base = 0;
	else
		*out_base -= meminfo->avail[mc_idx].base;

	if (*out_end > meminfo->avail[mc_idx].limit)
		*out_end = meminfo->avail[mc_idx].limit;
	*out_end -= meminfo->avail[mc_idx].base;

	return 0;
}

/* Get TX and RX Payload region */
static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
				   const char *region_name,
				   struct cudbg_region_info *payload)
{
	struct cudbg_mem_desc mem_desc = { 0 };
	struct cudbg_meminfo meminfo;
	int rc;

	rc = cudbg_fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
				  &mem_desc);
	if (rc) {
		payload->exist = false;
		return 0;
	}

	payload->exist = true;
	payload->start = mem_desc.base;
	payload->end = mem_desc.limit;

	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
				      &payload->start, &payload->end);
}

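/* Read @len bytes of adapter memory at @addr through a PCI-E memory window,
 * sliding the window forward each time a read crosses the current aperture.
 */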
static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
			     int mtype, u32 addr, u32 len, void *hbuf)
{
	u32 win_pf, memoffset, mem_aperture, mem_base;
	struct adapter *adap = pdbg_init->adap;
	u32 pos, offset, resid;
	u32 *res_buf;
	u64 *buf;
	int ret;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;

	buf = (u64 *)hbuf;

	/* Try to do 64-bit reads. Residual will be handled later. */
	resid = len & 0x7;
	len -= resid;

	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
	if (ret)
		return ret;

	addr = addr + memoffset;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.
	 */
	t4_memory_update_win(adap, win, pos | win_pf);

	/* Transfer data from the adapter */
	while (len > 0) {
		*buf++ = le64_to_cpu((__force __le64)
				     t4_read_reg64(adap, mem_base + offset));
		offset += sizeof(u64);
		len -= sizeof(u64);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	res_buf = (u32 *)buf;
	/* Read residual in 32-bit multiples */
	while (resid > sizeof(u32)) {
		*res_buf++ = le32_to_cpu((__force __le32)
					 t4_read_reg(adap, mem_base + offset));
		offset += sizeof(u32);
		resid -= sizeof(u32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	/* Transfer residual < 32-bits */
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)res_buf, T4_MEMORY_READ);

	return 0;
}

#define CUDBG_YIELD_ITERATION 256

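/* Copy @tot_len bytes of @mem_type memory into the dump in CUDBG_CHUNK_SIZE
 * chunks, yielding the CPU every CUDBG_YIELD_ITERATION chunks and skipping
 * chunks that fall entirely inside a Tx/Rx payload region.
 */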
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	static const char * const region_name[] = { "Tx payload:",
						    "Rx payload:" };
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_region_info payload[2];
	u32 yield_count = 0;
	int rc = 0;
	u8 i;

	/* Get TX/RX Payload region range if they exist */
	memset(payload, 0, sizeof(payload));
	for (i = 0; i < ARRAY_SIZE(region_name); i++) {
		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
					     &payload[i]);
		if (rc)
			return rc;

		if (payload[i].exist) {
			/* Align start and end to avoid wrap around */
			payload[i].start = roundup(payload[i].start,
						   CUDBG_CHUNK_SIZE);
			payload[i].end = rounddown(payload[i].end,
						   CUDBG_CHUNK_SIZE);
		}
	}

	bytes_left = tot_len;
	while (bytes_left > 0) {
		/* As MC size is huge and read through PIO access, this
		 * loop will hold cpu for a longer time. OS may think that
		 * the process is hanged and will generate CPU stall traces.
		 * So yield the cpu regularly.
		 */
		yield_count++;
		if (!(yield_count % CUDBG_YIELD_ITERATION))
			schedule();

		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;

		for (i = 0; i < ARRAY_SIZE(payload); i++)
			if (payload[i].exist &&
			    bytes_read >= payload[i].start &&
			    bytes_read + bytes <= payload[i].end)
				/* TX and RX Payload regions can't overlap */
				goto skip_read;

		spin_lock(&padap->win0_lock);
		rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
				       bytes_read, bytes, temp_buff.data);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}

skip_read:
		bytes_left -= bytes;
		bytes_read += bytes;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return rc;
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {
		/* Flush uP dcache before reading edcX/mcX */
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
		if (rc)
			cudbg_err->sys_warn = rc;
	}
}

static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
				 struct cudbg_error *cudbg_err,
				 u8 mem_type, unsigned long *region_size)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_meminfo mem_info;
	u8 mc_idx;
	int rc;

	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
	rc = cudbg_fill_meminfo(padap, &mem_info);
	if (rc) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	cudbg_t4_fwcache(pdbg_init, cudbg_err);
	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
	if (rc) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	if (region_size)
		*region_size = mem_info.avail[mc_idx].limit -
			       mem_info.avail[mc_idx].base;

	return 0;
}

static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
				    struct cudbg_buffer *dbg_buff,
				    struct cudbg_error *cudbg_err,
				    u8 mem_type)
{
	unsigned long size = 0;
	int rc;

	rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
	if (rc)
		return rc;

	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
				 cudbg_err);
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
}

int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC0);
}

int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC1);
}

int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_HMA);
}

int cudbg_collect_rss(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc, nentries;

	nentries = t4_chip_rss_size(padap);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_rss_vf_conf *vfconf;
	int vf, rc, vf_count;

	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pm_stats *pm_stats_buff;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
	if (rc)
		return rc;

	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_hw_sched *hw_sched_buff;
	int i, rc = 0;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
			    &temp_buff);
	if (rc)
		return rc;

	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
	for (i = 0; i < NTX_SCHED; ++i)
		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
				&hw_sched_buff->ipg[i], true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

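/* Dump the TP indirect register windows (TP_PIO, TP_TM_PIO and
 * TP_MIB_INDEX) using the chip-specific tables defined at the top of
 * this file.
 */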
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
					      struct sge_qbase_reg_field *qbase,
					      u32 func, bool is_pf)
{
	u32 *buff, i;

	if (is_pf) {
		buff = qbase->pf_data_value[func];
	} else {
		buff = qbase->vf_data_value[func];
		/* In SGE_QBASE_INDEX,
		 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
		 */
		func += 8;
	}

	t4_write_reg(padap, qbase->reg_addr, func);
	for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
		*buff = t4_read_reg(padap, qbase->reg_data[i]);
}

int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct sge_qbase_reg_field *sge_qbase;
	struct ireg_buf *ch_sge_dbg;
	int i, rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
			    &temp_buff);
	if (rc)
		return rc;

	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < 2; i++) {
		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
		u32 *buff = ch_sge_dbg->outbuf;

		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
		ch_sge_dbg++;
	}

	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
		sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
		/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
		 * SGE_QBASE_MAP[0-3]
		 */
		sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
		for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
			sge_qbase->reg_data[i] =
				t6_sge_qbase_index_array[i + 1];

		for (i = 0; i <= PCIE_FW_MASTER_M; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, true);

		for (i = 0; i < padap->params.arch.vfcount; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, false);

		sge_qbase->vfcount = padap->params.arch.vfcount;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulprx_la *ulprx_la_buff;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tp_la *tp_la_buff;
	int size, rc;

	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_meminfo *meminfo_buff;
	struct cudbg_ver_hdr *ver_hdr;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_ver_hdr) +
			    sizeof(struct cudbg_meminfo),
			    &temp_buff);
	if (rc)
		return rc;

	ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_MEMINFO_REV;
	ver_hdr->size = sizeof(struct cudbg_meminfo);

	meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
						sizeof(*ver_hdr));
	rc = cudbg_fill_meminfo(padap, meminfo_buff);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_cim_pif_la *cim_pif_la_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = sizeof(struct cudbg_cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

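/* Convert the TP timer resolution registers into time values derived from
 * the core clock (cclk) period, in microseconds.
 */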
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int rc;

	if (!padap->params.vpd.cclk)
		return CUDBG_STATUS_CCLK_NOT_DEFINED;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
			    &temp_buff);
	if (rc)
		return rc;

	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
	/* in us */
	clk_info_buff->dack_timer =
		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
		t4_read_reg(padap, TP_DACK_TIMER_A);
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
	clk_info_buff->initial_srtt =
		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
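/* Note: the FW_PARAM_PFVF_A() queries below return an inclusive
 * [start, end] TID range from firmware, which is why the counts are
 * computed as "end - start + 1".
 */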
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
		      struct cudbg_buffer *dbg_buff,
		      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tid_info_region_rev1 *tid1;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tid_info_region *tid;
	u32 para[2], val[2];
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
	if (rc)
		return rc;

	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
	tid = &tid1->tid;
	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
			     sizeof(struct cudbg_ver_hdr);

	/* If firmware is not attached/alive, use backdoor register
	 * access to collect dump.
	 */
	if (!is_fw_attached(pdbg_init))
		goto fill_tid;

#define FW_PARAM_PFVF_A(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	 FW_PARAMS_PARAM_Y_V(0) | \
	 FW_PARAMS_PARAM_Z_V(0))

	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	tid->uotid_base = val[0];
	tid->nuotids = val[1] - val[0] + 1;

	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
		tid->hpftid_base = val[0];
		tid->nhpftids = val[1] - val[0] + 1;
	}

#undef FW_PARAM_PFVF_A

fill_tid:
	tid->ntids = padap->tids.ntids;
	tid->nstids = padap->tids.nstids;
	tid->stid_base = padap->tids.stid_base;
	tid->hash_base = padap->tids.hash_base;

	tid->natids = padap->tids.natids;
	tid->nftids = padap->tids.nftids;
	tid->ftid_base = padap->tids.ftid_base;
	tid->aftid_base = padap->tids.aftid_base;
	tid->aftid_end = padap->tids.aftid_end;

	tid->sftid_base = padap->tids.sftid_base;
	tid->nsftids = padap->tids.nsftids;

	tid->flags = padap->flags;
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size, *value, j;
	int i, rc, n;

	size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	value = (u32 *)temp_buff.data;
	for (i = 0; i < n; i++) {
		for (j = t5_pcie_config_array[i][0];
		     j <= t5_pcie_config_array[i][1]; j += 4) {
			t4_hw_pci_read_cfg4(padap, j, value);
			value++;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
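/* Note: each SGE context image carries a valid bit whose position
 * depends on the context type; the helper below locates the 32-bit
 * word containing that bit and tests it.
 */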
static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
{
	int index, bit, bit_pos = 0;

	switch (type) {
	case CTXT_EGRESS:
		bit_pos = 176;
		break;
	case CTXT_INGRESS:
		bit_pos = 141;
		break;
	case CTXT_FLM:
		bit_pos = 89;
		break;
	}
	index = bit_pos / 32;
	bit = bit_pos % 32;
	return buf[index] & (1U << bit);
}
static int cudbg_get_ctxt_region_info(struct adapter *padap,
				      struct cudbg_region_info *ctx_info,
				      u8 *mem_type)
{
	struct cudbg_mem_desc mem_desc;
	struct cudbg_meminfo meminfo;
	u32 i, j, value, found;
	u8 flq;
	int rc;

	rc = cudbg_fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	/* Get EGRESS and INGRESS context region size */
	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
		found = 0;
		memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
		for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
			rc = cudbg_get_mem_region(padap, &meminfo, j,
						  cudbg_region[i],
						  &mem_desc);
			if (!rc) {
				found = 1;
				rc = cudbg_get_mem_relative(padap, &meminfo, j,
							    &mem_desc.base,
							    &mem_desc.limit);
				if (rc) {
					ctx_info[i].exist = false;
					break;
				}
				ctx_info[i].exist = true;
				ctx_info[i].start = mem_desc.base;
				ctx_info[i].end = mem_desc.limit;
				mem_type[i] = j;
				break;
			}
		}
		if (!found)
			ctx_info[i].exist = false;
	}

	/* Get FLM and CNM max qid. */
	value = t4_read_reg(padap, SGE_FLM_CFG_A);

	/* Get number of data freelist queues */
	flq = HDRSTARTFLQ_G(value);
	ctx_info[CTXT_FLM].exist = true;
	ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;

	/* The number of CONM contexts are same as number of freelist
	 * queues.
	 */
	ctx_info[CTXT_CNM].exist = true;
	ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;

	return 0;
}
int cudbg_dump_context_size(struct adapter *padap)
{
	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
	u32 i, size = 0;
	int rc;

	/* Get max valid qid for each type of queue */
	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
	if (rc)
		return rc;

	for (i = 0; i < CTXT_CNM; i++) {
		if (!region_info[i].exist) {
			if (i == CTXT_EGRESS || i == CTXT_INGRESS)
				size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
					SGE_CTXT_SIZE;
			continue;
		}

		size += (region_info[i].end - region_info[i].start + 1) /
			SGE_CTXT_SIZE;
	}
	return size * sizeof(struct cudbg_ch_cntxt);
}
static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
				enum ctxt_type ctype, u32 *data)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;

	/* Under heavy traffic, the SGE Queue contexts registers will be
	 * frequently accessed by firmware.
	 *
	 * To avoid conflicts with firmware, always ask firmware to fetch
	 * the SGE Queue contexts via mailbox. On failure, fallback to
	 * accessing hardware registers directly.
	 */
	if (is_fw_attached(pdbg_init))
		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
	if (rc)
		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}
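/* Note: the helper below walks qids 0..max_qid and keeps only contexts
 * whose valid bit is set.  For CTXT_FLM it also fetches the paired
 * CTXT_CNM context, since FLM and CNM contexts are 1-to-1 mapped.
 */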
static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
				  u8 ctxt_type,
				  struct cudbg_ch_cntxt **out_buff)
{
	struct cudbg_ch_cntxt *buff = *out_buff;
	int rc;
	u32 j;

	for (j = 0; j < max_qid; j++) {
		cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
		rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
		if (!rc)
			continue;

		buff->cntxt_type = ctxt_type;
		buff->cntxt_id = j;
		buff++;
		if (ctxt_type == CTXT_FLM) {
			cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
			buff->cntxt_type = CTXT_CNM;
			buff->cntxt_id = j;
			buff++;
		}
	}

	*out_buff = buff;
}
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
	struct adapter *padap = pdbg_init->adap;
	u32 j, size, max_ctx_size, max_ctx_qid;
	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ch_cntxt *buff;
	__be64 *dst_off;
	u64 *src_off;
	u8 *ctx_buf;
	u8 i, k;
	int rc;

	/* Get max valid qid for each type of queue */
	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
	if (rc)
		return rc;

	rc = cudbg_dump_context_size(padap);
	if (rc <= 0)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	size = rc;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	/* Get buffer with enough space to read the biggest context
	 * region in memory.
	 */
	max_ctx_size = max(region_info[CTXT_EGRESS].end -
			   region_info[CTXT_EGRESS].start + 1,
			   region_info[CTXT_INGRESS].end -
			   region_info[CTXT_INGRESS].start + 1);

	ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
	if (!ctx_buf) {
		cudbg_put_buff(pdbg_init, &temp_buff);
		return -ENOMEM;
	}

	buff = (struct cudbg_ch_cntxt *)temp_buff.data;

	/* Collect EGRESS and INGRESS context data.
	 * In case of failures, fallback to collecting via FW or
	 * backdoor access.
	 */
	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
		if (!region_info[i].exist) {
			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
					      &buff);
			continue;
		}

		max_ctx_size = region_info[i].end - region_info[i].start + 1;
		max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;

		/* If firmware is not attached/alive, use backdoor register
		 * access to collect dump.
		 */
		if (is_fw_attached(pdbg_init)) {
			t4_sge_ctxt_flush(padap, padap->mbox, i);

			rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
					  region_info[i].start, max_ctx_size,
					  (__be32 *)ctx_buf, 1);
		}

		if (rc || !is_fw_attached(pdbg_init)) {
			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
					      &buff);
			continue;
		}

		for (j = 0; j < max_ctx_qid; j++) {
			src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
			dst_off = (__be64 *)buff->data;

			/* The data is stored in 64-bit cpu order.  Convert it
			 * to big endian before parsing.
			 */
			for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
				dst_off[k] = cpu_to_be64(src_off[k]);

			rc = cudbg_sge_ctxt_check_valid(buff->data, i);
			if (!rc)
				continue;

			buff->cntxt_type = i;
			buff->cntxt_id = j;
			buff++;
		}
	}

	kvfree(ctx_buf);

	/* Collect FREELIST and CONGESTION MANAGER contexts */
	max_ctx_size = region_info[CTXT_FLM].end -
		       region_info[CTXT_FLM].start + 1;
	max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
	/* Since FLM and CONM are 1-to-1 mapped, the below function
	 * will fetch both FLM and CONM contexts.
	 */
	cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
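/* Note: MPS TCAM entries are stored as (x, y) bit pairs.  The helper
 * below forms the mask as "x | y" and extracts the 48-bit Ethernet
 * address from the big-endian representation of y.
 */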
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
	*mask = x | y;
	y = (__force u64)cpu_to_be64(y);
	memcpy(addr, (char *)&y + 2, ETH_ALEN);
}
static void cudbg_mps_rpl_backdoor(struct adapter *padap,
				   struct fw_ldst_mps_rplc *mps_rplc)
{
	if (is_t5(padap->params.chip)) {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP3_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP2_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP1_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP0_A));
	} else {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP7_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP6_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP5_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP4_A));
	}
	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}
static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
				    struct cudbg_mps_tcam *tcam, u32 idx)
{
	struct adapter *padap = pdbg_init->adap;
	u64 tcamy, tcamx, val;
	u32 ctl, data2;
	int rc = 0;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
		/* CtlReqID   - 1: use Host Driver Requester ID
		 * CtlCmdType - 0: Read, 1: Write
		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
		 * CtlXYBitSel- 0: Y bit, 1: X bit
		 */

		/* Read tcamy */
		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
		if (idx < 256)
			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
		else
			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		tcam->lookup_type = DATALKPTYPE_G(data2);

		/* 0 - Outer header, 1 - Inner header
		 * [71:48] bit locations are overloaded for
		 * outer vs. inner lookup types.
		 */
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI */
			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
			tcam->dip_hit = data2 & DATADIPHIT_F;
		} else {
			tcam->vlan_vld = data2 & DATAVIDH2_F;
			tcam->ivlan = VIDL_G(val);
		}

		tcam->port_num = DATAPORTNUM_G(data2);

		/* Read tcamx. Change the control param */
		ctl |= CTLXYBITSEL_V(1);
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI mask */
			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
		}
	} else {
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
	}

	/* If no entry, return */
	if (tcamx & tcamy)
		return rc;

	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);

	if (tcam->repli) {
		struct fw_ldst_cmd ldst_cmd;
		struct fw_ldst_mps_rplc mps_rplc;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
		ldst_cmd.u.mps.rplc.fid_idx =
			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
			      FW_LDST_CMD_IDX_V(idx));

		/* If firmware is not attached/alive, use backdoor register
		 * access to collect dump.
		 */
		if (is_fw_attached(pdbg_init))
			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
					sizeof(ldst_cmd), &ldst_cmd);

		if (rc || !is_fw_attached(pdbg_init)) {
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
			/* Ignore error since we collected directly from
			 * reading registers.
			 */
			rc = 0;
		} else {
			mps_rplc = ldst_cmd.u.mps.rplc;
		}

		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
		}
	}
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
	tcam->idx = idx;
	tcam->rplc_size = padap->params.arch.mps_rplc_size;
	return rc;
}
int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size = 0, i, n, total_size = 0;
	struct cudbg_mps_tcam *tcam;
	int rc;

	n = padap->params.arch.mps_tcam_size;
	size = sizeof(struct cudbg_mps_tcam) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
	for (i = 0; i < n; i++) {
		rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
		total_size += sizeof(struct cudbg_mps_tcam);
		tcam++;
	}

	if (!total_size) {
		rc = CUDBG_SYSTEM_ERROR;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	char vpd_str[CUDBG_VPD_VER_LEN + 1];
	u32 scfg_vers, vpd_vers, fw_vers;
	struct cudbg_vpd_data *vpd_data;
	struct vpd_params vpd = { 0 };
	int rc, ret;

	rc = t4_get_raw_vpd_params(padap, &vpd);
	if (rc)
		return rc;

	rc = t4_get_fw_version(padap, &fw_vers);
	if (rc)
		return rc;

	/* Serial Configuration Version is located beyond the PF's vpd size.
	 * Temporarily give access to entire EEPROM to get it.
	 */
	rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
	if (rc < 0)
		return rc;

	ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
				 &scfg_vers);

	/* Restore back to original PF's vpd size */
	rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
	if (rc < 0)
		return rc;

	if (ret)
		return ret;

	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
				vpd_str);
	if (rc)
		return rc;

	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
	rc = kstrtouint(vpd_str, 0, &vpd_vers);
	if (rc)
		return rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
			    &temp_buff);
	if (rc)
		return rc;

	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
	vpd_data->scfg_vers = scfg_vers;
	vpd_data->vpd_vers = vpd_vers;
	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
			  struct cudbg_tid_data *tid_data)
{
	struct adapter *padap = pdbg_init->adap;
	int i, cmd_retry = 8;
	u32 val;

	/* Fill REQ_DATA regs with 0's */
	for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);

	/* Write DBIG command */
	val = DBGICMD_V(4) | DBGITID_V(tid);
	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
	tid_data->dbig_cmd = val;

	val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
	tid_data->dbig_conf = val;

	/* Poll the DBGICMDBUSY bit */
	val = 1;
	while (val) {
		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
		val = val & DBGICMDBUSY_F;
		cmd_retry--;
		if (!cmd_retry)
			return CUDBG_SYSTEM_ERROR;
	}

	/* Check RESP status */
	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
	tid_data->dbig_rsp_stat = val;
	if (!(val & 1))
		return CUDBG_SYSTEM_ERROR;

	/* Read RESP data */
	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
		tid_data->data[i] = t4_read_reg(padap,
						LE_DB_DBGI_RSP_DATA_A +
						(i << 2));
	tid_data->tid = tid;
	return 0;
}
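/* Note: the LE TID space is laid out in increasing order as active
 * connections, servers, filters, CLIP, routing, and the hash region,
 * so an entry's type can be derived by comparing its tid against the
 * region start indices captured in struct cudbg_tcam.
 */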
static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
{
	int type = LE_ET_UNKNOWN;

	if (tid < tcam_region.server_start)
		type = LE_ET_TCAM_CON;
	else if (tid < tcam_region.filter_start)
		type = LE_ET_TCAM_SERVER;
	else if (tid < tcam_region.clip_start)
		type = LE_ET_TCAM_FILTER;
	else if (tid < tcam_region.routing_start)
		type = LE_ET_TCAM_CLIP;
	else if (tid < tcam_region.tid_hash_base)
		type = LE_ET_TCAM_ROUTING;
	else if (tid < tcam_region.max_tid)
		type = LE_ET_HASH_CON;
	else
		type = LE_ET_INVALID_TID;

	return type;
}
static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
			       struct cudbg_tcam tcam_region)
{
	int ipv6 = 0;
	int le_type;

	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
	if (tid_data->tid & 1)
		return 0;

	if (le_type == LE_ET_HASH_CON) {
		ipv6 = tid_data->data[16] & 0x8000;
	} else if (le_type == LE_ET_TCAM_CON) {
		ipv6 = tid_data->data[16] & 0x8000;
		if (ipv6)
			ipv6 = tid_data->data[9] == 0x00C00000;
	} else {
		ipv6 = 0;
	}
	return ipv6;
}
void cudbg_fill_le_tcam_info(struct adapter *padap,
			     struct cudbg_tcam *tcam_region)
{
	u32 value;

	/* Get the LE regions */
	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
	tcam_region->tid_hash_base = value;

	/* Get routing table index */
	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
	tcam_region->routing_start = value;

	/* Get clip table index. For T6 there is separate CLIP TCAM */
	if (is_t6(padap->params.chip))
		value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
	else
		value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
	tcam_region->clip_start = value;

	/* Get filter table index */
	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
	tcam_region->filter_start = value;

	/* Get server table index */
	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
	tcam_region->server_start = value;

	/* Check whether hash is enabled and calculate the max tids */
	value = t4_read_reg(padap, LE_DB_CONFIG_A);
	if ((value >> HASHEN_S) & 1) {
		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
			tcam_region->max_tid = (value & 0xFFFFF) +
					       tcam_region->tid_hash_base;
		} else {
			value = HASHTIDSIZE_G(value);
			value = 1 << value;
			tcam_region->max_tid = value +
					       tcam_region->tid_hash_base;
		}
	} else { /* hash not enabled */
		if (is_t6(padap->params.chip))
			tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
					       CUDBG_MAX_TID_COMP_EN :
					       CUDBG_MAX_TID_COMP_DIS;
		else
			tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
	}

	if (is_t6(padap->params.chip))
		tcam_region->max_tid += CUDBG_T6_CLIP;
}
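/* Note: IPv6 entries occupy multiple consecutive tids (4 in the T6
 * CLIP region, 2 in the main TCAM), which is why the collection loop
 * below advances its index by a variable stride.
 */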
int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tcam tcam_region = { 0 };
	struct cudbg_tid_data *tid_data;
	u32 bytes = 0;
	int rc, size;
	u32 i;

	cudbg_fill_le_tcam_info(padap, &tcam_region);

	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
	size += sizeof(struct cudbg_tcam);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
	bytes = sizeof(struct cudbg_tcam);
	tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
	/* read all tid */
	for (i = 0; i < tcam_region.max_tid; ) {
		rc = cudbg_read_tid(pdbg_init, i, tid_data);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			/* Update tcam header and exit */
			tcam_region.max_tid = i;
			memcpy(temp_buff.data, &tcam_region,
			       sizeof(struct cudbg_tcam));
			goto out;
		}

		if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
			/* T6 CLIP TCAM: ipv6 takes 4 entries */
			if (is_t6(padap->params.chip) &&
			    i >= tcam_region.clip_start &&
			    i < tcam_region.clip_start + CUDBG_T6_CLIP)
				i += 4;
			else /* Main TCAM: ipv6 takes two tids */
				i += 2;
		} else {
			i++;
		}

		tid_data++;
		bytes += sizeof(struct cudbg_tid_data);
	}

out:
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size;
	int rc;

	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_read_cong_tbl(padap, (void *)temp_buff.data);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
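/* Note: the inner loop below reads ULP_TX_LA_RDDATA_0_A repeatedly for
 * each channel; successive reads appear to return successive logic
 * analyzer words as the hardware advances its read pointer.
 */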
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	struct cudbg_ver_hdr *ver_hdr;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_ver_hdr) +
			    sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_ULPTX_LA_REV;
	ver_hdr->size = sizeof(struct cudbg_ulptx_la);

	ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
						  sizeof(*ver_hdr));
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}

	for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
		t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
		ulptx_la_buff->rdptr_asic[i] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
		ulptx_la_buff->rddata_asic[i][0] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
		ulptx_la_buff->rddata_asic[i][1] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
		ulptx_la_buff->rddata_asic[i][2] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
		ulptx_la_buff->rddata_asic[i][3] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
		ulptx_la_buff->rddata_asic[i][4] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
		ulptx_la_buff->rddata_asic[i][5] =
				t4_read_reg(padap, PM_RX_BASE_ADDR);
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
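/* Note: most UP CIM windows are read as one contiguous range, but the
 * per-channel and per-class TSCH instances are scattered at fixed
 * strides (0x120 and 0x10 below), so those are read one word at a time.
 */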
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 local_offset, local_range;
	struct ireg_buf *up_cim;
	u32 size, j, iter;
	u32 instance = 0;
	int i, rc, n;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else
		return CUDBG_STATUS_NOT_IMPLEMENTED;

	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
			instance = t5_up_cim_reg_array[i][4];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
			instance = t6_up_cim_reg_array[i][4];
		}

		switch (instance) {
		case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
			iter = up_cim_reg->ireg_offset_range;
			local_offset = 0x120;
			local_range = 1;
			break;
		case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
			iter = up_cim_reg->ireg_offset_range;
			local_offset = 0x10;
			local_range = 1;
			break;
		default:
			iter = 1;
			local_offset = 0;
			local_range = up_cim_reg->ireg_offset_range;
		}

		for (j = 0; j < iter; j++, buff++) {
			rc = t4_cim_read(padap,
					 up_cim_reg->ireg_local_offset +
					 (j * local_offset), local_range, buff);
			if (rc) {
				cudbg_put_buff(pdbg_init, &temp_buff);
				return rc;
			}
		}
		up_cim++;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pbt_tables *pbt;
	int i, rc;
	u32 addr;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_pbt_tables),
			    &temp_buff);
	if (rc)
		return rc;

	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
	/* PBT dynamic entries */
	addr = CUDBG_CHAC_PBT_ADDR;
	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_dynamic[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT static entries */
	/* static entries start when bit 6 is set */
	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_static[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT LRF entries */
	addr = CUDBG_CHAC_PBT_LRF;
	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->lrf_table[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT data entries */
	addr = CUDBG_CHAC_PBT_DATA;
	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_data[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
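/* Note: the adapter's mailbox log is a circular buffer; log->cursor
 * points at the oldest entry, so the indices below wrap around
 * log->size to dump entries in chronological order.
 */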
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);
		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *hma_indr;
	int i, rc, n;
	u32 size;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	hma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *hma_fli = &hma_indr->tp_pio;
		u32 *buff = hma_indr->outbuf;

		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
				 buff, hma_fli->ireg_offset_range,
				 hma_fli->ireg_local_offset);
		hma_indr++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
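/* Note: the totals computed below are worst-case upper bounds built
 * from compile-time queue maxima, so a single scratch buffer sized
 * from them can hold descriptors for every queue type.
 */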
void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
				   u32 *num, u32 *size)
{
	u32 tot_entries = 0, tot_size = 0;

	/* NIC TXQ, RXQ, FLQ, and CTRLQ */
	tot_entries += MAX_ETH_QSETS * 3;
	tot_entries += MAX_CTRL_QUEUES;

	tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
	tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
	tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
	tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
		    MAX_CTRL_TXQ_DESC_SIZE;

	/* FW_EVTQ and INTRQ */
	tot_entries += INGQ_EXTRAS;
	tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;

	/* PTP_TXQ */
	tot_entries += 1;
	tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;

	/* ULD TXQ, RXQ, and FLQ */
	tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;

	tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
		    MAX_TXQ_DESC_SIZE;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
		    MAX_RXQ_DESC_SIZE;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
		    MAX_FL_DESC_SIZE;

	/* ULD CIQ */
	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
		    MAX_RXQ_DESC_SIZE;

	/* ETHOFLD TXQ, RXQ, and FLQ */
	tot_entries += MAX_OFLD_QSETS * 3;
	tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;

	tot_size += sizeof(struct cudbg_ver_hdr) +
		    sizeof(struct cudbg_qdesc_info) +
		    sizeof(struct cudbg_qdesc_entry) * tot_entries;

	if (num)
		*num = tot_entries;

	if (size)
		*size = tot_size;
}
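/* Note: the QDESC_GET*() macros below bail out to the given label once
 * the scratch buffer is exhausted; whatever was collected up to that
 * point is then copied out in CUDBG_CHUNK_SIZE pieces.
 */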
int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	u32 num_queues = 0, tot_entries = 0, size = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_qdesc_entry *qdesc_entry;
	struct cudbg_qdesc_info *qdesc_info;
	struct cudbg_ver_hdr *ver_hdr;
	struct sge *s = &padap->sge;
	u32 i, j, cur_off, tot_len;
	u8 *data;
	int rc;

	cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
	size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
	tot_len = size;
	data = kvzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ver_hdr = (struct cudbg_ver_hdr *)data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_QDESC_REV;
	ver_hdr->size = sizeof(struct cudbg_qdesc_info);
	size -= sizeof(*ver_hdr);

	qdesc_info = (struct cudbg_qdesc_info *)(data +
						 sizeof(*ver_hdr));
	size -= sizeof(*qdesc_info);
	qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;

#define QDESC_GET(q, desc, type, label) do { \
	if (size <= 0) { \
		goto label; \
	} \
	if (desc) { \
		cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
		size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
		num_queues++; \
		qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
	} \
} while (0)

#define QDESC_GET_TXQ(q, type, label) do { \
	struct sge_txq *txq = (struct sge_txq *)q; \
	QDESC_GET(txq, txq->desc, type, label); \
} while (0)

#define QDESC_GET_RXQ(q, type, label) do { \
	struct sge_rspq *rxq = (struct sge_rspq *)q; \
	QDESC_GET(rxq, rxq->desc, type, label); \
} while (0)

#define QDESC_GET_FLQ(q, type, label) do { \
	struct sge_fl *flq = (struct sge_fl *)q; \
	QDESC_GET(flq, flq->desc, type, label); \
} while (0)

	/* NIC TXQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);

	/* NIC RXQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);

	/* NIC FLQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);

	/* NIC CTRLQ */
	for (i = 0; i < padap->params.nports; i++)
		QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);

	/* FW_EVTQ */
	QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);

	/* INTRQ */
	QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);

	/* PTP_TXQ */
	QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);

	/* ULD queues */
	mutex_lock(&uld_mutex);

	if (s->uld_txq_info) {
		struct sge_uld_txq_info *utxq;

		/* ULD TXQ */
		for (j = 0; j < CXGB4_TX_MAX; j++) {
			if (!s->uld_txq_info[j])
				continue;

			utxq = s->uld_txq_info[j];
			for (i = 0; i < utxq->ntxq; i++)
				QDESC_GET_TXQ(&utxq->uldtxq[i].q,
					      cudbg_uld_txq_to_qtype(j),
					      out_unlock);
		}
	}

	if (s->uld_rxq_info) {
		struct sge_uld_rxq_info *urxq;
		u32 base;

		/* ULD RXQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			for (i = 0; i < urxq->nrxq; i++)
				QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
					      cudbg_uld_rxq_to_qtype(j),
					      out_unlock);
		}

		/* ULD FLQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			for (i = 0; i < urxq->nrxq; i++)
				QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
					      cudbg_uld_flq_to_qtype(j),
					      out_unlock);
		}

		/* ULD CIQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			base = urxq->nrxq;
			for (i = 0; i < urxq->nciq; i++)
				QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
					      cudbg_uld_ciq_to_qtype(j),
					      out_unlock);
		}
	}

	/* ETHOFLD TXQ */
	if (s->eohw_txq)
		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_TXQ(&s->eohw_txq[i].q,
				      CUDBG_QTYPE_ETHOFLD_TXQ, out);

	/* ETHOFLD RXQ and FLQ */
	if (s->eohw_rxq) {
		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
				      CUDBG_QTYPE_ETHOFLD_RXQ, out);

		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
				      CUDBG_QTYPE_ETHOFLD_FLQ, out);
	}

out_unlock:
	mutex_unlock(&uld_mutex);

out:
	qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
	qdesc_info->num_queues = num_queues;
	cur_off = 0;
	while (tot_len) {
		u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);

		rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
				    &temp_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out_free;
		}

		memcpy(temp_buff.data, data + cur_off, chunk_size);
		tot_len -= chunk_size;
		cur_off += chunk_size;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out_free;
		}
	}

out_free:
	if (data)
		kvfree(data);

#undef QDESC_GET_FLQ
#undef QDESC_GET_RXQ
#undef QDESC_GET_TXQ
#undef QDESC_GET

	return rc;
}
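/* Note: the flash dump below reads the serial flash one SF_PAGE_SIZE
 * page at a time starting at FLASH_EXP_ROM_START; buffer or write
 * failures mark the dump as partial via cudbg_err->sys_warn.
 */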
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	u32 count = padap->params.sf_size, n;
	struct cudbg_buffer temp_buff = {0};
	u32 addr, i;
	int rc;

	addr = FLASH_EXP_ROM_START;

	for (i = 0; i < count; i += SF_PAGE_SIZE) {
		n = min_t(u32, count - i, SF_PAGE_SIZE);

		rc = cudbg_get_buff(pdbg_init, dbg_buff, n, &temp_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out;
		}
		rc = t4_read_flash(padap, addr, n, (u32 *)temp_buff.data, 0);
		if (rc)
			goto out;

		addr += n;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;