// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 */

#include <linux/sort.h>
#include <linux/string.h>

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
static int cudbg_do_compression(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *pin_buff,
				struct cudbg_buffer *dbg_buff)
{
	struct cudbg_buffer temp_in_buff = { 0 };
	int bytes_left, bytes_read, bytes;
	u32 offset = dbg_buff->offset;
	int rc;

	temp_in_buff.offset = pin_buff->offset;
	temp_in_buff.data = pin_buff->data;
	temp_in_buff.size = pin_buff->size;

	bytes_read = 0;
	bytes_left = pin_buff->size;
	while (bytes_left > 0) {
		/* Do compression in smaller chunks */
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		temp_in_buff.data = (char *)pin_buff->data + bytes_read;
		temp_in_buff.size = bytes;
		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
		if (rc)
			return rc;

		bytes_left -= bytes;
		bytes_read += bytes;
	}

	pin_buff->size = dbg_buff->offset - offset;
	return 0;
}
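/* Write the collected data into the destination debug buffer, compressing it
 * first if compression was requested, and release the scratch buffer.
 */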
static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
					struct cudbg_buffer *pin_buff,
					struct cudbg_buffer *dbg_buff)
{
	int rc = 0;

	if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
		cudbg_update_buff(pin_buff, dbg_buff);
	} else {
		rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
		if (rc)
			goto out;
	}

out:
	cudbg_put_buff(pdbg_init, pin_buff);
	return rc;
}
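/* Firmware is considered usable only when it reports FW_OK and backdoor
 * register access (use_bd) has not been requested.
 */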
static int is_fw_attached(struct cudbg_init *pdbg_init)
{
	struct adapter *padap = pdbg_init->adap;

	if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
		return 0;

	return 1;
}
/* This function will add additional padding bytes into debug_buffer to make it
 * 4 byte aligned.
 */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
			      struct cudbg_entity_hdr *entity_hdr)
{
	u8 zero_buf[4] = {0};
	u8 padding, remain;

	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
	padding = 4 - remain;
	if (remain) {
		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
		       padding);
		dbg_buff->offset += padding;
		entity_hdr->num_pad = padding;
	}
	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	return (struct cudbg_entity_hdr *)
	       ((char *)outbuf + cudbg_hdr->hdr_len +
		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
}
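/* Translate a VPD address to its physical EEPROM offset for this PF and
 * read @len bytes into @dest.
 */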
static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
			      void *dest)
{
	int vaddr, rc;

	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
	if (vaddr < 0)
		return vaddr;

	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
	if (rc < 0)
		return rc;

	return 0;
}
static int cudbg_mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct cudbg_mem_desc *)a)->base -
	       ((const struct cudbg_mem_desc *)b)->base;
}
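/* Snapshot the adapter memory layout: available EDC/MC/HMA ranges, the
 * hardware memory regions carved out of them, and the current page and
 * pstruct usage counters.
 */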
int cudbg_fill_meminfo(struct adapter *padap,
		       struct cudbg_meminfo *meminfo_buff)
{
	struct cudbg_mem_desc *md;
	u32 lo, hi, used, alloc;
	int n, i;

	memset(meminfo_buff->avail, 0,
	       ARRAY_SIZE(meminfo_buff->avail) *
	       sizeof(struct cudbg_mem_desc));
	memset(meminfo_buff->mem, 0,
	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
	md  = meminfo_buff->mem;

	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
		meminfo_buff->mem[i].limit = 0;
		meminfo_buff->mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
	if (lo & EDRAM0_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 0;
		i++;
	}

	if (lo & EDRAM1_ENABLE_F) {
		hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
		meminfo_buff->avail[i].base =
			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
		meminfo_buff->avail[i].limit =
			meminfo_buff->avail[i].base +
			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
		meminfo_buff->avail[i].idx = 1;
		i++;
	}

	if (is_t5(padap->params.chip)) {
		if (lo & EXT_MEM0_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 3;
			i++;
		}

		if (lo & EXT_MEM1_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 4;
			i++;
		}
	} else {
		if (lo & EXT_MEM_ENABLE_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 2;
			i++;
		}

		if (lo & HMA_MUX_F) {
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
			meminfo_buff->avail[i].base =
				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
			meminfo_buff->avail[i].limit =
				meminfo_buff->avail[i].base +
				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
			meminfo_buff->avail[i].idx = 5;
			i++;
		}
	}

	if (!i) /* no memory available */
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	meminfo_buff->avail_c = i;
	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);
	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(padap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
		}
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	}
	md++;

#define ulp_region(reg) do { \
	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
} while (0)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region
	md->base = 0;
	md->idx = ARRAY_SIZE(cudbg_region);
	if (!is_t4(padap->params.chip)) {
		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
		u32 size = 0;

		if (is_t5(padap->params.chip)) {
			if (sge_ctrl & VFIFO_ENABLE_F)
				size = DBVFIFO_SIZE_G(fifo_size);
		} else {
			size = T6_DBVFIFO_SIZE_G(fifo_size);
		}

		if (size) {
			md->base = BASEADDR_G(t4_read_reg(padap,
							  SGE_DBVFIFO_BADDR_A));
			md->limit = md->base + (size << 2) - 1;
		}
	}

	md++;

	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
	md->limit = 0;
	md++;

	md->base = padap->vres.ocq.start;
	if (padap->vres.ocq.size)
		md->limit = md->base + padap->vres.ocq.size - 1;
	else
		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (meminfo_buff->avail[n].limit <
		    meminfo_buff->avail[n + 1].base)
			(md++)->base = meminfo_buff->avail[n].limit;

	if (meminfo_buff->avail[n].limit)
		(md++)->base = meminfo_buff->avail[n].limit;

	n = md - meminfo_buff->mem;
	meminfo_buff->mem_c = n;

	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);

	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_extmem2_lo = lo;
	meminfo_buff->up_extmem2_hi = hi;

	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
	for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
		meminfo_buff->free_rx_cnt +=
			FREERXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_RX_CNT_A));

	meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
	meminfo_buff->rx_pages_data[1] =
		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;

	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
	for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
		meminfo_buff->free_tx_cnt +=
			FREETXPAGECOUNT_G(t4_read_reg(padap,
						      TP_FLM_FREE_TX_CNT_A));

	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
	meminfo_buff->tx_pages_data[1] =
		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
	meminfo_buff->tx_pages_data[2] =
		hi >= (1 << 20) ? 'M' : 'K';
	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);

	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
	meminfo_buff->p_structs_free_cnt =
		FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));

	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->port_used[i] = used;
		meminfo_buff->port_alloc[i] = alloc;
	}

	for (i = 0; i < padap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
		meminfo_buff->loopback_used[i] = used;
		meminfo_buff->loopback_alloc[i] = alloc;
	}

	return 0;
}
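/* Dump the full register map; the map size depends on the chip generation. */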
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 buf_size = 0;
	int rc = 0;

	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
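/* The CIM logic-analyzer dump is prefixed with the UP_UP_DBG_LA_CFG_A value
 * so the parser knows how the LA was configured when the data was captured.
 */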
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
			 struct cudbg_buffer *dbg_buff,
			 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;
	u32 cfg = 0;

	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 10 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int size, rc;

	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_cim_qcfg *cim_qcfg_data;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
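/* Read one CIM inbound queue, selected by @qid, into the debug buffer. */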
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
	u32 value;

	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
	value = CIMQSIZE_G(value) * 64; /* size in number of words */
	return value * sizeof(u32);
}
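/* Read one CIM outbound queue, selected by @qid; the queue size is taken
 * from the CIM queue configuration registers.
 */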
static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize = cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
736 static int cudbg_meminfo_get_mem_index(struct adapter
*padap
,
737 struct cudbg_meminfo
*mem_info
,
738 u8 mem_type
, u8
*idx
)
750 /* Some T5 cards have both MC0 and MC1. */
751 flag
= is_t5(padap
->params
.chip
) ? MC0_FLAG
: MC_FLAG
;
760 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
763 for (i
= 0; i
< mem_info
->avail_c
; i
++) {
764 if (mem_info
->avail
[i
].idx
== flag
) {
770 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
773 /* Fetch the @region_name's start and end from @meminfo. */
774 static int cudbg_get_mem_region(struct adapter
*padap
,
775 struct cudbg_meminfo
*meminfo
,
776 u8 mem_type
, const char *region_name
,
777 struct cudbg_mem_desc
*mem_desc
)
783 rc
= cudbg_meminfo_get_mem_index(padap
, meminfo
, mem_type
, &mc
);
787 i
= match_string(cudbg_region
, ARRAY_SIZE(cudbg_region
), region_name
);
792 for (i
= 0; i
< meminfo
->mem_c
; i
++) {
793 if (meminfo
->mem
[i
].idx
>= ARRAY_SIZE(cudbg_region
))
794 continue; /* Skip holes */
796 if (!(meminfo
->mem
[i
].limit
))
797 meminfo
->mem
[i
].limit
=
798 i
< meminfo
->mem_c
- 1 ?
799 meminfo
->mem
[i
+ 1].base
- 1 : ~0;
801 if (meminfo
->mem
[i
].idx
== idx
) {
802 /* Check if the region exists in @mem_type memory */
803 if (meminfo
->mem
[i
].base
< meminfo
->avail
[mc
].base
&&
804 meminfo
->mem
[i
].limit
< meminfo
->avail
[mc
].base
)
807 if (meminfo
->mem
[i
].base
> meminfo
->avail
[mc
].limit
)
810 memcpy(mem_desc
, &meminfo
->mem
[i
],
811 sizeof(struct cudbg_mem_desc
));
822 /* Fetch and update the start and end of the requested memory region w.r.t 0
823 * in the corresponding EDC/MC/HMA.
825 static int cudbg_get_mem_relative(struct adapter
*padap
,
826 struct cudbg_meminfo
*meminfo
,
827 u8 mem_type
, u32
*out_base
, u32
*out_end
)
832 rc
= cudbg_meminfo_get_mem_index(padap
, meminfo
, mem_type
, &mc_idx
);
836 if (*out_base
< meminfo
->avail
[mc_idx
].base
)
839 *out_base
-= meminfo
->avail
[mc_idx
].base
;
841 if (*out_end
> meminfo
->avail
[mc_idx
].limit
)
842 *out_end
= meminfo
->avail
[mc_idx
].limit
;
844 *out_end
-= meminfo
->avail
[mc_idx
].base
;
849 /* Get TX and RX Payload region */
850 static int cudbg_get_payload_range(struct adapter
*padap
, u8 mem_type
,
851 const char *region_name
,
852 struct cudbg_region_info
*payload
)
854 struct cudbg_mem_desc mem_desc
= { 0 };
855 struct cudbg_meminfo meminfo
;
858 rc
= cudbg_fill_meminfo(padap
, &meminfo
);
862 rc
= cudbg_get_mem_region(padap
, &meminfo
, mem_type
, region_name
,
865 payload
->exist
= false;
869 payload
->exist
= true;
870 payload
->start
= mem_desc
.base
;
871 payload
->end
= mem_desc
.limit
;
873 return cudbg_get_mem_relative(padap
, &meminfo
, mem_type
,
874 &payload
->start
, &payload
->end
);
877 static int cudbg_memory_read(struct cudbg_init
*pdbg_init
, int win
,
878 int mtype
, u32 addr
, u32 len
, void *hbuf
)
880 u32 win_pf
, memoffset
, mem_aperture
, mem_base
;
881 struct adapter
*adap
= pdbg_init
->adap
;
882 u32 pos
, offset
, resid
;
887 /* Argument sanity checks ...
889 if (addr
& 0x3 || (uintptr_t)hbuf
& 0x3)
894 /* Try to do 64-bit reads. Residual will be handled later. */
898 ret
= t4_memory_rw_init(adap
, win
, mtype
, &memoffset
, &mem_base
,
903 addr
= addr
+ memoffset
;
904 win_pf
= is_t4(adap
->params
.chip
) ? 0 : PFNUM_V(adap
->pf
);
906 pos
= addr
& ~(mem_aperture
- 1);
909 /* Set up initial PCI-E Memory Window to cover the start of our
912 t4_memory_update_win(adap
, win
, pos
| win_pf
);
914 /* Transfer data from the adapter */
916 *buf
++ = le64_to_cpu((__force __le64
)
917 t4_read_reg64(adap
, mem_base
+ offset
));
918 offset
+= sizeof(u64
);
921 /* If we've reached the end of our current window aperture,
922 * move the PCI-E Memory Window on to the next.
924 if (offset
== mem_aperture
) {
927 t4_memory_update_win(adap
, win
, pos
| win_pf
);
931 res_buf
= (u32
*)buf
;
932 /* Read residual in 32-bit multiples */
933 while (resid
> sizeof(u32
)) {
934 *res_buf
++ = le32_to_cpu((__force __le32
)
935 t4_read_reg(adap
, mem_base
+ offset
));
936 offset
+= sizeof(u32
);
937 resid
-= sizeof(u32
);
939 /* If we've reached the end of our current window aperture,
940 * move the PCI-E Memory Window on to the next.
942 if (offset
== mem_aperture
) {
945 t4_memory_update_win(adap
, win
, pos
| win_pf
);
949 /* Transfer residual < 32-bits */
951 t4_memory_rw_residual(adap
, resid
, mem_base
+ offset
,
952 (u8
*)res_buf
, T4_MEMORY_READ
);
957 #define CUDBG_YIELD_ITERATION 256
959 static int cudbg_read_fw_mem(struct cudbg_init
*pdbg_init
,
960 struct cudbg_buffer
*dbg_buff
, u8 mem_type
,
961 unsigned long tot_len
,
962 struct cudbg_error
*cudbg_err
)
964 static const char * const region_name
[] = { "Tx payload:",
966 unsigned long bytes
, bytes_left
, bytes_read
= 0;
967 struct adapter
*padap
= pdbg_init
->adap
;
968 struct cudbg_buffer temp_buff
= { 0 };
969 struct cudbg_region_info payload
[2];
974 /* Get TX/RX Payload region range if they exist */
975 memset(payload
, 0, sizeof(payload
));
976 for (i
= 0; i
< ARRAY_SIZE(region_name
); i
++) {
977 rc
= cudbg_get_payload_range(padap
, mem_type
, region_name
[i
],
982 if (payload
[i
].exist
) {
983 /* Align start and end to avoid wrap around */
984 payload
[i
].start
= roundup(payload
[i
].start
,
986 payload
[i
].end
= rounddown(payload
[i
].end
,
991 bytes_left
= tot_len
;
992 while (bytes_left
> 0) {
993 /* As MC size is huge and read through PIO access, this
994 * loop will hold cpu for a longer time. OS may think that
995 * the process is hanged and will generate CPU stall traces.
996 * So yield the cpu regularly.
999 if (!(yield_count
% CUDBG_YIELD_ITERATION
))
1002 bytes
= min_t(unsigned long, bytes_left
,
1003 (unsigned long)CUDBG_CHUNK_SIZE
);
1004 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, bytes
, &temp_buff
);
1008 for (i
= 0; i
< ARRAY_SIZE(payload
); i
++)
1009 if (payload
[i
].exist
&&
1010 bytes_read
>= payload
[i
].start
&&
1011 bytes_read
+ bytes
<= payload
[i
].end
)
1012 /* TX and RX Payload regions can't overlap */
1015 spin_lock(&padap
->win0_lock
);
1016 rc
= cudbg_memory_read(pdbg_init
, MEMWIN_NIC
, mem_type
,
1017 bytes_read
, bytes
, temp_buff
.data
);
1018 spin_unlock(&padap
->win0_lock
);
1020 cudbg_err
->sys_err
= rc
;
1021 cudbg_put_buff(pdbg_init
, &temp_buff
);
1026 bytes_left
-= bytes
;
1027 bytes_read
+= bytes
;
1028 rc
= cudbg_write_and_release_buff(pdbg_init
, &temp_buff
,
1031 cudbg_put_buff(pdbg_init
, &temp_buff
);
1038 static void cudbg_t4_fwcache(struct cudbg_init
*pdbg_init
,
1039 struct cudbg_error
*cudbg_err
)
1041 struct adapter
*padap
= pdbg_init
->adap
;
1044 if (is_fw_attached(pdbg_init
)) {
1045 /* Flush uP dcache before reading edcX/mcX */
1046 rc
= t4_fwcache(padap
, FW_PARAM_DEV_FWCACHE_FLUSH
);
1048 cudbg_err
->sys_warn
= rc
;
1052 static unsigned long cudbg_mem_region_size(struct cudbg_init
*pdbg_init
,
1053 struct cudbg_error
*cudbg_err
,
1056 struct adapter
*padap
= pdbg_init
->adap
;
1057 struct cudbg_meminfo mem_info
;
1061 memset(&mem_info
, 0, sizeof(struct cudbg_meminfo
));
1062 rc
= cudbg_fill_meminfo(padap
, &mem_info
);
1066 cudbg_t4_fwcache(pdbg_init
, cudbg_err
);
1067 rc
= cudbg_meminfo_get_mem_index(padap
, &mem_info
, mem_type
, &mc_idx
);
1071 return mem_info
.avail
[mc_idx
].limit
- mem_info
.avail
[mc_idx
].base
;
1074 static int cudbg_collect_mem_region(struct cudbg_init
*pdbg_init
,
1075 struct cudbg_buffer
*dbg_buff
,
1076 struct cudbg_error
*cudbg_err
,
1079 unsigned long size
= cudbg_mem_region_size(pdbg_init
, cudbg_err
, mem_type
);
1081 return cudbg_read_fw_mem(pdbg_init
, dbg_buff
, mem_type
, size
,
1085 int cudbg_collect_edc0_meminfo(struct cudbg_init
*pdbg_init
,
1086 struct cudbg_buffer
*dbg_buff
,
1087 struct cudbg_error
*cudbg_err
)
1089 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1093 int cudbg_collect_edc1_meminfo(struct cudbg_init
*pdbg_init
,
1094 struct cudbg_buffer
*dbg_buff
,
1095 struct cudbg_error
*cudbg_err
)
1097 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1101 int cudbg_collect_mc0_meminfo(struct cudbg_init
*pdbg_init
,
1102 struct cudbg_buffer
*dbg_buff
,
1103 struct cudbg_error
*cudbg_err
)
1105 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1109 int cudbg_collect_mc1_meminfo(struct cudbg_init
*pdbg_init
,
1110 struct cudbg_buffer
*dbg_buff
,
1111 struct cudbg_error
*cudbg_err
)
1113 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1117 int cudbg_collect_hma_meminfo(struct cudbg_init
*pdbg_init
,
1118 struct cudbg_buffer
*dbg_buff
,
1119 struct cudbg_error
*cudbg_err
)
1121 return cudbg_collect_mem_region(pdbg_init
, dbg_buff
, cudbg_err
,
1125 int cudbg_collect_rss(struct cudbg_init
*pdbg_init
,
1126 struct cudbg_buffer
*dbg_buff
,
1127 struct cudbg_error
*cudbg_err
)
1129 struct adapter
*padap
= pdbg_init
->adap
;
1130 struct cudbg_buffer temp_buff
= { 0 };
1133 nentries
= t4_chip_rss_size(padap
);
1134 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, nentries
* sizeof(u16
),
1139 rc
= t4_read_rss(padap
, (u16
*)temp_buff
.data
);
1141 cudbg_err
->sys_err
= rc
;
1142 cudbg_put_buff(pdbg_init
, &temp_buff
);
1145 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1148 int cudbg_collect_rss_vf_config(struct cudbg_init
*pdbg_init
,
1149 struct cudbg_buffer
*dbg_buff
,
1150 struct cudbg_error
*cudbg_err
)
1152 struct adapter
*padap
= pdbg_init
->adap
;
1153 struct cudbg_buffer temp_buff
= { 0 };
1154 struct cudbg_rss_vf_conf
*vfconf
;
1155 int vf
, rc
, vf_count
;
1157 vf_count
= padap
->params
.arch
.vfcount
;
1158 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
1159 vf_count
* sizeof(struct cudbg_rss_vf_conf
),
1164 vfconf
= (struct cudbg_rss_vf_conf
*)temp_buff
.data
;
1165 for (vf
= 0; vf
< vf_count
; vf
++)
1166 t4_read_rss_vf_config(padap
, vf
, &vfconf
[vf
].rss_vf_vfl
,
1167 &vfconf
[vf
].rss_vf_vfh
, true);
1168 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1171 int cudbg_collect_path_mtu(struct cudbg_init
*pdbg_init
,
1172 struct cudbg_buffer
*dbg_buff
,
1173 struct cudbg_error
*cudbg_err
)
1175 struct adapter
*padap
= pdbg_init
->adap
;
1176 struct cudbg_buffer temp_buff
= { 0 };
1179 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, NMTUS
* sizeof(u16
),
1184 t4_read_mtu_tbl(padap
, (u16
*)temp_buff
.data
, NULL
);
1185 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1188 int cudbg_collect_pm_stats(struct cudbg_init
*pdbg_init
,
1189 struct cudbg_buffer
*dbg_buff
,
1190 struct cudbg_error
*cudbg_err
)
1192 struct adapter
*padap
= pdbg_init
->adap
;
1193 struct cudbg_buffer temp_buff
= { 0 };
1194 struct cudbg_pm_stats
*pm_stats_buff
;
1197 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_pm_stats
),
1202 pm_stats_buff
= (struct cudbg_pm_stats
*)temp_buff
.data
;
1203 t4_pmtx_get_stats(padap
, pm_stats_buff
->tx_cnt
, pm_stats_buff
->tx_cyc
);
1204 t4_pmrx_get_stats(padap
, pm_stats_buff
->rx_cnt
, pm_stats_buff
->rx_cyc
);
1205 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1208 int cudbg_collect_hw_sched(struct cudbg_init
*pdbg_init
,
1209 struct cudbg_buffer
*dbg_buff
,
1210 struct cudbg_error
*cudbg_err
)
1212 struct adapter
*padap
= pdbg_init
->adap
;
1213 struct cudbg_buffer temp_buff
= { 0 };
1214 struct cudbg_hw_sched
*hw_sched_buff
;
1217 if (!padap
->params
.vpd
.cclk
)
1218 return CUDBG_STATUS_CCLK_NOT_DEFINED
;
1220 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_hw_sched
),
1226 hw_sched_buff
= (struct cudbg_hw_sched
*)temp_buff
.data
;
1227 hw_sched_buff
->map
= t4_read_reg(padap
, TP_TX_MOD_QUEUE_REQ_MAP_A
);
1228 hw_sched_buff
->mode
= TIMERMODE_G(t4_read_reg(padap
, TP_MOD_CONFIG_A
));
1229 t4_read_pace_tbl(padap
, hw_sched_buff
->pace_tab
);
1230 for (i
= 0; i
< NTX_SCHED
; ++i
)
1231 t4_get_tx_sched(padap
, i
, &hw_sched_buff
->kbps
[i
],
1232 &hw_sched_buff
->ipg
[i
], true);
1233 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1236 int cudbg_collect_tp_indirect(struct cudbg_init
*pdbg_init
,
1237 struct cudbg_buffer
*dbg_buff
,
1238 struct cudbg_error
*cudbg_err
)
1240 struct adapter
*padap
= pdbg_init
->adap
;
1241 struct cudbg_buffer temp_buff
= { 0 };
1242 struct ireg_buf
*ch_tp_pio
;
1246 if (is_t5(padap
->params
.chip
))
1247 n
= sizeof(t5_tp_pio_array
) +
1248 sizeof(t5_tp_tm_pio_array
) +
1249 sizeof(t5_tp_mib_index_array
);
1251 n
= sizeof(t6_tp_pio_array
) +
1252 sizeof(t6_tp_tm_pio_array
) +
1253 sizeof(t6_tp_mib_index_array
);
1255 n
= n
/ (IREG_NUM_ELEM
* sizeof(u32
));
1256 size
= sizeof(struct ireg_buf
) * n
;
1257 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1261 ch_tp_pio
= (struct ireg_buf
*)temp_buff
.data
;
1264 if (is_t5(padap
->params
.chip
))
1265 n
= sizeof(t5_tp_pio_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1266 else if (is_t6(padap
->params
.chip
))
1267 n
= sizeof(t6_tp_pio_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1269 for (i
= 0; i
< n
; i
++) {
1270 struct ireg_field
*tp_pio
= &ch_tp_pio
->tp_pio
;
1271 u32
*buff
= ch_tp_pio
->outbuf
;
1273 if (is_t5(padap
->params
.chip
)) {
1274 tp_pio
->ireg_addr
= t5_tp_pio_array
[i
][0];
1275 tp_pio
->ireg_data
= t5_tp_pio_array
[i
][1];
1276 tp_pio
->ireg_local_offset
= t5_tp_pio_array
[i
][2];
1277 tp_pio
->ireg_offset_range
= t5_tp_pio_array
[i
][3];
1278 } else if (is_t6(padap
->params
.chip
)) {
1279 tp_pio
->ireg_addr
= t6_tp_pio_array
[i
][0];
1280 tp_pio
->ireg_data
= t6_tp_pio_array
[i
][1];
1281 tp_pio
->ireg_local_offset
= t6_tp_pio_array
[i
][2];
1282 tp_pio
->ireg_offset_range
= t6_tp_pio_array
[i
][3];
1284 t4_tp_pio_read(padap
, buff
, tp_pio
->ireg_offset_range
,
1285 tp_pio
->ireg_local_offset
, true);
1290 if (is_t5(padap
->params
.chip
))
1291 n
= sizeof(t5_tp_tm_pio_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1292 else if (is_t6(padap
->params
.chip
))
1293 n
= sizeof(t6_tp_tm_pio_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1295 for (i
= 0; i
< n
; i
++) {
1296 struct ireg_field
*tp_pio
= &ch_tp_pio
->tp_pio
;
1297 u32
*buff
= ch_tp_pio
->outbuf
;
1299 if (is_t5(padap
->params
.chip
)) {
1300 tp_pio
->ireg_addr
= t5_tp_tm_pio_array
[i
][0];
1301 tp_pio
->ireg_data
= t5_tp_tm_pio_array
[i
][1];
1302 tp_pio
->ireg_local_offset
= t5_tp_tm_pio_array
[i
][2];
1303 tp_pio
->ireg_offset_range
= t5_tp_tm_pio_array
[i
][3];
1304 } else if (is_t6(padap
->params
.chip
)) {
1305 tp_pio
->ireg_addr
= t6_tp_tm_pio_array
[i
][0];
1306 tp_pio
->ireg_data
= t6_tp_tm_pio_array
[i
][1];
1307 tp_pio
->ireg_local_offset
= t6_tp_tm_pio_array
[i
][2];
1308 tp_pio
->ireg_offset_range
= t6_tp_tm_pio_array
[i
][3];
1310 t4_tp_tm_pio_read(padap
, buff
, tp_pio
->ireg_offset_range
,
1311 tp_pio
->ireg_local_offset
, true);
1316 if (is_t5(padap
->params
.chip
))
1317 n
= sizeof(t5_tp_mib_index_array
) /
1318 (IREG_NUM_ELEM
* sizeof(u32
));
1319 else if (is_t6(padap
->params
.chip
))
1320 n
= sizeof(t6_tp_mib_index_array
) /
1321 (IREG_NUM_ELEM
* sizeof(u32
));
1323 for (i
= 0; i
< n
; i
++) {
1324 struct ireg_field
*tp_pio
= &ch_tp_pio
->tp_pio
;
1325 u32
*buff
= ch_tp_pio
->outbuf
;
1327 if (is_t5(padap
->params
.chip
)) {
1328 tp_pio
->ireg_addr
= t5_tp_mib_index_array
[i
][0];
1329 tp_pio
->ireg_data
= t5_tp_mib_index_array
[i
][1];
1330 tp_pio
->ireg_local_offset
=
1331 t5_tp_mib_index_array
[i
][2];
1332 tp_pio
->ireg_offset_range
=
1333 t5_tp_mib_index_array
[i
][3];
1334 } else if (is_t6(padap
->params
.chip
)) {
1335 tp_pio
->ireg_addr
= t6_tp_mib_index_array
[i
][0];
1336 tp_pio
->ireg_data
= t6_tp_mib_index_array
[i
][1];
1337 tp_pio
->ireg_local_offset
=
1338 t6_tp_mib_index_array
[i
][2];
1339 tp_pio
->ireg_offset_range
=
1340 t6_tp_mib_index_array
[i
][3];
1342 t4_tp_mib_read(padap
, buff
, tp_pio
->ireg_offset_range
,
1343 tp_pio
->ireg_local_offset
, true);
1346 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1349 static void cudbg_read_sge_qbase_indirect_reg(struct adapter
*padap
,
1350 struct sge_qbase_reg_field
*qbase
,
1351 u32 func
, bool is_pf
)
1356 buff
= qbase
->pf_data_value
[func
];
1358 buff
= qbase
->vf_data_value
[func
];
1359 /* In SGE_QBASE_INDEX,
1360 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
1365 t4_write_reg(padap
, qbase
->reg_addr
, func
);
1366 for (i
= 0; i
< SGE_QBASE_DATA_REG_NUM
; i
++, buff
++)
1367 *buff
= t4_read_reg(padap
, qbase
->reg_data
[i
]);
1370 int cudbg_collect_sge_indirect(struct cudbg_init
*pdbg_init
,
1371 struct cudbg_buffer
*dbg_buff
,
1372 struct cudbg_error
*cudbg_err
)
1374 struct adapter
*padap
= pdbg_init
->adap
;
1375 struct cudbg_buffer temp_buff
= { 0 };
1376 struct sge_qbase_reg_field
*sge_qbase
;
1377 struct ireg_buf
*ch_sge_dbg
;
1380 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
1381 sizeof(*ch_sge_dbg
) * 2 + sizeof(*sge_qbase
),
1386 ch_sge_dbg
= (struct ireg_buf
*)temp_buff
.data
;
1387 for (i
= 0; i
< 2; i
++) {
1388 struct ireg_field
*sge_pio
= &ch_sge_dbg
->tp_pio
;
1389 u32
*buff
= ch_sge_dbg
->outbuf
;
1391 sge_pio
->ireg_addr
= t5_sge_dbg_index_array
[i
][0];
1392 sge_pio
->ireg_data
= t5_sge_dbg_index_array
[i
][1];
1393 sge_pio
->ireg_local_offset
= t5_sge_dbg_index_array
[i
][2];
1394 sge_pio
->ireg_offset_range
= t5_sge_dbg_index_array
[i
][3];
1395 t4_read_indirect(padap
,
1399 sge_pio
->ireg_offset_range
,
1400 sge_pio
->ireg_local_offset
);
1404 if (CHELSIO_CHIP_VERSION(padap
->params
.chip
) > CHELSIO_T5
) {
1405 sge_qbase
= (struct sge_qbase_reg_field
*)ch_sge_dbg
;
1406 /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
1407 * SGE_QBASE_MAP[0-3]
1409 sge_qbase
->reg_addr
= t6_sge_qbase_index_array
[0];
1410 for (i
= 0; i
< SGE_QBASE_DATA_REG_NUM
; i
++)
1411 sge_qbase
->reg_data
[i
] =
1412 t6_sge_qbase_index_array
[i
+ 1];
1414 for (i
= 0; i
<= PCIE_FW_MASTER_M
; i
++)
1415 cudbg_read_sge_qbase_indirect_reg(padap
, sge_qbase
,
1418 for (i
= 0; i
< padap
->params
.arch
.vfcount
; i
++)
1419 cudbg_read_sge_qbase_indirect_reg(padap
, sge_qbase
,
1422 sge_qbase
->vfcount
= padap
->params
.arch
.vfcount
;
1425 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1428 int cudbg_collect_ulprx_la(struct cudbg_init
*pdbg_init
,
1429 struct cudbg_buffer
*dbg_buff
,
1430 struct cudbg_error
*cudbg_err
)
1432 struct adapter
*padap
= pdbg_init
->adap
;
1433 struct cudbg_buffer temp_buff
= { 0 };
1434 struct cudbg_ulprx_la
*ulprx_la_buff
;
1437 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_ulprx_la
),
1442 ulprx_la_buff
= (struct cudbg_ulprx_la
*)temp_buff
.data
;
1443 t4_ulprx_read_la(padap
, (u32
*)ulprx_la_buff
->data
);
1444 ulprx_la_buff
->size
= ULPRX_LA_SIZE
;
1445 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1448 int cudbg_collect_tp_la(struct cudbg_init
*pdbg_init
,
1449 struct cudbg_buffer
*dbg_buff
,
1450 struct cudbg_error
*cudbg_err
)
1452 struct adapter
*padap
= pdbg_init
->adap
;
1453 struct cudbg_buffer temp_buff
= { 0 };
1454 struct cudbg_tp_la
*tp_la_buff
;
1457 size
= sizeof(struct cudbg_tp_la
) + TPLA_SIZE
* sizeof(u64
);
1458 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1462 tp_la_buff
= (struct cudbg_tp_la
*)temp_buff
.data
;
1463 tp_la_buff
->mode
= DBGLAMODE_G(t4_read_reg(padap
, TP_DBG_LA_CONFIG_A
));
1464 t4_tp_read_la(padap
, (u64
*)tp_la_buff
->data
, NULL
);
1465 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1468 int cudbg_collect_meminfo(struct cudbg_init
*pdbg_init
,
1469 struct cudbg_buffer
*dbg_buff
,
1470 struct cudbg_error
*cudbg_err
)
1472 struct adapter
*padap
= pdbg_init
->adap
;
1473 struct cudbg_buffer temp_buff
= { 0 };
1474 struct cudbg_meminfo
*meminfo_buff
;
1475 struct cudbg_ver_hdr
*ver_hdr
;
1478 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
1479 sizeof(struct cudbg_ver_hdr
) +
1480 sizeof(struct cudbg_meminfo
),
1485 ver_hdr
= (struct cudbg_ver_hdr
*)temp_buff
.data
;
1486 ver_hdr
->signature
= CUDBG_ENTITY_SIGNATURE
;
1487 ver_hdr
->revision
= CUDBG_MEMINFO_REV
;
1488 ver_hdr
->size
= sizeof(struct cudbg_meminfo
);
1490 meminfo_buff
= (struct cudbg_meminfo
*)(temp_buff
.data
+
1492 rc
= cudbg_fill_meminfo(padap
, meminfo_buff
);
1494 cudbg_err
->sys_err
= rc
;
1495 cudbg_put_buff(pdbg_init
, &temp_buff
);
1499 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1502 int cudbg_collect_cim_pif_la(struct cudbg_init
*pdbg_init
,
1503 struct cudbg_buffer
*dbg_buff
,
1504 struct cudbg_error
*cudbg_err
)
1506 struct cudbg_cim_pif_la
*cim_pif_la_buff
;
1507 struct adapter
*padap
= pdbg_init
->adap
;
1508 struct cudbg_buffer temp_buff
= { 0 };
1511 size
= sizeof(struct cudbg_cim_pif_la
) +
1512 2 * CIM_PIFLA_SIZE
* 6 * sizeof(u32
);
1513 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1517 cim_pif_la_buff
= (struct cudbg_cim_pif_la
*)temp_buff
.data
;
1518 cim_pif_la_buff
->size
= CIM_PIFLA_SIZE
;
1519 t4_cim_read_pif_la(padap
, (u32
*)cim_pif_la_buff
->data
,
1520 (u32
*)cim_pif_la_buff
->data
+ 6 * CIM_PIFLA_SIZE
,
1522 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1525 int cudbg_collect_clk_info(struct cudbg_init
*pdbg_init
,
1526 struct cudbg_buffer
*dbg_buff
,
1527 struct cudbg_error
*cudbg_err
)
1529 struct adapter
*padap
= pdbg_init
->adap
;
1530 struct cudbg_buffer temp_buff
= { 0 };
1531 struct cudbg_clk_info
*clk_info_buff
;
1535 if (!padap
->params
.vpd
.cclk
)
1536 return CUDBG_STATUS_CCLK_NOT_DEFINED
;
1538 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, sizeof(struct cudbg_clk_info
),
1543 clk_info_buff
= (struct cudbg_clk_info
*)temp_buff
.data
;
1544 clk_info_buff
->cclk_ps
= 1000000000 / padap
->params
.vpd
.cclk
; /* psec */
1545 clk_info_buff
->res
= t4_read_reg(padap
, TP_TIMER_RESOLUTION_A
);
1546 clk_info_buff
->tre
= TIMERRESOLUTION_G(clk_info_buff
->res
);
1547 clk_info_buff
->dack_re
= DELAYEDACKRESOLUTION_G(clk_info_buff
->res
);
1548 tp_tick_us
= (clk_info_buff
->cclk_ps
<< clk_info_buff
->tre
) / 1000000;
1550 clk_info_buff
->dack_timer
=
1551 (clk_info_buff
->cclk_ps
<< clk_info_buff
->dack_re
) / 1000000 *
1552 t4_read_reg(padap
, TP_DACK_TIMER_A
);
1553 clk_info_buff
->retransmit_min
=
1554 tp_tick_us
* t4_read_reg(padap
, TP_RXT_MIN_A
);
1555 clk_info_buff
->retransmit_max
=
1556 tp_tick_us
* t4_read_reg(padap
, TP_RXT_MAX_A
);
1557 clk_info_buff
->persist_timer_min
=
1558 tp_tick_us
* t4_read_reg(padap
, TP_PERS_MIN_A
);
1559 clk_info_buff
->persist_timer_max
=
1560 tp_tick_us
* t4_read_reg(padap
, TP_PERS_MAX_A
);
1561 clk_info_buff
->keepalive_idle_timer
=
1562 tp_tick_us
* t4_read_reg(padap
, TP_KEEP_IDLE_A
);
1563 clk_info_buff
->keepalive_interval
=
1564 tp_tick_us
* t4_read_reg(padap
, TP_KEEP_INTVL_A
);
1565 clk_info_buff
->initial_srtt
=
1566 tp_tick_us
* INITSRTT_G(t4_read_reg(padap
, TP_INIT_SRTT_A
));
1567 clk_info_buff
->finwait2_timer
=
1568 tp_tick_us
* t4_read_reg(padap
, TP_FINWAIT2_TIMER_A
);
1570 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1573 int cudbg_collect_pcie_indirect(struct cudbg_init
*pdbg_init
,
1574 struct cudbg_buffer
*dbg_buff
,
1575 struct cudbg_error
*cudbg_err
)
1577 struct adapter
*padap
= pdbg_init
->adap
;
1578 struct cudbg_buffer temp_buff
= { 0 };
1579 struct ireg_buf
*ch_pcie
;
1583 n
= sizeof(t5_pcie_pdbg_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1584 size
= sizeof(struct ireg_buf
) * n
* 2;
1585 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1589 ch_pcie
= (struct ireg_buf
*)temp_buff
.data
;
1591 for (i
= 0; i
< n
; i
++) {
1592 struct ireg_field
*pcie_pio
= &ch_pcie
->tp_pio
;
1593 u32
*buff
= ch_pcie
->outbuf
;
1595 pcie_pio
->ireg_addr
= t5_pcie_pdbg_array
[i
][0];
1596 pcie_pio
->ireg_data
= t5_pcie_pdbg_array
[i
][1];
1597 pcie_pio
->ireg_local_offset
= t5_pcie_pdbg_array
[i
][2];
1598 pcie_pio
->ireg_offset_range
= t5_pcie_pdbg_array
[i
][3];
1599 t4_read_indirect(padap
,
1600 pcie_pio
->ireg_addr
,
1601 pcie_pio
->ireg_data
,
1603 pcie_pio
->ireg_offset_range
,
1604 pcie_pio
->ireg_local_offset
);
1609 n
= sizeof(t5_pcie_cdbg_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1610 for (i
= 0; i
< n
; i
++) {
1611 struct ireg_field
*pcie_pio
= &ch_pcie
->tp_pio
;
1612 u32
*buff
= ch_pcie
->outbuf
;
1614 pcie_pio
->ireg_addr
= t5_pcie_cdbg_array
[i
][0];
1615 pcie_pio
->ireg_data
= t5_pcie_cdbg_array
[i
][1];
1616 pcie_pio
->ireg_local_offset
= t5_pcie_cdbg_array
[i
][2];
1617 pcie_pio
->ireg_offset_range
= t5_pcie_cdbg_array
[i
][3];
1618 t4_read_indirect(padap
,
1619 pcie_pio
->ireg_addr
,
1620 pcie_pio
->ireg_data
,
1622 pcie_pio
->ireg_offset_range
,
1623 pcie_pio
->ireg_local_offset
);
1626 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1629 int cudbg_collect_pm_indirect(struct cudbg_init
*pdbg_init
,
1630 struct cudbg_buffer
*dbg_buff
,
1631 struct cudbg_error
*cudbg_err
)
1633 struct adapter
*padap
= pdbg_init
->adap
;
1634 struct cudbg_buffer temp_buff
= { 0 };
1635 struct ireg_buf
*ch_pm
;
1639 n
= sizeof(t5_pm_rx_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1640 size
= sizeof(struct ireg_buf
) * n
* 2;
1641 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1645 ch_pm
= (struct ireg_buf
*)temp_buff
.data
;
1647 for (i
= 0; i
< n
; i
++) {
1648 struct ireg_field
*pm_pio
= &ch_pm
->tp_pio
;
1649 u32
*buff
= ch_pm
->outbuf
;
1651 pm_pio
->ireg_addr
= t5_pm_rx_array
[i
][0];
1652 pm_pio
->ireg_data
= t5_pm_rx_array
[i
][1];
1653 pm_pio
->ireg_local_offset
= t5_pm_rx_array
[i
][2];
1654 pm_pio
->ireg_offset_range
= t5_pm_rx_array
[i
][3];
1655 t4_read_indirect(padap
,
1659 pm_pio
->ireg_offset_range
,
1660 pm_pio
->ireg_local_offset
);
1665 n
= sizeof(t5_pm_tx_array
) / (IREG_NUM_ELEM
* sizeof(u32
));
1666 for (i
= 0; i
< n
; i
++) {
1667 struct ireg_field
*pm_pio
= &ch_pm
->tp_pio
;
1668 u32
*buff
= ch_pm
->outbuf
;
1670 pm_pio
->ireg_addr
= t5_pm_tx_array
[i
][0];
1671 pm_pio
->ireg_data
= t5_pm_tx_array
[i
][1];
1672 pm_pio
->ireg_local_offset
= t5_pm_tx_array
[i
][2];
1673 pm_pio
->ireg_offset_range
= t5_pm_tx_array
[i
][3];
1674 t4_read_indirect(padap
,
1678 pm_pio
->ireg_offset_range
,
1679 pm_pio
->ireg_local_offset
);
1682 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1685 int cudbg_collect_tid(struct cudbg_init
*pdbg_init
,
1686 struct cudbg_buffer
*dbg_buff
,
1687 struct cudbg_error
*cudbg_err
)
1689 struct adapter
*padap
= pdbg_init
->adap
;
1690 struct cudbg_tid_info_region_rev1
*tid1
;
1691 struct cudbg_buffer temp_buff
= { 0 };
1692 struct cudbg_tid_info_region
*tid
;
1693 u32 para
[2], val
[2];
1696 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
,
1697 sizeof(struct cudbg_tid_info_region_rev1
),
1702 tid1
= (struct cudbg_tid_info_region_rev1
*)temp_buff
.data
;
1704 tid1
->ver_hdr
.signature
= CUDBG_ENTITY_SIGNATURE
;
1705 tid1
->ver_hdr
.revision
= CUDBG_TID_INFO_REV
;
1706 tid1
->ver_hdr
.size
= sizeof(struct cudbg_tid_info_region_rev1
) -
1707 sizeof(struct cudbg_ver_hdr
);
1709 /* If firmware is not attached/alive, use backdoor register
1710 * access to collect dump.
1712 if (!is_fw_attached(pdbg_init
))
1715 #define FW_PARAM_PFVF_A(param) \
1716 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
1717 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
1718 FW_PARAMS_PARAM_Y_V(0) | \
1719 FW_PARAMS_PARAM_Z_V(0))
1721 para
[0] = FW_PARAM_PFVF_A(ETHOFLD_START
);
1722 para
[1] = FW_PARAM_PFVF_A(ETHOFLD_END
);
1723 rc
= t4_query_params(padap
, padap
->mbox
, padap
->pf
, 0, 2, para
, val
);
1725 cudbg_err
->sys_err
= rc
;
1726 cudbg_put_buff(pdbg_init
, &temp_buff
);
1729 tid
->uotid_base
= val
[0];
1730 tid
->nuotids
= val
[1] - val
[0] + 1;
1732 if (is_t5(padap
->params
.chip
)) {
1733 tid
->sb
= t4_read_reg(padap
, LE_DB_SERVER_INDEX_A
) / 4;
1734 } else if (is_t6(padap
->params
.chip
)) {
1736 t4_read_reg(padap
, LE_DB_ACTIVE_TABLE_START_INDEX_A
);
1737 tid
->sb
= t4_read_reg(padap
, LE_DB_SRVR_START_INDEX_A
);
1739 para
[0] = FW_PARAM_PFVF_A(HPFILTER_START
);
1740 para
[1] = FW_PARAM_PFVF_A(HPFILTER_END
);
1741 rc
= t4_query_params(padap
, padap
->mbox
, padap
->pf
, 0, 2,
1744 cudbg_err
->sys_err
= rc
;
1745 cudbg_put_buff(pdbg_init
, &temp_buff
);
1748 tid
->hpftid_base
= val
[0];
1749 tid
->nhpftids
= val
[1] - val
[0] + 1;
1752 #undef FW_PARAM_PFVF_A
1755 tid
->ntids
= padap
->tids
.ntids
;
1756 tid
->nstids
= padap
->tids
.nstids
;
1757 tid
->stid_base
= padap
->tids
.stid_base
;
1758 tid
->hash_base
= padap
->tids
.hash_base
;
1760 tid
->natids
= padap
->tids
.natids
;
1761 tid
->nftids
= padap
->tids
.nftids
;
1762 tid
->ftid_base
= padap
->tids
.ftid_base
;
1763 tid
->aftid_base
= padap
->tids
.aftid_base
;
1764 tid
->aftid_end
= padap
->tids
.aftid_end
;
1766 tid
->sftid_base
= padap
->tids
.sftid_base
;
1767 tid
->nsftids
= padap
->tids
.nsftids
;
1769 tid
->flags
= padap
->flags
;
1770 tid
->le_db_conf
= t4_read_reg(padap
, LE_DB_CONFIG_A
);
1771 tid
->ip_users
= t4_read_reg(padap
, LE_DB_ACT_CNT_IPV4_A
);
1772 tid
->ipv6_users
= t4_read_reg(padap
, LE_DB_ACT_CNT_IPV6_A
);
1774 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1777 int cudbg_collect_pcie_config(struct cudbg_init
*pdbg_init
,
1778 struct cudbg_buffer
*dbg_buff
,
1779 struct cudbg_error
*cudbg_err
)
1781 struct adapter
*padap
= pdbg_init
->adap
;
1782 struct cudbg_buffer temp_buff
= { 0 };
1783 u32 size
, *value
, j
;
1786 size
= sizeof(u32
) * CUDBG_NUM_PCIE_CONFIG_REGS
;
1787 n
= sizeof(t5_pcie_config_array
) / (2 * sizeof(u32
));
1788 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1792 value
= (u32
*)temp_buff
.data
;
1793 for (i
= 0; i
< n
; i
++) {
1794 for (j
= t5_pcie_config_array
[i
][0];
1795 j
<= t5_pcie_config_array
[i
][1]; j
+= 4) {
1796 t4_hw_pci_read_cfg4(padap
, j
, value
);
1800 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
1803 static int cudbg_sge_ctxt_check_valid(u32
*buf
, int type
)
1805 int index
, bit
, bit_pos
= 0;
1818 index
= bit_pos
/ 32;
1820 return buf
[index
] & (1U << bit
);
1823 static int cudbg_get_ctxt_region_info(struct adapter
*padap
,
1824 struct cudbg_region_info
*ctx_info
,
1827 struct cudbg_mem_desc mem_desc
;
1828 struct cudbg_meminfo meminfo
;
1829 u32 i
, j
, value
, found
;
1833 rc
= cudbg_fill_meminfo(padap
, &meminfo
);
1837 /* Get EGRESS and INGRESS context region size */
1838 for (i
= CTXT_EGRESS
; i
<= CTXT_INGRESS
; i
++) {
1840 memset(&mem_desc
, 0, sizeof(struct cudbg_mem_desc
));
1841 for (j
= 0; j
< ARRAY_SIZE(meminfo
.avail
); j
++) {
1842 rc
= cudbg_get_mem_region(padap
, &meminfo
, j
,
1847 rc
= cudbg_get_mem_relative(padap
, &meminfo
, j
,
1851 ctx_info
[i
].exist
= false;
1854 ctx_info
[i
].exist
= true;
1855 ctx_info
[i
].start
= mem_desc
.base
;
1856 ctx_info
[i
].end
= mem_desc
.limit
;
1862 ctx_info
[i
].exist
= false;
1865 /* Get FLM and CNM max qid. */
1866 value
= t4_read_reg(padap
, SGE_FLM_CFG_A
);
1868 /* Get number of data freelist queues */
1869 flq
= HDRSTARTFLQ_G(value
);
1870 ctx_info
[CTXT_FLM
].exist
= true;
1871 ctx_info
[CTXT_FLM
].end
= (CUDBG_MAX_FL_QIDS
>> flq
) * SGE_CTXT_SIZE
;
1873 /* The number of CONM contexts are same as number of freelist
1876 ctx_info
[CTXT_CNM
].exist
= true;
1877 ctx_info
[CTXT_CNM
].end
= ctx_info
[CTXT_FLM
].end
;
1882 int cudbg_dump_context_size(struct adapter
*padap
)
1884 struct cudbg_region_info region_info
[CTXT_CNM
+ 1] = { {0} };
1885 u8 mem_type
[CTXT_INGRESS
+ 1] = { 0 };
1889 /* Get max valid qid for each type of queue */
1890 rc
= cudbg_get_ctxt_region_info(padap
, region_info
, mem_type
);
1894 for (i
= 0; i
< CTXT_CNM
; i
++) {
1895 if (!region_info
[i
].exist
) {
1896 if (i
== CTXT_EGRESS
|| i
== CTXT_INGRESS
)
1897 size
+= CUDBG_LOWMEM_MAX_CTXT_QIDS
*
1902 size
+= (region_info
[i
].end
- region_info
[i
].start
+ 1) /
1905 return size
* sizeof(struct cudbg_ch_cntxt
);
1908 static void cudbg_read_sge_ctxt(struct cudbg_init
*pdbg_init
, u32 cid
,
1909 enum ctxt_type ctype
, u32
*data
)
1911 struct adapter
*padap
= pdbg_init
->adap
;
1914 /* Under heavy traffic, the SGE Queue contexts registers will be
1915 * frequently accessed by firmware.
1917 * To avoid conflicts with firmware, always ask firmware to fetch
1918 * the SGE Queue contexts via mailbox. On failure, fallback to
1919 * accessing hardware registers directly.
1921 if (is_fw_attached(pdbg_init
))
1922 rc
= t4_sge_ctxt_rd(padap
, padap
->mbox
, cid
, ctype
, data
);
1924 t4_sge_ctxt_rd_bd(padap
, cid
, ctype
, data
);
1927 static void cudbg_get_sge_ctxt_fw(struct cudbg_init
*pdbg_init
, u32 max_qid
,
1929 struct cudbg_ch_cntxt
**out_buff
)
1931 struct cudbg_ch_cntxt
*buff
= *out_buff
;
1935 for (j
= 0; j
< max_qid
; j
++) {
1936 cudbg_read_sge_ctxt(pdbg_init
, j
, ctxt_type
, buff
->data
);
1937 rc
= cudbg_sge_ctxt_check_valid(buff
->data
, ctxt_type
);
1941 buff
->cntxt_type
= ctxt_type
;
1944 if (ctxt_type
== CTXT_FLM
) {
1945 cudbg_read_sge_ctxt(pdbg_init
, j
, CTXT_CNM
, buff
->data
);
1946 buff
->cntxt_type
= CTXT_CNM
;
1955 int cudbg_collect_dump_context(struct cudbg_init
*pdbg_init
,
1956 struct cudbg_buffer
*dbg_buff
,
1957 struct cudbg_error
*cudbg_err
)
1959 struct cudbg_region_info region_info
[CTXT_CNM
+ 1] = { {0} };
1960 struct adapter
*padap
= pdbg_init
->adap
;
1961 u32 j
, size
, max_ctx_size
, max_ctx_qid
;
1962 u8 mem_type
[CTXT_INGRESS
+ 1] = { 0 };
1963 struct cudbg_buffer temp_buff
= { 0 };
1964 struct cudbg_ch_cntxt
*buff
;
1965 u64
*dst_off
, *src_off
;
1970 /* Get max valid qid for each type of queue */
1971 rc
= cudbg_get_ctxt_region_info(padap
, region_info
, mem_type
);
1975 rc
= cudbg_dump_context_size(padap
);
1977 return CUDBG_STATUS_ENTITY_NOT_FOUND
;
1980 rc
= cudbg_get_buff(pdbg_init
, dbg_buff
, size
, &temp_buff
);
1984 /* Get buffer with enough space to read the biggest context
1987 max_ctx_size
= max(region_info
[CTXT_EGRESS
].end
-
1988 region_info
[CTXT_EGRESS
].start
+ 1,
1989 region_info
[CTXT_INGRESS
].end
-
1990 region_info
[CTXT_INGRESS
].start
+ 1);
1992 ctx_buf
= kvzalloc(max_ctx_size
, GFP_KERNEL
);
1994 cudbg_put_buff(pdbg_init
, &temp_buff
);
1998 buff
= (struct cudbg_ch_cntxt
*)temp_buff
.data
;
2000 /* Collect EGRESS and INGRESS context data.
2001 * In case of failures, fallback to collecting via FW or
2004 for (i
= CTXT_EGRESS
; i
<= CTXT_INGRESS
; i
++) {
2005 if (!region_info
[i
].exist
) {
2006 max_ctx_qid
= CUDBG_LOWMEM_MAX_CTXT_QIDS
;
2007 cudbg_get_sge_ctxt_fw(pdbg_init
, max_ctx_qid
, i
,
2012 max_ctx_size
= region_info
[i
].end
- region_info
[i
].start
+ 1;
2013 max_ctx_qid
= max_ctx_size
/ SGE_CTXT_SIZE
;
2015 /* If firmware is not attached/alive, use backdoor register
2016 * access to collect dump.
2018 if (is_fw_attached(pdbg_init
)) {
2019 t4_sge_ctxt_flush(padap
, padap
->mbox
, i
);
2021 rc
= t4_memory_rw(padap
, MEMWIN_NIC
, mem_type
[i
],
2022 region_info
[i
].start
, max_ctx_size
,
2023 (__be32
*)ctx_buf
, 1);
2026 if (rc
|| !is_fw_attached(pdbg_init
)) {
2027 max_ctx_qid
= CUDBG_LOWMEM_MAX_CTXT_QIDS
;
2028 cudbg_get_sge_ctxt_fw(pdbg_init
, max_ctx_qid
, i
,
2033 for (j
= 0; j
< max_ctx_qid
; j
++) {
2034 src_off
= (u64
*)(ctx_buf
+ j
* SGE_CTXT_SIZE
);
2035 dst_off
= (u64
*)buff
->data
;
2037 /* The data is stored in 64-bit cpu order. Convert it
2038 * to big endian before parsing.
2040 for (k
= 0; k
< SGE_CTXT_SIZE
/ sizeof(u64
); k
++)
2041 dst_off
[k
] = cpu_to_be64(src_off
[k
]);
2043 rc
= cudbg_sge_ctxt_check_valid(buff
->data
, i
);
2047 buff
->cntxt_type
= i
;
2055 /* Collect FREELIST and CONGESTION MANAGER contexts */
2056 max_ctx_size
= region_info
[CTXT_FLM
].end
-
2057 region_info
[CTXT_FLM
].start
+ 1;
2058 max_ctx_qid
= max_ctx_size
/ SGE_CTXT_SIZE
;
2059 /* Since FLM and CONM are 1-to-1 mapped, the below function
2060 * will fetch both FLM and CONM contexts.
2062 cudbg_get_sge_ctxt_fw(pdbg_init
, max_ctx_qid
, CTXT_FLM
, &buff
);
2064 return cudbg_write_and_release_buff(pdbg_init
, &temp_buff
, dbg_buff
);
2067 static inline void cudbg_tcamxy2valmask(u64 x
, u64 y
, u8
*addr
, u64
*mask
)
2070 y
= (__force u64
)cpu_to_be64(y
);
2071 memcpy(addr
, (char *)&y
+ 2, ETH_ALEN
);
static void cudbg_mps_rpl_backdoor(struct adapter *padap,
				   struct fw_ldst_mps_rplc *mps_rplc)
{
	if (is_t5(padap->params.chip)) {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP3_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP2_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP1_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP0_A));
	} else {
		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP7_A));
		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP6_A));
		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP5_A));
		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
							  MPS_VF_RPLCT_MAP4_A));
	}
	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}

static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
				    struct cudbg_mps_tcam *tcam, u32 idx)
{
	struct adapter *padap = pdbg_init->adap;
	u64 tcamy, tcamx, val;
	u32 ctl, data2;
	int rc = 0;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
		/* CtlReqID   - 1: use Host Driver Requester ID
		 * CtlCmdType - 0: Read, 1: Write
		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
		 * CtlXYBitSel- 0: Y bit, 1: X bit
		 */

		/* Read tcamy */
		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
		if (idx < 256)
			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
		else
			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		tcam->lookup_type = DATALKPTYPE_G(data2);

		/* 0 - Outer header, 1 - Inner header
		 * [71:48] bit locations are overloaded for
		 * outer vs. inner lookup types.
		 */
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI */
			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
			tcam->dip_hit = data2 & DATADIPHIT_F;
		} else {
			tcam->vlan_vld = data2 & DATAVIDH2_F;
			tcam->ivlan = VIDL_G(val);
		}

		tcam->port_num = DATAPORTNUM_G(data2);

		/* Read tcamx. Change the control param */
		ctl |= CTLXYBITSEL_V(1);
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
			/* Inner header VNI mask */
			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
		}
	} else {
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
	}

	/* If no entry, return */
	if (tcamx & tcamy)
		return rc;

	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);

	if (tcam->repli) {
		struct fw_ldst_cmd ldst_cmd;
		struct fw_ldst_mps_rplc mps_rplc;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
		ldst_cmd.u.mps.rplc.fid_idx =
			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
			      FW_LDST_CMD_IDX_V(idx));

		/* If firmware is not attached/alive, use backdoor register
		 * access to collect dump.
		 */
		if (is_fw_attached(pdbg_init))
			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
					sizeof(ldst_cmd), &ldst_cmd);

		if (rc || !is_fw_attached(pdbg_init)) {
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
			/* Ignore error since we collected directly from
			 * reading registers.
			 */
			rc = 0;
		} else {
			mps_rplc = ldst_cmd.u.mps.rplc;
		}

		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
		}
	}
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
	tcam->idx = idx;
	tcam->rplc_size = padap->params.arch.mps_rplc_size;
	return rc;
}

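/* The replication-map read above follows a "mailbox first, backdoor second"
 * pattern: try the FW_LDST mailbox command when firmware is alive, and fall
 * back to cudbg_mps_rpl_backdoor() register reads otherwise, clearing the
 * error because the backdoor data is still usable.  A generic sketch of the
 * pattern (illustrative only; read_via_mailbox()/read_via_registers() are
 * hypothetical helpers, not driver functions):
 *
 *	int rc = 0;
 *
 *	if (fw_alive)
 *		rc = read_via_mailbox(&result);		// preferred path
 *
 *	if (rc || !fw_alive) {
 *		read_via_registers(&result);		// backdoor path
 *		rc = 0;		// ignore mailbox error, data was collected
 *	}
 */
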
int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size = 0, i, n, total_size = 0;
	struct cudbg_mps_tcam *tcam;
	int rc;

	n = padap->params.arch.mps_tcam_size;
	size = sizeof(struct cudbg_mps_tcam) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
	for (i = 0; i < n; i++) {
		rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
		total_size += sizeof(struct cudbg_mps_tcam);
		tcam++;
	}

	if (!total_size) {
		rc = CUDBG_SYSTEM_ERROR;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	char vpd_str[CUDBG_VPD_VER_LEN + 1];
	u32 scfg_vers, vpd_vers, fw_vers;
	struct cudbg_vpd_data *vpd_data;
	struct vpd_params vpd = { 0 };
	int rc, ret;

	rc = t4_get_raw_vpd_params(padap, &vpd);
	if (rc)
		return rc;

	rc = t4_get_fw_version(padap, &fw_vers);
	if (rc)
		return rc;

	/* Serial Configuration Version is located beyond the PF's vpd size.
	 * Temporarily give access to entire EEPROM to get it.
	 */
	rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
	if (rc < 0)
		return rc;

	ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
				 &scfg_vers);

	/* Restore back to original PF's vpd size */
	rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
	if (rc < 0)
		return rc;

	if (ret)
		return ret;

	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
				vpd_str);
	if (rc)
		return rc;

	vpd_str[CUDBG_VPD_VER_LEN] = '\0';
	rc = kstrtouint(vpd_str, 0, &vpd_vers);
	if (rc)
		return rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
			    &temp_buff);
	if (rc)
		return rc;

	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
	memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
	memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
	memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
	memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
	vpd_data->scfg_vers = scfg_vers;
	vpd_data->vpd_vers = vpd_vers;
	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

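/* The VPD version field read above is a fixed-length byte string that is not
 * NUL-terminated in the EEPROM, so it is terminated by hand before being fed
 * to kstrtouint().  Minimal sketch (illustrative only; the 4-byte length and
 * the raw[] source are assumptions for the example):
 *
 *	char buf[4 + 1];
 *	unsigned int vers;
 *	int rc;
 *
 *	memcpy(buf, raw, 4);		// raw bytes straight from VPD
 *	buf[4] = '\0';			// make it a proper C string
 *	rc = kstrtouint(buf, 0, &vers);	// base 0: decimal, 0x-hex or octal
 */
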
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
			  struct cudbg_tid_data *tid_data)
{
	struct adapter *padap = pdbg_init->adap;
	int i, cmd_retry = 8;
	u32 val;

	/* Fill REQ_DATA regs with 0's */
	for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);

	/* Write DBIG command */
	val = DBGICMD_V(4) | DBGITID_V(tid);
	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
	tid_data->dbig_cmd = val;

	val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
	tid_data->dbig_conf = val;

	/* Poll the DBGICMDBUSY bit */
	val = 1;
	while (val) {
		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
		val = val & DBGICMDBUSY_F;
		cmd_retry--;
		if (!cmd_retry)
			return CUDBG_SYSTEM_ERROR;
	}

	/* Check RESP status */
	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
	tid_data->dbig_rsp_stat = val;
	if (!(val & 1))
		return CUDBG_SYSTEM_ERROR;

	/* Read RESP data */
	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
		tid_data->data[i] = t4_read_reg(padap,
						LE_DB_DBGI_RSP_DATA_A +
						(i << 2));
	tid_data->tid = tid;
	return 0;
}

static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
{
	int type = LE_ET_UNKNOWN;

	if (tid < tcam_region.server_start)
		type = LE_ET_TCAM_CON;
	else if (tid < tcam_region.filter_start)
		type = LE_ET_TCAM_SERVER;
	else if (tid < tcam_region.clip_start)
		type = LE_ET_TCAM_FILTER;
	else if (tid < tcam_region.routing_start)
		type = LE_ET_TCAM_CLIP;
	else if (tid < tcam_region.tid_hash_base)
		type = LE_ET_TCAM_ROUTING;
	else if (tid < tcam_region.max_tid)
		type = LE_ET_HASH_CON;
	else
		type = LE_ET_INVALID_TID;

	return type;
}

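/* cudbg_get_le_type() relies on the region start indices produced by
 * cudbg_fill_le_tcam_info() being in ascending order, so one cascade of
 * comparisons classifies a TID.  Illustrative example with made-up region
 * boundaries:
 *
 *	struct cudbg_tcam r = { 0 };
 *
 *	r.server_start  = 100;
 *	r.filter_start  = 200;
 *	r.clip_start    = 300;
 *	r.routing_start = 400;
 *	r.tid_hash_base = 500;
 *	r.max_tid       = 1000;
 *
 *	cudbg_get_le_type(50, r);	// LE_ET_TCAM_CON
 *	cudbg_get_le_type(250, r);	// LE_ET_TCAM_FILTER
 *	cudbg_get_le_type(600, r);	// LE_ET_HASH_CON
 *	cudbg_get_le_type(2000, r);	// LE_ET_INVALID_TID
 */
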
static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
			       struct cudbg_tcam tcam_region)
{
	int ipv6 = 0;
	int le_type;

	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
	if (tid_data->tid & 1)
		return 0;

	if (le_type == LE_ET_HASH_CON) {
		ipv6 = tid_data->data[16] & 0x8000;
	} else if (le_type == LE_ET_TCAM_CON) {
		ipv6 = tid_data->data[16] & 0x8000;
		if (ipv6)
			ipv6 = tid_data->data[9] == 0x00C00000;
	} else {
		ipv6 = 0;
	}
	return ipv6;
}

void cudbg_fill_le_tcam_info(struct adapter *padap,
			     struct cudbg_tcam *tcam_region)
{
	u32 value;

	/* Get the LE regions */
	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
	tcam_region->tid_hash_base = value;

	/* Get routing table index */
	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
	tcam_region->routing_start = value;

	/* Get clip table index. For T6 there is separate CLIP TCAM */
	if (is_t6(padap->params.chip))
		value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
	else
		value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
	tcam_region->clip_start = value;

	/* Get filter table index */
	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
	tcam_region->filter_start = value;

	/* Get server table index */
	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
	tcam_region->server_start = value;

	/* Check whether hash is enabled and calculate the max tids */
	value = t4_read_reg(padap, LE_DB_CONFIG_A);
	if ((value >> HASHEN_S) & 1) {
		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
			tcam_region->max_tid = (value & 0xFFFFF) +
					       tcam_region->tid_hash_base;
		} else {
			value = HASHTIDSIZE_G(value);
			value = 1 << value;
			tcam_region->max_tid = value +
					       tcam_region->tid_hash_base;
		}
	} else { /* hash not enabled */
		if (is_t6(padap->params.chip))
			tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
					       CUDBG_MAX_TID_COMP_EN :
					       CUDBG_MAX_TID_COMP_DIS;
		else
			tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
	}

	if (is_t6(padap->params.chip))
		tcam_region->max_tid += CUDBG_T6_CLIP;
}

int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_tcam tcam_region = { 0 };
	struct cudbg_tid_data *tid_data;
	u32 bytes = 0;
	int rc, size;
	u32 i;

	cudbg_fill_le_tcam_info(padap, &tcam_region);

	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
	size += sizeof(struct cudbg_tcam);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
	bytes = sizeof(struct cudbg_tcam);
	tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
	/* read all tid */
	for (i = 0; i < tcam_region.max_tid; ) {
		rc = cudbg_read_tid(pdbg_init, i, tid_data);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			/* Update tcam header and exit */
			tcam_region.max_tid = i;
			memcpy(temp_buff.data, &tcam_region,
			       sizeof(struct cudbg_tcam));
			goto out;
		}

		if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
			/* T6 CLIP TCAM: ipv6 takes 4 entries */
			if (is_t6(padap->params.chip) &&
			    i >= tcam_region.clip_start &&
			    i < tcam_region.clip_start + CUDBG_T6_CLIP)
				i += 4;
			else /* Main TCAM: ipv6 takes two tids */
				i += 2;
		} else {
			i++;
		}

		tid_data++;
		bytes += sizeof(struct cudbg_tid_data);
	}

out:
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

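/* On a failed TID read the loop above does not abort the dump: it shrinks
 * max_tid in the header to the number of TIDs actually captured, rewrites the
 * header at the front of the buffer, and flags the entity as partial.  A
 * generic sketch of that "truncate and mark partial" pattern (illustrative
 * only; read_entry() and entry_width() are hypothetical helpers):
 *
 *	for (i = 0; i < hdr.max_tid; ) {
 *		if (read_entry(i, &entry)) {
 *			hdr.max_tid = i;		// keep what we have
 *			memcpy(buf, &hdr, sizeof(hdr));	// rewrite header
 *			err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
 *			break;
 *		}
 *		i += entry_width(&entry);	// ipv6 spans 2 or 4 tids
 *	}
 */
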
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 size;
	int rc;

	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_read_cong_tbl(padap, (void *)temp_buff.data);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

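/* t4_read_indirect() (implemented elsewhere in the driver) walks a block of
 * values through an address/data register pair; the tables above only supply
 * that pair plus a starting offset and a count.  A generic sketch of this
 * style of indirect access (illustrative only; it is not the driver's actual
 * implementation and example_read_indirect() is a made-up name):
 *
 *	static void example_read_indirect(void __iomem *regs, u32 addr_reg,
 *					  u32 data_reg, u32 start, u32 nregs,
 *					  u32 *out)
 *	{
 *		u32 i;
 *
 *		for (i = 0; i < nregs; i++) {
 *			writel(start + i, regs + addr_reg);	// select index
 *			out[i] = readl(regs + data_reg);	// fetch value
 *		}
 *	}
 */
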
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_ulptx_la *ulptx_la_buff;
	struct cudbg_ver_hdr *ver_hdr;
	u32 i, j;
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_ver_hdr) +
			    sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
	if (rc)
		return rc;

	ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_ULPTX_LA_REV;
	ver_hdr->size = sizeof(struct cudbg_ulptx_la);

	ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
						  sizeof(*ver_hdr));
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
	}

	for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
		t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
		ulptx_la_buff->rdptr_asic[i] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
		ulptx_la_buff->rddata_asic[i][0] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
		ulptx_la_buff->rddata_asic[i][1] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
		ulptx_la_buff->rddata_asic[i][2] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
		ulptx_la_buff->rddata_asic[i][3] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
		ulptx_la_buff->rddata_asic[i][4] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
		ulptx_la_buff->rddata_asic[i][5] =
				t4_read_reg(padap, PM_RX_BASE_ADDR);
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	u32 local_offset, local_range;
	struct ireg_buf *up_cim;
	u32 size, j, iter;
	u32 instance = 0;
	int i, rc, n;

	if (is_t5(padap->params.chip))
		n = sizeof(t5_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else
		return CUDBG_STATUS_NOT_IMPLEMENTED;

	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
			instance = t5_up_cim_reg_array[i][4];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
			instance = t6_up_cim_reg_array[i][4];
		}

		switch (instance) {
		case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
			iter = up_cim_reg->ireg_offset_range;
			local_offset = 0x120;
			local_range = 1;
			break;
		case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
			iter = up_cim_reg->ireg_offset_range;
			local_offset = 0x10;
			local_range = 1;
			break;
		default:
			iter = 1;
			local_offset = 0;
			local_range = up_cim_reg->ireg_offset_range;
		}

		for (j = 0; j < iter; j++, buff++) {
			rc = t4_cim_read(padap,
					 up_cim_reg->ireg_local_offset +
					 (j * local_offset), local_range, buff);
			if (rc) {
				cudbg_put_buff(pdbg_init, &temp_buff);
				return rc;
			}
		}

		up_cim++;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_pbt_tables *pbt;
	int i, rc;
	u32 addr;

	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_pbt_tables),
			    &temp_buff);
	if (rc)
		return rc;

	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
	/* PBT dynamic entries */
	addr = CUDBG_CHAC_PBT_ADDR;
	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_dynamic[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT static entries */
	/* static entries start when bit 6 is set */
	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_static[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* LRF entries */
	addr = CUDBG_CHAC_PBT_LRF;
	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->lrf_table[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}

	/* PBT data entries */
	addr = CUDBG_CHAC_PBT_DATA;
	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_data[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);
		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

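/* The mailbox log is a ring: iteration starts at log->cursor and wraps once
 * modulo log->size, and each 64-bit command flit is split into hi/lo 32-bit
 * words for the dump.  Minimal sketch of the wrap and the split (illustrative
 * only):
 *
 *	idx = cursor + k;
 *	if (idx >= ring_size)
 *		idx -= ring_size;	// wrap around at most once
 *
 *	hi = (u32)(flit >> 32);		// upper 32 bits of the flit
 *	lo = (u32)flit;			// lower 32 bits of the flit
 */
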
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *hma_indr;
	int i, rc, n;
	u32 size;

	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	hma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *hma_fli = &hma_indr->tp_pio;
		u32 *buff = hma_indr->outbuf;

		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
				 buff, hma_fli->ireg_offset_range,
				 hma_fli->ireg_local_offset);
		hma_indr++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}

void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
				   u32 *num, u32 *size)
{
	u32 tot_entries = 0, tot_size = 0;

	/* NIC TXQ, RXQ, FLQ, and CTRLQ */
	tot_entries += MAX_ETH_QSETS * 3;
	tot_entries += MAX_CTRL_QUEUES;

	tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
	tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
	tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
	tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
		    MAX_CTRL_TXQ_DESC_SIZE;

	/* FW_EVTQ and INTRQ */
	tot_entries += INGQ_EXTRAS;
	tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;

	/* PTP_TXQ */
	tot_entries += 1;
	tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;

	/* ULD TXQ, RXQ, and FLQ */
	tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;

	tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
		    MAX_TXQ_DESC_SIZE;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
		    MAX_RXQ_DESC_SIZE;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
		    MAX_FL_DESC_SIZE;

	/* ULD CIQ */
	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
		    MAX_RXQ_DESC_SIZE;

	/* ETHOFLD TXQ, RXQ, and FLQ */
	tot_entries += MAX_OFLD_QSETS * 3;
	tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;

	tot_size += sizeof(struct cudbg_ver_hdr) +
		    sizeof(struct cudbg_qdesc_info) +
		    sizeof(struct cudbg_qdesc_entry) * tot_entries;

	if (num)
		*num = tot_entries;

	if (size)
		*size = tot_size;
}

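/* The helper above sizes the qdesc scratch buffer for the worst case: the
 * maximum number of queues times the largest descriptor ring, plus the dump
 * headers; cudbg_collect_qdesc() then clamps the result to
 * CUDBG_DUMP_BUFF_SIZE.  A toy sketch of the same "sum of maxima, then clamp"
 * sizing (illustrative only; the queue counts and descriptor sizes are made
 * up):
 *
 *	u32 entries = 0, bytes = 0;
 *
 *	entries += 16 * 3;		// e.g. 16 qsets: txq + rxq + flq
 *	bytes   += 16 * 1024 * 64;	// txq rings: 1024 descs of 64 bytes
 *	bytes   += 16 * 512 * 128;	// rxq rings: 512 descs of 128 bytes
 *	bytes   += sizeof(struct cudbg_qdesc_info) +
 *		   sizeof(struct cudbg_qdesc_entry) * entries;
 *	bytes = min_t(u32, bytes, CUDBG_DUMP_BUFF_SIZE);
 */
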
int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err)
{
	u32 num_queues = 0, tot_entries = 0, size = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct cudbg_qdesc_entry *qdesc_entry;
	struct cudbg_qdesc_info *qdesc_info;
	struct cudbg_ver_hdr *ver_hdr;
	struct sge *s = &padap->sge;
	u32 i, j, cur_off, tot_len;
	u8 *data;
	int rc = 0;

	cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
	size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
	tot_len = size;
	data = kvzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ver_hdr = (struct cudbg_ver_hdr *)data;
	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
	ver_hdr->revision = CUDBG_QDESC_REV;
	ver_hdr->size = sizeof(struct cudbg_qdesc_info);
	size -= sizeof(*ver_hdr);

	qdesc_info = (struct cudbg_qdesc_info *)(data +
						 sizeof(*ver_hdr));
	size -= sizeof(*qdesc_info);
	qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;

#define QDESC_GET(q, desc, type, label) do { \
	if (size <= 0) { \
		goto label; \
	} \
	if (desc) { \
		cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
		size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
		num_queues++; \
		qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
	} \
} while (0)

#define QDESC_GET_TXQ(q, type, label) do { \
	struct sge_txq *txq = (struct sge_txq *)q; \
	QDESC_GET(txq, txq->desc, type, label); \
} while (0)

#define QDESC_GET_RXQ(q, type, label) do { \
	struct sge_rspq *rxq = (struct sge_rspq *)q; \
	QDESC_GET(rxq, rxq->desc, type, label); \
} while (0)

#define QDESC_GET_FLQ(q, type, label) do { \
	struct sge_fl *flq = (struct sge_fl *)q; \
	QDESC_GET(flq, flq->desc, type, label); \
} while (0)

	/* NIC TXQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);

	/* NIC RXQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);

	/* NIC FLQ */
	for (i = 0; i < s->ethqsets; i++)
		QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);

	/* NIC CTRLQ */
	for (i = 0; i < padap->params.nports; i++)
		QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);

	/* FW_EVTQ */
	QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);

	/* INTRQ */
	QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);

	/* PTP_TXQ */
	QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);

	/* ULD queues */
	mutex_lock(&uld_mutex);

	if (s->uld_txq_info) {
		struct sge_uld_txq_info *utxq;

		/* ULD TXQ */
		for (j = 0; j < CXGB4_TX_MAX; j++) {
			if (!s->uld_txq_info[j])
				continue;

			utxq = s->uld_txq_info[j];
			for (i = 0; i < utxq->ntxq; i++)
				QDESC_GET_TXQ(&utxq->uldtxq[i].q,
					      cudbg_uld_txq_to_qtype(j),
					      out_unlock);
		}
	}

	if (s->uld_rxq_info) {
		struct sge_uld_rxq_info *urxq;
		u32 base;

		/* ULD RXQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			for (i = 0; i < urxq->nrxq; i++)
				QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
					      cudbg_uld_rxq_to_qtype(j),
					      out_unlock);
		}

		/* ULD FLQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			for (i = 0; i < urxq->nrxq; i++)
				QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
					      cudbg_uld_flq_to_qtype(j),
					      out_unlock);
		}

		/* ULD CIQ */
		for (j = 0; j < CXGB4_ULD_MAX; j++) {
			if (!s->uld_rxq_info[j])
				continue;

			urxq = s->uld_rxq_info[j];
			base = urxq->nrxq;
			for (i = 0; i < urxq->nciq; i++)
				QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
					      cudbg_uld_ciq_to_qtype(j),
					      out_unlock);
		}
	}

	/* ETHOFLD TXQ */
	if (s->eohw_txq)
		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_TXQ(&s->eohw_txq[i].q,
				      CUDBG_QTYPE_ETHOFLD_TXQ, out);

	/* ETHOFLD RXQ and FLQ */
	if (s->eohw_rxq) {
		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
				      CUDBG_QTYPE_ETHOFLD_RXQ, out);

		for (i = 0; i < s->eoqsets; i++)
			QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
				      CUDBG_QTYPE_ETHOFLD_FLQ, out);
	}

out_unlock:
	mutex_unlock(&uld_mutex);

out:
	qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
	qdesc_info->num_queues = num_queues;
	cur_off = 0;
	while (tot_len) {
		u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);

		rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
				    &temp_buff);
		if (rc) {
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out_free;
		}

		memcpy(temp_buff.data, data + cur_off, chunk_size);
		tot_len -= chunk_size;
		cur_off += chunk_size;
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
			goto out_free;
		}
	}

out_free:
	kvfree(data);

#undef QDESC_GET_FLQ
#undef QDESC_GET_RXQ
#undef QDESC_GET_TXQ