/* bnx2x_stats.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include "bnx2x_stats.h"
23 #include "bnx2x_cmn.h"
24 #include "bnx2x_sriov.h"
26 extern const u32 dmae_reg_go_c
[];
31 * General service functions
34 static inline long bnx2x_hilo(u32
*hiref
)
36 u32 lo
= *(hiref
+ 1);
37 #if (BITS_PER_LONG == 64)
40 return HILO_U64(hi
, lo
);
46 static inline u16
bnx2x_get_port_stats_dma_len(struct bnx2x
*bp
)
50 /* 'newest' convention - shmem2 cotains the size of the port stats */
51 if (SHMEM2_HAS(bp
, sizeof_port_stats
)) {
52 u32 size
= SHMEM2_RD(bp
, sizeof_port_stats
);
56 /* prevent newer BC from causing buffer overflow */
57 if (res
> sizeof(struct host_port_stats
))
58 res
= sizeof(struct host_port_stats
);
61 /* Older convention - all BCs support the port stats' fields up until
62 * the 'not_used' field
65 res
= offsetof(struct host_port_stats
, not_used
) + 4;
67 /* if PFC stats are supported by the MFW, DMA them as well */
68 if (bp
->flags
& BC_SUPPORTS_PFC_STATS
) {
69 res
+= offsetof(struct host_port_stats
,
71 offsetof(struct host_port_stats
,
72 pfc_frames_tx_hi
) + 4 ;
78 WARN_ON(res
> 2 * DMAE_LEN32_RD_MAX
);
83 * Init service functions
86 static void bnx2x_dp_stats(struct bnx2x
*bp
)
90 DP(BNX2X_MSG_STATS
, "dumping stats:\n"
95 " drv_stats_counter %d\n"
97 " stats_counters_addrs %x %x\n",
98 bp
->fw_stats_req
->hdr
.cmd_num
,
99 bp
->fw_stats_req
->hdr
.reserved0
,
100 bp
->fw_stats_req
->hdr
.drv_stats_counter
,
101 bp
->fw_stats_req
->hdr
.reserved1
,
102 bp
->fw_stats_req
->hdr
.stats_counters_addrs
.hi
,
103 bp
->fw_stats_req
->hdr
.stats_counters_addrs
.lo
);
105 for (i
= 0; i
< bp
->fw_stats_req
->hdr
.cmd_num
; i
++) {
113 i
, bp
->fw_stats_req
->query
[i
].kind
,
114 bp
->fw_stats_req
->query
[i
].index
,
115 bp
->fw_stats_req
->query
[i
].funcID
,
116 bp
->fw_stats_req
->query
[i
].reserved
,
117 bp
->fw_stats_req
->query
[i
].address
.hi
,
118 bp
->fw_stats_req
->query
[i
].address
.lo
);
122 /* Post the next statistics ramrod. Protect it with the spin in
123 * order to ensure the strict order between statistics ramrods
124 * (each ramrod has a sequence number passed in a
125 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
128 static void bnx2x_storm_stats_post(struct bnx2x
*bp
)
132 if (bp
->stats_pending
)
135 bp
->fw_stats_req
->hdr
.drv_stats_counter
=
136 cpu_to_le16(bp
->stats_counter
++);
138 DP(BNX2X_MSG_STATS
, "Sending statistics ramrod %d\n",
139 le16_to_cpu(bp
->fw_stats_req
->hdr
.drv_stats_counter
));
141 /* adjust the ramrod to include VF queues statistics */
142 bnx2x_iov_adjust_stats_req(bp
);
145 /* send FW stats ramrod */
146 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_COMMON_STAT_QUERY
, 0,
147 U64_HI(bp
->fw_stats_req_mapping
),
148 U64_LO(bp
->fw_stats_req_mapping
),
149 NONE_CONNECTION_TYPE
);
151 bp
->stats_pending
= 1;
154 static void bnx2x_hw_stats_post(struct bnx2x
*bp
)
156 struct dmae_command
*dmae
= &bp
->stats_dmae
;
157 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
159 *stats_comp
= DMAE_COMP_VAL
;
160 if (CHIP_REV_IS_SLOW(bp
))
163 /* Update MCP's statistics if possible */
165 memcpy(bnx2x_sp(bp
, func_stats
), &bp
->func_stats
,
166 sizeof(bp
->func_stats
));
169 if (bp
->executer_idx
) {
170 int loader_idx
= PMF_DMAE_C(bp
);
171 u32 opcode
= bnx2x_dmae_opcode(bp
, DMAE_SRC_PCI
, DMAE_DST_GRC
,
172 true, DMAE_COMP_GRC
);
173 opcode
= bnx2x_dmae_opcode_clr_src_reset(opcode
);
175 memset(dmae
, 0, sizeof(struct dmae_command
));
176 dmae
->opcode
= opcode
;
177 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, dmae
[0]));
178 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, dmae
[0]));
179 dmae
->dst_addr_lo
= (DMAE_REG_CMD_MEM
+
180 sizeof(struct dmae_command
) *
181 (loader_idx
+ 1)) >> 2;
182 dmae
->dst_addr_hi
= 0;
183 dmae
->len
= sizeof(struct dmae_command
) >> 2;
186 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
+ 1] >> 2;
187 dmae
->comp_addr_hi
= 0;
191 bnx2x_post_dmae(bp
, dmae
, loader_idx
);
193 } else if (bp
->func_stx
) {
195 bnx2x_issue_dmae_with_comp(bp
, dmae
, stats_comp
);
199 static void bnx2x_stats_comp(struct bnx2x
*bp
)
201 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
205 while (*stats_comp
!= DMAE_COMP_VAL
) {
207 BNX2X_ERR("timeout waiting for stats finished\n");
211 usleep_range(1000, 2000);
216 * Statistics service functions
219 /* should be called under stats_sema */
220 static void bnx2x_stats_pmf_update(struct bnx2x
*bp
)
222 struct dmae_command
*dmae
;
224 int loader_idx
= PMF_DMAE_C(bp
);
225 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
228 if (!bp
->port
.pmf
|| !bp
->port
.port_stx
) {
233 bp
->executer_idx
= 0;
235 opcode
= bnx2x_dmae_opcode(bp
, DMAE_SRC_GRC
, DMAE_DST_PCI
, false, 0);
237 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
238 dmae
->opcode
= bnx2x_dmae_opcode_add_comp(opcode
, DMAE_COMP_GRC
);
239 dmae
->src_addr_lo
= bp
->port
.port_stx
>> 2;
240 dmae
->src_addr_hi
= 0;
241 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
242 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
243 dmae
->len
= DMAE_LEN32_RD_MAX
;
244 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
245 dmae
->comp_addr_hi
= 0;
248 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
249 dmae
->opcode
= bnx2x_dmae_opcode_add_comp(opcode
, DMAE_COMP_PCI
);
250 dmae
->src_addr_lo
= (bp
->port
.port_stx
>> 2) + DMAE_LEN32_RD_MAX
;
251 dmae
->src_addr_hi
= 0;
252 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
) +
253 DMAE_LEN32_RD_MAX
* 4);
254 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
) +
255 DMAE_LEN32_RD_MAX
* 4);
256 dmae
->len
= bnx2x_get_port_stats_dma_len(bp
) - DMAE_LEN32_RD_MAX
;
258 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
259 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
260 dmae
->comp_val
= DMAE_COMP_VAL
;
263 bnx2x_hw_stats_post(bp
);
264 bnx2x_stats_comp(bp
);
267 static void bnx2x_port_stats_init(struct bnx2x
*bp
)
269 struct dmae_command
*dmae
;
270 int port
= BP_PORT(bp
);
272 int loader_idx
= PMF_DMAE_C(bp
);
274 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
277 if (!bp
->link_vars
.link_up
|| !bp
->port
.pmf
) {
282 bp
->executer_idx
= 0;
285 opcode
= bnx2x_dmae_opcode(bp
, DMAE_SRC_PCI
, DMAE_DST_GRC
,
286 true, DMAE_COMP_GRC
);
288 if (bp
->port
.port_stx
) {
290 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
291 dmae
->opcode
= opcode
;
292 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
293 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
294 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
295 dmae
->dst_addr_hi
= 0;
296 dmae
->len
= bnx2x_get_port_stats_dma_len(bp
);
297 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
298 dmae
->comp_addr_hi
= 0;
304 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
305 dmae
->opcode
= opcode
;
306 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
307 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
308 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
309 dmae
->dst_addr_hi
= 0;
310 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
311 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
312 dmae
->comp_addr_hi
= 0;
317 opcode
= bnx2x_dmae_opcode(bp
, DMAE_SRC_GRC
, DMAE_DST_PCI
,
318 true, DMAE_COMP_GRC
);
320 /* EMAC is special */
321 if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
) {
322 mac_addr
= (port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
);
324 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
325 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
326 dmae
->opcode
= opcode
;
327 dmae
->src_addr_lo
= (mac_addr
+
328 EMAC_REG_EMAC_RX_STAT_AC
) >> 2;
329 dmae
->src_addr_hi
= 0;
330 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
331 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
332 dmae
->len
= EMAC_REG_EMAC_RX_STAT_AC_COUNT
;
333 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
334 dmae
->comp_addr_hi
= 0;
337 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
338 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
339 dmae
->opcode
= opcode
;
340 dmae
->src_addr_lo
= (mac_addr
+
341 EMAC_REG_EMAC_RX_STAT_AC_28
) >> 2;
342 dmae
->src_addr_hi
= 0;
343 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
344 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
345 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
346 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
348 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
349 dmae
->comp_addr_hi
= 0;
352 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
353 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
354 dmae
->opcode
= opcode
;
355 dmae
->src_addr_lo
= (mac_addr
+
356 EMAC_REG_EMAC_TX_STAT_AC
) >> 2;
357 dmae
->src_addr_hi
= 0;
358 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
359 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
360 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
361 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
362 dmae
->len
= EMAC_REG_EMAC_TX_STAT_AC_COUNT
;
363 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
364 dmae
->comp_addr_hi
= 0;
367 u32 tx_src_addr_lo
, rx_src_addr_lo
;
370 /* configure the params according to MAC type */
371 switch (bp
->link_vars
.mac_type
) {
373 mac_addr
= (port
? NIG_REG_INGRESS_BMAC1_MEM
:
374 NIG_REG_INGRESS_BMAC0_MEM
);
376 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
377 BIGMAC_REGISTER_TX_STAT_GTBYT */
378 if (CHIP_IS_E1x(bp
)) {
379 tx_src_addr_lo
= (mac_addr
+
380 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
381 tx_len
= (8 + BIGMAC_REGISTER_TX_STAT_GTBYT
-
382 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
383 rx_src_addr_lo
= (mac_addr
+
384 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
385 rx_len
= (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ
-
386 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
388 tx_src_addr_lo
= (mac_addr
+
389 BIGMAC2_REGISTER_TX_STAT_GTPOK
) >> 2;
390 tx_len
= (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT
-
391 BIGMAC2_REGISTER_TX_STAT_GTPOK
) >> 2;
392 rx_src_addr_lo
= (mac_addr
+
393 BIGMAC2_REGISTER_RX_STAT_GR64
) >> 2;
394 rx_len
= (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ
-
395 BIGMAC2_REGISTER_RX_STAT_GR64
) >> 2;
399 case MAC_TYPE_UMAC
: /* handled by MSTAT */
400 case MAC_TYPE_XMAC
: /* handled by MSTAT */
402 mac_addr
= port
? GRCBASE_MSTAT1
: GRCBASE_MSTAT0
;
403 tx_src_addr_lo
= (mac_addr
+
404 MSTAT_REG_TX_STAT_GTXPOK_LO
) >> 2;
405 rx_src_addr_lo
= (mac_addr
+
406 MSTAT_REG_RX_STAT_GR64_LO
) >> 2;
407 tx_len
= sizeof(bp
->slowpath
->
408 mac_stats
.mstat_stats
.stats_tx
) >> 2;
409 rx_len
= sizeof(bp
->slowpath
->
410 mac_stats
.mstat_stats
.stats_rx
) >> 2;
415 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
416 dmae
->opcode
= opcode
;
417 dmae
->src_addr_lo
= tx_src_addr_lo
;
418 dmae
->src_addr_hi
= 0;
420 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
421 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
422 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
423 dmae
->comp_addr_hi
= 0;
427 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
428 dmae
->opcode
= opcode
;
429 dmae
->src_addr_hi
= 0;
430 dmae
->src_addr_lo
= rx_src_addr_lo
;
432 U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) + (tx_len
<< 2));
434 U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) + (tx_len
<< 2));
436 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
437 dmae
->comp_addr_hi
= 0;
442 if (!CHIP_IS_E3(bp
)) {
443 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
444 dmae
->opcode
= opcode
;
445 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT0
:
446 NIG_REG_STAT0_EGRESS_MAC_PKT0
) >> 2;
447 dmae
->src_addr_hi
= 0;
448 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
449 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
450 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
451 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
452 dmae
->len
= (2*sizeof(u32
)) >> 2;
453 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
454 dmae
->comp_addr_hi
= 0;
457 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
458 dmae
->opcode
= opcode
;
459 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT1
:
460 NIG_REG_STAT0_EGRESS_MAC_PKT1
) >> 2;
461 dmae
->src_addr_hi
= 0;
462 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
463 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
464 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
465 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
466 dmae
->len
= (2*sizeof(u32
)) >> 2;
467 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
468 dmae
->comp_addr_hi
= 0;
472 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
473 dmae
->opcode
= bnx2x_dmae_opcode(bp
, DMAE_SRC_GRC
, DMAE_DST_PCI
,
474 true, DMAE_COMP_PCI
);
475 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_BRB_DISCARD
:
476 NIG_REG_STAT0_BRB_DISCARD
) >> 2;
477 dmae
->src_addr_hi
= 0;
478 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
));
479 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
));
480 dmae
->len
= (sizeof(struct nig_stats
) - 4*sizeof(u32
)) >> 2;
482 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
483 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
484 dmae
->comp_val
= DMAE_COMP_VAL
;
489 static void bnx2x_func_stats_init(struct bnx2x
*bp
)
491 struct dmae_command
*dmae
= &bp
->stats_dmae
;
492 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
500 bp
->executer_idx
= 0;
501 memset(dmae
, 0, sizeof(struct dmae_command
));
503 dmae
->opcode
= bnx2x_dmae_opcode(bp
, DMAE_SRC_PCI
, DMAE_DST_GRC
,
504 true, DMAE_COMP_PCI
);
505 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
506 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
507 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
508 dmae
->dst_addr_hi
= 0;
509 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
510 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
511 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
512 dmae
->comp_val
= DMAE_COMP_VAL
;
517 /* should be called under stats_sema */
518 static void bnx2x_stats_start(struct bnx2x
*bp
)
522 bnx2x_port_stats_init(bp
);
524 else if (bp
->func_stx
)
525 bnx2x_func_stats_init(bp
);
527 bnx2x_hw_stats_post(bp
);
528 bnx2x_storm_stats_post(bp
);
/* Transition used when this function becomes the PMF: wait for any
 * in-flight DMAE, pull the port stats from the MFW, then start a fresh
 * statistics cycle.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
/* Restart the statistics cycle (e.g. after a link change). */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	/* vfs travel through here as part of the statistics FSM, but no action
	 * is required
	 */
	if (IS_VF(bp))
		return;

	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
551 static void bnx2x_bmac_stats_update(struct bnx2x
*bp
)
553 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
554 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
560 if (CHIP_IS_E1x(bp
)) {
561 struct bmac1_stats
*new = bnx2x_sp(bp
, mac_stats
.bmac1_stats
);
563 /* the macros below will use "bmac1_stats" type */
564 UPDATE_STAT64(rx_stat_grerb
, rx_stat_ifhcinbadoctets
);
565 UPDATE_STAT64(rx_stat_grfcs
, rx_stat_dot3statsfcserrors
);
566 UPDATE_STAT64(rx_stat_grund
, rx_stat_etherstatsundersizepkts
);
567 UPDATE_STAT64(rx_stat_grovr
, rx_stat_dot3statsframestoolong
);
568 UPDATE_STAT64(rx_stat_grfrg
, rx_stat_etherstatsfragments
);
569 UPDATE_STAT64(rx_stat_grjbr
, rx_stat_etherstatsjabbers
);
570 UPDATE_STAT64(rx_stat_grxcf
, rx_stat_maccontrolframesreceived
);
571 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffstateentered
);
572 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_mac_xpf
);
574 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_outxoffsent
);
575 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_flowcontroldone
);
576 UPDATE_STAT64(tx_stat_gt64
, tx_stat_etherstatspkts64octets
);
577 UPDATE_STAT64(tx_stat_gt127
,
578 tx_stat_etherstatspkts65octetsto127octets
);
579 UPDATE_STAT64(tx_stat_gt255
,
580 tx_stat_etherstatspkts128octetsto255octets
);
581 UPDATE_STAT64(tx_stat_gt511
,
582 tx_stat_etherstatspkts256octetsto511octets
);
583 UPDATE_STAT64(tx_stat_gt1023
,
584 tx_stat_etherstatspkts512octetsto1023octets
);
585 UPDATE_STAT64(tx_stat_gt1518
,
586 tx_stat_etherstatspkts1024octetsto1522octets
);
587 UPDATE_STAT64(tx_stat_gt2047
, tx_stat_mac_2047
);
588 UPDATE_STAT64(tx_stat_gt4095
, tx_stat_mac_4095
);
589 UPDATE_STAT64(tx_stat_gt9216
, tx_stat_mac_9216
);
590 UPDATE_STAT64(tx_stat_gt16383
, tx_stat_mac_16383
);
591 UPDATE_STAT64(tx_stat_gterr
,
592 tx_stat_dot3statsinternalmactransmiterrors
);
593 UPDATE_STAT64(tx_stat_gtufl
, tx_stat_mac_ufl
);
596 struct bmac2_stats
*new = bnx2x_sp(bp
, mac_stats
.bmac2_stats
);
598 /* the macros below will use "bmac2_stats" type */
599 UPDATE_STAT64(rx_stat_grerb
, rx_stat_ifhcinbadoctets
);
600 UPDATE_STAT64(rx_stat_grfcs
, rx_stat_dot3statsfcserrors
);
601 UPDATE_STAT64(rx_stat_grund
, rx_stat_etherstatsundersizepkts
);
602 UPDATE_STAT64(rx_stat_grovr
, rx_stat_dot3statsframestoolong
);
603 UPDATE_STAT64(rx_stat_grfrg
, rx_stat_etherstatsfragments
);
604 UPDATE_STAT64(rx_stat_grjbr
, rx_stat_etherstatsjabbers
);
605 UPDATE_STAT64(rx_stat_grxcf
, rx_stat_maccontrolframesreceived
);
606 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffstateentered
);
607 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_mac_xpf
);
608 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_outxoffsent
);
609 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_flowcontroldone
);
610 UPDATE_STAT64(tx_stat_gt64
, tx_stat_etherstatspkts64octets
);
611 UPDATE_STAT64(tx_stat_gt127
,
612 tx_stat_etherstatspkts65octetsto127octets
);
613 UPDATE_STAT64(tx_stat_gt255
,
614 tx_stat_etherstatspkts128octetsto255octets
);
615 UPDATE_STAT64(tx_stat_gt511
,
616 tx_stat_etherstatspkts256octetsto511octets
);
617 UPDATE_STAT64(tx_stat_gt1023
,
618 tx_stat_etherstatspkts512octetsto1023octets
);
619 UPDATE_STAT64(tx_stat_gt1518
,
620 tx_stat_etherstatspkts1024octetsto1522octets
);
621 UPDATE_STAT64(tx_stat_gt2047
, tx_stat_mac_2047
);
622 UPDATE_STAT64(tx_stat_gt4095
, tx_stat_mac_4095
);
623 UPDATE_STAT64(tx_stat_gt9216
, tx_stat_mac_9216
);
624 UPDATE_STAT64(tx_stat_gt16383
, tx_stat_mac_16383
);
625 UPDATE_STAT64(tx_stat_gterr
,
626 tx_stat_dot3statsinternalmactransmiterrors
);
627 UPDATE_STAT64(tx_stat_gtufl
, tx_stat_mac_ufl
);
629 /* collect PFC stats */
630 pstats
->pfc_frames_tx_hi
= new->tx_stat_gtpp_hi
;
631 pstats
->pfc_frames_tx_lo
= new->tx_stat_gtpp_lo
;
633 pstats
->pfc_frames_rx_hi
= new->rx_stat_grpp_hi
;
634 pstats
->pfc_frames_rx_lo
= new->rx_stat_grpp_lo
;
637 estats
->pause_frames_received_hi
=
638 pstats
->mac_stx
[1].rx_stat_mac_xpf_hi
;
639 estats
->pause_frames_received_lo
=
640 pstats
->mac_stx
[1].rx_stat_mac_xpf_lo
;
642 estats
->pause_frames_sent_hi
=
643 pstats
->mac_stx
[1].tx_stat_outxoffsent_hi
;
644 estats
->pause_frames_sent_lo
=
645 pstats
->mac_stx
[1].tx_stat_outxoffsent_lo
;
647 estats
->pfc_frames_received_hi
=
648 pstats
->pfc_frames_rx_hi
;
649 estats
->pfc_frames_received_lo
=
650 pstats
->pfc_frames_rx_lo
;
651 estats
->pfc_frames_sent_hi
=
652 pstats
->pfc_frames_tx_hi
;
653 estats
->pfc_frames_sent_lo
=
654 pstats
->pfc_frames_tx_lo
;
657 static void bnx2x_mstat_stats_update(struct bnx2x
*bp
)
659 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
660 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
662 struct mstat_stats
*new = bnx2x_sp(bp
, mac_stats
.mstat_stats
);
664 ADD_STAT64(stats_rx
.rx_grerb
, rx_stat_ifhcinbadoctets
);
665 ADD_STAT64(stats_rx
.rx_grfcs
, rx_stat_dot3statsfcserrors
);
666 ADD_STAT64(stats_rx
.rx_grund
, rx_stat_etherstatsundersizepkts
);
667 ADD_STAT64(stats_rx
.rx_grovr
, rx_stat_dot3statsframestoolong
);
668 ADD_STAT64(stats_rx
.rx_grfrg
, rx_stat_etherstatsfragments
);
669 ADD_STAT64(stats_rx
.rx_grxcf
, rx_stat_maccontrolframesreceived
);
670 ADD_STAT64(stats_rx
.rx_grxpf
, rx_stat_xoffstateentered
);
671 ADD_STAT64(stats_rx
.rx_grxpf
, rx_stat_mac_xpf
);
672 ADD_STAT64(stats_tx
.tx_gtxpf
, tx_stat_outxoffsent
);
673 ADD_STAT64(stats_tx
.tx_gtxpf
, tx_stat_flowcontroldone
);
675 /* collect pfc stats */
676 ADD_64(pstats
->pfc_frames_tx_hi
, new->stats_tx
.tx_gtxpp_hi
,
677 pstats
->pfc_frames_tx_lo
, new->stats_tx
.tx_gtxpp_lo
);
678 ADD_64(pstats
->pfc_frames_rx_hi
, new->stats_rx
.rx_grxpp_hi
,
679 pstats
->pfc_frames_rx_lo
, new->stats_rx
.rx_grxpp_lo
);
681 ADD_STAT64(stats_tx
.tx_gt64
, tx_stat_etherstatspkts64octets
);
682 ADD_STAT64(stats_tx
.tx_gt127
,
683 tx_stat_etherstatspkts65octetsto127octets
);
684 ADD_STAT64(stats_tx
.tx_gt255
,
685 tx_stat_etherstatspkts128octetsto255octets
);
686 ADD_STAT64(stats_tx
.tx_gt511
,
687 tx_stat_etherstatspkts256octetsto511octets
);
688 ADD_STAT64(stats_tx
.tx_gt1023
,
689 tx_stat_etherstatspkts512octetsto1023octets
);
690 ADD_STAT64(stats_tx
.tx_gt1518
,
691 tx_stat_etherstatspkts1024octetsto1522octets
);
692 ADD_STAT64(stats_tx
.tx_gt2047
, tx_stat_mac_2047
);
694 ADD_STAT64(stats_tx
.tx_gt4095
, tx_stat_mac_4095
);
695 ADD_STAT64(stats_tx
.tx_gt9216
, tx_stat_mac_9216
);
696 ADD_STAT64(stats_tx
.tx_gt16383
, tx_stat_mac_16383
);
698 ADD_STAT64(stats_tx
.tx_gterr
,
699 tx_stat_dot3statsinternalmactransmiterrors
);
700 ADD_STAT64(stats_tx
.tx_gtufl
, tx_stat_mac_ufl
);
702 estats
->etherstatspkts1024octetsto1522octets_hi
=
703 pstats
->mac_stx
[1].tx_stat_etherstatspkts1024octetsto1522octets_hi
;
704 estats
->etherstatspkts1024octetsto1522octets_lo
=
705 pstats
->mac_stx
[1].tx_stat_etherstatspkts1024octetsto1522octets_lo
;
707 estats
->etherstatspktsover1522octets_hi
=
708 pstats
->mac_stx
[1].tx_stat_mac_2047_hi
;
709 estats
->etherstatspktsover1522octets_lo
=
710 pstats
->mac_stx
[1].tx_stat_mac_2047_lo
;
712 ADD_64(estats
->etherstatspktsover1522octets_hi
,
713 pstats
->mac_stx
[1].tx_stat_mac_4095_hi
,
714 estats
->etherstatspktsover1522octets_lo
,
715 pstats
->mac_stx
[1].tx_stat_mac_4095_lo
);
717 ADD_64(estats
->etherstatspktsover1522octets_hi
,
718 pstats
->mac_stx
[1].tx_stat_mac_9216_hi
,
719 estats
->etherstatspktsover1522octets_lo
,
720 pstats
->mac_stx
[1].tx_stat_mac_9216_lo
);
722 ADD_64(estats
->etherstatspktsover1522octets_hi
,
723 pstats
->mac_stx
[1].tx_stat_mac_16383_hi
,
724 estats
->etherstatspktsover1522octets_lo
,
725 pstats
->mac_stx
[1].tx_stat_mac_16383_lo
);
727 estats
->pause_frames_received_hi
=
728 pstats
->mac_stx
[1].rx_stat_mac_xpf_hi
;
729 estats
->pause_frames_received_lo
=
730 pstats
->mac_stx
[1].rx_stat_mac_xpf_lo
;
732 estats
->pause_frames_sent_hi
=
733 pstats
->mac_stx
[1].tx_stat_outxoffsent_hi
;
734 estats
->pause_frames_sent_lo
=
735 pstats
->mac_stx
[1].tx_stat_outxoffsent_lo
;
737 estats
->pfc_frames_received_hi
=
738 pstats
->pfc_frames_rx_hi
;
739 estats
->pfc_frames_received_lo
=
740 pstats
->pfc_frames_rx_lo
;
741 estats
->pfc_frames_sent_hi
=
742 pstats
->pfc_frames_tx_hi
;
743 estats
->pfc_frames_sent_lo
=
744 pstats
->pfc_frames_tx_lo
;
747 static void bnx2x_emac_stats_update(struct bnx2x
*bp
)
749 struct emac_stats
*new = bnx2x_sp(bp
, mac_stats
.emac_stats
);
750 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
751 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
753 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets
);
754 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets
);
755 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors
);
756 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors
);
757 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors
);
758 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors
);
759 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts
);
760 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong
);
761 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments
);
762 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers
);
763 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived
);
764 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered
);
765 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived
);
766 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived
);
767 UPDATE_EXTEND_STAT(tx_stat_outxonsent
);
768 UPDATE_EXTEND_STAT(tx_stat_outxoffsent
);
769 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone
);
770 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions
);
771 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes
);
772 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes
);
773 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions
);
774 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions
);
775 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions
);
776 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets
);
777 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets
);
778 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets
);
779 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets
);
780 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets
);
781 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets
);
782 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets
);
783 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors
);
785 estats
->pause_frames_received_hi
=
786 pstats
->mac_stx
[1].rx_stat_xonpauseframesreceived_hi
;
787 estats
->pause_frames_received_lo
=
788 pstats
->mac_stx
[1].rx_stat_xonpauseframesreceived_lo
;
789 ADD_64(estats
->pause_frames_received_hi
,
790 pstats
->mac_stx
[1].rx_stat_xoffpauseframesreceived_hi
,
791 estats
->pause_frames_received_lo
,
792 pstats
->mac_stx
[1].rx_stat_xoffpauseframesreceived_lo
);
794 estats
->pause_frames_sent_hi
=
795 pstats
->mac_stx
[1].tx_stat_outxonsent_hi
;
796 estats
->pause_frames_sent_lo
=
797 pstats
->mac_stx
[1].tx_stat_outxonsent_lo
;
798 ADD_64(estats
->pause_frames_sent_hi
,
799 pstats
->mac_stx
[1].tx_stat_outxoffsent_hi
,
800 estats
->pause_frames_sent_lo
,
801 pstats
->mac_stx
[1].tx_stat_outxoffsent_lo
);
804 static int bnx2x_hw_stats_update(struct bnx2x
*bp
)
806 struct nig_stats
*new = bnx2x_sp(bp
, nig_stats
);
807 struct nig_stats
*old
= &(bp
->port
.old_nig_stats
);
808 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
809 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
815 switch (bp
->link_vars
.mac_type
) {
817 bnx2x_bmac_stats_update(bp
);
821 bnx2x_emac_stats_update(bp
);
826 bnx2x_mstat_stats_update(bp
);
829 case MAC_TYPE_NONE
: /* unreached */
831 "stats updated by DMAE but no MAC active\n");
834 default: /* unreached */
835 BNX2X_ERR("Unknown MAC type\n");
838 ADD_EXTEND_64(pstats
->brb_drop_hi
, pstats
->brb_drop_lo
,
839 new->brb_discard
- old
->brb_discard
);
840 ADD_EXTEND_64(estats
->brb_truncate_hi
, estats
->brb_truncate_lo
,
841 new->brb_truncate
- old
->brb_truncate
);
843 if (!CHIP_IS_E3(bp
)) {
844 UPDATE_STAT64_NIG(egress_mac_pkt0
,
845 etherstatspkts1024octetsto1522octets
);
846 UPDATE_STAT64_NIG(egress_mac_pkt1
,
847 etherstatspktsover1522octets
);
850 memcpy(old
, new, sizeof(struct nig_stats
));
852 memcpy(&(estats
->rx_stat_ifhcinbadoctets_hi
), &(pstats
->mac_stx
[1]),
853 sizeof(struct mac_stx
));
854 estats
->brb_drop_hi
= pstats
->brb_drop_hi
;
855 estats
->brb_drop_lo
= pstats
->brb_drop_lo
;
857 pstats
->host_port_stats_counter
++;
859 if (CHIP_IS_E3(bp
)) {
860 u32 lpi_reg
= BP_PORT(bp
) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
861 : MISC_REG_CPMU_LP_SM_ENT_CNT_P0
;
862 estats
->eee_tx_lpi
+= REG_RD(bp
, lpi_reg
);
867 SHMEM_RD(bp
, port_mb
[BP_PORT(bp
)].stat_nig_timer
);
868 if (nig_timer_max
!= estats
->nig_timer_max
) {
869 estats
->nig_timer_max
= nig_timer_max
;
870 BNX2X_ERR("NIG timer max (%u)\n",
871 estats
->nig_timer_max
);
878 static int bnx2x_storm_stats_validate_counters(struct bnx2x
*bp
)
880 struct stats_counter
*counters
= &bp
->fw_stats_data
->storm_counters
;
881 u16 cur_stats_counter
;
882 /* Make sure we use the value of the counter
883 * used for sending the last stats ramrod.
885 cur_stats_counter
= bp
->stats_counter
- 1;
887 /* are storm stats valid? */
888 if (le16_to_cpu(counters
->xstats_counter
) != cur_stats_counter
) {
890 "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
891 le16_to_cpu(counters
->xstats_counter
), bp
->stats_counter
);
895 if (le16_to_cpu(counters
->ustats_counter
) != cur_stats_counter
) {
897 "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
898 le16_to_cpu(counters
->ustats_counter
), bp
->stats_counter
);
902 if (le16_to_cpu(counters
->cstats_counter
) != cur_stats_counter
) {
904 "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
905 le16_to_cpu(counters
->cstats_counter
), bp
->stats_counter
);
909 if (le16_to_cpu(counters
->tstats_counter
) != cur_stats_counter
) {
911 "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
912 le16_to_cpu(counters
->tstats_counter
), bp
->stats_counter
);
918 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
920 struct tstorm_per_port_stats
*tport
=
921 &bp
->fw_stats_data
->port
.tstorm_port_statistics
;
922 struct tstorm_per_pf_stats
*tfunc
=
923 &bp
->fw_stats_data
->pf
.tstorm_pf_statistics
;
924 struct host_func_stats
*fstats
= &bp
->func_stats
;
925 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
926 struct bnx2x_eth_stats_old
*estats_old
= &bp
->eth_stats_old
;
929 /* vfs stat counter is managed by pf */
930 if (IS_PF(bp
) && bnx2x_storm_stats_validate_counters(bp
))
933 estats
->error_bytes_received_hi
= 0;
934 estats
->error_bytes_received_lo
= 0;
936 for_each_eth_queue(bp
, i
) {
937 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
938 struct tstorm_per_queue_stats
*tclient
=
939 &bp
->fw_stats_data
->queue_stats
[i
].
940 tstorm_queue_statistics
;
941 struct tstorm_per_queue_stats
*old_tclient
=
942 &bnx2x_fp_stats(bp
, fp
)->old_tclient
;
943 struct ustorm_per_queue_stats
*uclient
=
944 &bp
->fw_stats_data
->queue_stats
[i
].
945 ustorm_queue_statistics
;
946 struct ustorm_per_queue_stats
*old_uclient
=
947 &bnx2x_fp_stats(bp
, fp
)->old_uclient
;
948 struct xstorm_per_queue_stats
*xclient
=
949 &bp
->fw_stats_data
->queue_stats
[i
].
950 xstorm_queue_statistics
;
951 struct xstorm_per_queue_stats
*old_xclient
=
952 &bnx2x_fp_stats(bp
, fp
)->old_xclient
;
953 struct bnx2x_eth_q_stats
*qstats
=
954 &bnx2x_fp_stats(bp
, fp
)->eth_q_stats
;
955 struct bnx2x_eth_q_stats_old
*qstats_old
=
956 &bnx2x_fp_stats(bp
, fp
)->eth_q_stats_old
;
960 DP(BNX2X_MSG_STATS
, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
961 i
, xclient
->ucast_pkts_sent
,
962 xclient
->bcast_pkts_sent
, xclient
->mcast_pkts_sent
);
964 DP(BNX2X_MSG_STATS
, "---------------\n");
966 UPDATE_QSTAT(tclient
->rcv_bcast_bytes
,
967 total_broadcast_bytes_received
);
968 UPDATE_QSTAT(tclient
->rcv_mcast_bytes
,
969 total_multicast_bytes_received
);
970 UPDATE_QSTAT(tclient
->rcv_ucast_bytes
,
971 total_unicast_bytes_received
);
974 * sum to total_bytes_received all
975 * unicast/multicast/broadcast
977 qstats
->total_bytes_received_hi
=
978 qstats
->total_broadcast_bytes_received_hi
;
979 qstats
->total_bytes_received_lo
=
980 qstats
->total_broadcast_bytes_received_lo
;
982 ADD_64(qstats
->total_bytes_received_hi
,
983 qstats
->total_multicast_bytes_received_hi
,
984 qstats
->total_bytes_received_lo
,
985 qstats
->total_multicast_bytes_received_lo
);
987 ADD_64(qstats
->total_bytes_received_hi
,
988 qstats
->total_unicast_bytes_received_hi
,
989 qstats
->total_bytes_received_lo
,
990 qstats
->total_unicast_bytes_received_lo
);
992 qstats
->valid_bytes_received_hi
=
993 qstats
->total_bytes_received_hi
;
994 qstats
->valid_bytes_received_lo
=
995 qstats
->total_bytes_received_lo
;
997 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts
,
998 total_unicast_packets_received
);
999 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts
,
1000 total_multicast_packets_received
);
1001 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts
,
1002 total_broadcast_packets_received
);
1003 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard
,
1004 etherstatsoverrsizepkts
, 32);
1005 UPDATE_EXTEND_E_TSTAT(no_buff_discard
, no_buff_discard
, 16);
1007 SUB_EXTEND_USTAT(ucast_no_buff_pkts
,
1008 total_unicast_packets_received
);
1009 SUB_EXTEND_USTAT(mcast_no_buff_pkts
,
1010 total_multicast_packets_received
);
1011 SUB_EXTEND_USTAT(bcast_no_buff_pkts
,
1012 total_broadcast_packets_received
);
1013 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts
, no_buff_discard
);
1014 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts
, no_buff_discard
);
1015 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts
, no_buff_discard
);
1017 UPDATE_QSTAT(xclient
->bcast_bytes_sent
,
1018 total_broadcast_bytes_transmitted
);
1019 UPDATE_QSTAT(xclient
->mcast_bytes_sent
,
1020 total_multicast_bytes_transmitted
);
1021 UPDATE_QSTAT(xclient
->ucast_bytes_sent
,
1022 total_unicast_bytes_transmitted
);
1025 * sum to total_bytes_transmitted all
1026 * unicast/multicast/broadcast
1028 qstats
->total_bytes_transmitted_hi
=
1029 qstats
->total_unicast_bytes_transmitted_hi
;
1030 qstats
->total_bytes_transmitted_lo
=
1031 qstats
->total_unicast_bytes_transmitted_lo
;
1033 ADD_64(qstats
->total_bytes_transmitted_hi
,
1034 qstats
->total_broadcast_bytes_transmitted_hi
,
1035 qstats
->total_bytes_transmitted_lo
,
1036 qstats
->total_broadcast_bytes_transmitted_lo
);
1038 ADD_64(qstats
->total_bytes_transmitted_hi
,
1039 qstats
->total_multicast_bytes_transmitted_hi
,
1040 qstats
->total_bytes_transmitted_lo
,
1041 qstats
->total_multicast_bytes_transmitted_lo
);
1043 UPDATE_EXTEND_XSTAT(ucast_pkts_sent
,
1044 total_unicast_packets_transmitted
);
1045 UPDATE_EXTEND_XSTAT(mcast_pkts_sent
,
1046 total_multicast_packets_transmitted
);
1047 UPDATE_EXTEND_XSTAT(bcast_pkts_sent
,
1048 total_broadcast_packets_transmitted
);
1050 UPDATE_EXTEND_TSTAT(checksum_discard
,
1051 total_packets_received_checksum_discarded
);
1052 UPDATE_EXTEND_TSTAT(ttl0_discard
,
1053 total_packets_received_ttl0_discarded
);
1055 UPDATE_EXTEND_XSTAT(error_drop_pkts
,
1056 total_transmitted_dropped_packets_error
);
1058 /* TPA aggregations completed */
1059 UPDATE_EXTEND_E_USTAT(coalesced_events
, total_tpa_aggregations
);
1060 /* Number of network frames aggregated by TPA */
1061 UPDATE_EXTEND_E_USTAT(coalesced_pkts
,
1062 total_tpa_aggregated_frames
);
1063 /* Total number of bytes in completed TPA aggregations */
1064 UPDATE_QSTAT(uclient
->coalesced_bytes
, total_tpa_bytes
);
1066 UPDATE_ESTAT_QSTAT_64(total_tpa_bytes
);
1068 UPDATE_FSTAT_QSTAT(total_bytes_received
);
1069 UPDATE_FSTAT_QSTAT(total_bytes_transmitted
);
1070 UPDATE_FSTAT_QSTAT(total_unicast_packets_received
);
1071 UPDATE_FSTAT_QSTAT(total_multicast_packets_received
);
1072 UPDATE_FSTAT_QSTAT(total_broadcast_packets_received
);
1073 UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted
);
1074 UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted
);
1075 UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted
);
1076 UPDATE_FSTAT_QSTAT(valid_bytes_received
);
1079 ADD_64(estats
->total_bytes_received_hi
,
1080 estats
->rx_stat_ifhcinbadoctets_hi
,
1081 estats
->total_bytes_received_lo
,
1082 estats
->rx_stat_ifhcinbadoctets_lo
);
1084 ADD_64_LE(estats
->total_bytes_received_hi
,
1085 tfunc
->rcv_error_bytes
.hi
,
1086 estats
->total_bytes_received_lo
,
1087 tfunc
->rcv_error_bytes
.lo
);
1089 ADD_64_LE(estats
->error_bytes_received_hi
,
1090 tfunc
->rcv_error_bytes
.hi
,
1091 estats
->error_bytes_received_lo
,
1092 tfunc
->rcv_error_bytes
.lo
);
1094 UPDATE_ESTAT(etherstatsoverrsizepkts
, rx_stat_dot3statsframestoolong
);
1096 ADD_64(estats
->error_bytes_received_hi
,
1097 estats
->rx_stat_ifhcinbadoctets_hi
,
1098 estats
->error_bytes_received_lo
,
1099 estats
->rx_stat_ifhcinbadoctets_lo
);
1102 struct bnx2x_fw_port_stats_old
*fwstats
= &bp
->fw_stats_old
;
1103 UPDATE_FW_STAT(mac_filter_discard
);
1104 UPDATE_FW_STAT(mf_tag_discard
);
1105 UPDATE_FW_STAT(brb_truncate_discard
);
1106 UPDATE_FW_STAT(mac_discard
);
1109 fstats
->host_func_stats_start
= ++fstats
->host_func_stats_end
;
1111 bp
->stats_pending
= 0;
1116 static void bnx2x_net_stats_update(struct bnx2x
*bp
)
1118 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
1119 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
1123 nstats
->rx_packets
=
1124 bnx2x_hilo(&estats
->total_unicast_packets_received_hi
) +
1125 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
) +
1126 bnx2x_hilo(&estats
->total_broadcast_packets_received_hi
);
1128 nstats
->tx_packets
=
1129 bnx2x_hilo(&estats
->total_unicast_packets_transmitted_hi
) +
1130 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
) +
1131 bnx2x_hilo(&estats
->total_broadcast_packets_transmitted_hi
);
1133 nstats
->rx_bytes
= bnx2x_hilo(&estats
->total_bytes_received_hi
);
1135 nstats
->tx_bytes
= bnx2x_hilo(&estats
->total_bytes_transmitted_hi
);
1137 tmp
= estats
->mac_discard
;
1138 for_each_rx_queue(bp
, i
) {
1139 struct tstorm_per_queue_stats
*old_tclient
=
1140 &bp
->fp_stats
[i
].old_tclient
;
1141 tmp
+= le32_to_cpu(old_tclient
->checksum_discard
);
1143 nstats
->rx_dropped
= tmp
+ bp
->net_stats_old
.rx_dropped
;
1145 nstats
->tx_dropped
= 0;
1148 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
);
1150 nstats
->collisions
=
1151 bnx2x_hilo(&estats
->tx_stat_etherstatscollisions_hi
);
1153 nstats
->rx_length_errors
=
1154 bnx2x_hilo(&estats
->rx_stat_etherstatsundersizepkts_hi
) +
1155 bnx2x_hilo(&estats
->etherstatsoverrsizepkts_hi
);
1156 nstats
->rx_over_errors
= bnx2x_hilo(&estats
->brb_drop_hi
) +
1157 bnx2x_hilo(&estats
->brb_truncate_hi
);
1158 nstats
->rx_crc_errors
=
1159 bnx2x_hilo(&estats
->rx_stat_dot3statsfcserrors_hi
);
1160 nstats
->rx_frame_errors
=
1161 bnx2x_hilo(&estats
->rx_stat_dot3statsalignmenterrors_hi
);
1162 nstats
->rx_fifo_errors
= bnx2x_hilo(&estats
->no_buff_discard_hi
);
1163 nstats
->rx_missed_errors
= 0;
1165 nstats
->rx_errors
= nstats
->rx_length_errors
+
1166 nstats
->rx_over_errors
+
1167 nstats
->rx_crc_errors
+
1168 nstats
->rx_frame_errors
+
1169 nstats
->rx_fifo_errors
+
1170 nstats
->rx_missed_errors
;
1172 nstats
->tx_aborted_errors
=
1173 bnx2x_hilo(&estats
->tx_stat_dot3statslatecollisions_hi
) +
1174 bnx2x_hilo(&estats
->tx_stat_dot3statsexcessivecollisions_hi
);
1175 nstats
->tx_carrier_errors
=
1176 bnx2x_hilo(&estats
->rx_stat_dot3statscarriersenseerrors_hi
);
1177 nstats
->tx_fifo_errors
= 0;
1178 nstats
->tx_heartbeat_errors
= 0;
1179 nstats
->tx_window_errors
= 0;
1181 nstats
->tx_errors
= nstats
->tx_aborted_errors
+
1182 nstats
->tx_carrier_errors
+
1183 bnx2x_hilo(&estats
->tx_stat_dot3statsinternalmactransmiterrors_hi
);
1186 static void bnx2x_drv_stats_update(struct bnx2x
*bp
)
1188 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
1191 for_each_queue(bp
, i
) {
1192 struct bnx2x_eth_q_stats
*qstats
= &bp
->fp_stats
[i
].eth_q_stats
;
1193 struct bnx2x_eth_q_stats_old
*qstats_old
=
1194 &bp
->fp_stats
[i
].eth_q_stats_old
;
1196 UPDATE_ESTAT_QSTAT(driver_xoff
);
1197 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt
);
1198 UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed
);
1199 UPDATE_ESTAT_QSTAT(hw_csum_err
);
1200 UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt
);
1204 static bool bnx2x_edebug_stats_stopped(struct bnx2x
*bp
)
1208 if (SHMEM2_HAS(bp
, edebug_driver_if
[1])) {
1209 val
= SHMEM2_RD(bp
, edebug_driver_if
[1]);
1211 if (val
== EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT
)
1218 static void bnx2x_stats_update(struct bnx2x
*bp
)
1220 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
1222 if (bnx2x_edebug_stats_stopped(bp
))
1226 if (*stats_comp
!= DMAE_COMP_VAL
)
1230 bnx2x_hw_stats_update(bp
);
1232 if (bnx2x_storm_stats_update(bp
)) {
1233 if (bp
->stats_pending
++ == 3) {
1234 BNX2X_ERR("storm stats were not updated for 3 times\n");
1240 /* vf doesn't collect HW statistics, and doesn't get completions
1241 * perform only update
1243 bnx2x_storm_stats_update(bp
);
1246 bnx2x_net_stats_update(bp
);
1247 bnx2x_drv_stats_update(bp
);
1253 if (netif_msg_timer(bp
)) {
1254 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
1256 netdev_dbg(bp
->dev
, "brb drops %u brb truncate %u\n",
1257 estats
->brb_drop_lo
, estats
->brb_truncate_lo
);
1260 bnx2x_hw_stats_post(bp
);
1261 bnx2x_storm_stats_post(bp
);
1264 static void bnx2x_port_stats_stop(struct bnx2x
*bp
)
1266 struct dmae_command
*dmae
;
1268 int loader_idx
= PMF_DMAE_C(bp
);
1269 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
1271 bp
->executer_idx
= 0;
1273 opcode
= bnx2x_dmae_opcode(bp
, DMAE_SRC_PCI
, DMAE_DST_GRC
, false, 0);
1275 if (bp
->port
.port_stx
) {
1277 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
1279 dmae
->opcode
= bnx2x_dmae_opcode_add_comp(
1280 opcode
, DMAE_COMP_GRC
);
1282 dmae
->opcode
= bnx2x_dmae_opcode_add_comp(
1283 opcode
, DMAE_COMP_PCI
);
1285 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
1286 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
1287 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
1288 dmae
->dst_addr_hi
= 0;
1289 dmae
->len
= bnx2x_get_port_stats_dma_len(bp
);
1291 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
1292 dmae
->comp_addr_hi
= 0;
1295 dmae
->comp_addr_lo
=
1296 U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
1297 dmae
->comp_addr_hi
=
1298 U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
1299 dmae
->comp_val
= DMAE_COMP_VAL
;
1307 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
1309 bnx2x_dmae_opcode_add_comp(opcode
, DMAE_COMP_PCI
);
1310 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
1311 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
1312 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
1313 dmae
->dst_addr_hi
= 0;
1314 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
1315 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
1316 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
1317 dmae
->comp_val
= DMAE_COMP_VAL
;
1323 static void bnx2x_stats_stop(struct bnx2x
*bp
)
1325 bool update
= false;
1327 bnx2x_stats_comp(bp
);
1330 update
= (bnx2x_hw_stats_update(bp
) == 0);
1332 update
|= (bnx2x_storm_stats_update(bp
) == 0);
1335 bnx2x_net_stats_update(bp
);
1338 bnx2x_port_stats_stop(bp
);
1340 bnx2x_hw_stats_post(bp
);
1341 bnx2x_stats_comp(bp
);
/* No-op action for state-machine entries that require no work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
1349 static const struct {
1350 void (*action
)(struct bnx2x
*bp
);
1351 enum bnx2x_stats_state next_state
;
1352 } bnx2x_stats_stm
[STATS_STATE_MAX
][STATS_EVENT_MAX
] = {
1355 /* DISABLED PMF */ {bnx2x_stats_pmf_update
, STATS_STATE_DISABLED
},
1356 /* LINK_UP */ {bnx2x_stats_start
, STATS_STATE_ENABLED
},
1357 /* UPDATE */ {bnx2x_stats_do_nothing
, STATS_STATE_DISABLED
},
1358 /* STOP */ {bnx2x_stats_do_nothing
, STATS_STATE_DISABLED
}
1361 /* ENABLED PMF */ {bnx2x_stats_pmf_start
, STATS_STATE_ENABLED
},
1362 /* LINK_UP */ {bnx2x_stats_restart
, STATS_STATE_ENABLED
},
1363 /* UPDATE */ {bnx2x_stats_update
, STATS_STATE_ENABLED
},
1364 /* STOP */ {bnx2x_stats_stop
, STATS_STATE_DISABLED
}
1368 void bnx2x_stats_handle(struct bnx2x
*bp
, enum bnx2x_stats_event event
)
1370 enum bnx2x_stats_state state
= bp
->stats_state
;
1372 if (unlikely(bp
->panic
))
1375 /* Statistics update run from timer context, and we don't want to stop
1376 * that context in case someone is in the middle of a transition.
1377 * For other events, wait a bit until lock is taken.
1379 if (down_trylock(&bp
->stats_lock
)) {
1380 if (event
== STATS_EVENT_UPDATE
)
1384 "Unlikely stats' lock contention [event %d]\n", event
);
1385 if (unlikely(down_timeout(&bp
->stats_lock
, HZ
/ 10))) {
1386 BNX2X_ERR("Failed to take stats lock [event %d]\n",
1392 bnx2x_stats_stm
[state
][event
].action(bp
);
1393 bp
->stats_state
= bnx2x_stats_stm
[state
][event
].next_state
;
1395 up(&bp
->stats_lock
);
1397 if ((event
!= STATS_EVENT_UPDATE
) || netif_msg_timer(bp
))
1398 DP(BNX2X_MSG_STATS
, "state %d -> event %d -> state %d\n",
1399 state
, event
, bp
->stats_state
);
1402 static void bnx2x_port_stats_base_init(struct bnx2x
*bp
)
1404 struct dmae_command
*dmae
;
1405 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
1408 if (!bp
->port
.pmf
|| !bp
->port
.port_stx
) {
1409 BNX2X_ERR("BUG!\n");
1413 bp
->executer_idx
= 0;
1415 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
1416 dmae
->opcode
= bnx2x_dmae_opcode(bp
, DMAE_SRC_PCI
, DMAE_DST_GRC
,
1417 true, DMAE_COMP_PCI
);
1418 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
1419 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
1420 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
1421 dmae
->dst_addr_hi
= 0;
1422 dmae
->len
= bnx2x_get_port_stats_dma_len(bp
);
1423 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
1424 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
1425 dmae
->comp_val
= DMAE_COMP_VAL
;
1428 bnx2x_hw_stats_post(bp
);
1429 bnx2x_stats_comp(bp
);
1432 /* This function will prepare the statistics ramrod data the way
1433 * we will only have to increment the statistics counter and
1434 * send the ramrod each time we have to.
1436 static void bnx2x_prep_fw_stats_req(struct bnx2x
*bp
)
1439 int first_queue_query_index
;
1440 struct stats_query_header
*stats_hdr
= &bp
->fw_stats_req
->hdr
;
1442 dma_addr_t cur_data_offset
;
1443 struct stats_query_entry
*cur_query_entry
;
1445 stats_hdr
->cmd_num
= bp
->fw_stats_num
;
1446 stats_hdr
->drv_stats_counter
= 0;
1448 /* storm_counters struct contains the counters of completed
1449 * statistics requests per storm which are incremented by FW
1450 * each time it completes hadning a statistics ramrod. We will
1451 * check these counters in the timer handler and discard a
1452 * (statistics) ramrod completion.
1454 cur_data_offset
= bp
->fw_stats_data_mapping
+
1455 offsetof(struct bnx2x_fw_stats_data
, storm_counters
);
1457 stats_hdr
->stats_counters_addrs
.hi
=
1458 cpu_to_le32(U64_HI(cur_data_offset
));
1459 stats_hdr
->stats_counters_addrs
.lo
=
1460 cpu_to_le32(U64_LO(cur_data_offset
));
1462 /* prepare to the first stats ramrod (will be completed with
1463 * the counters equal to zero) - init counters to somethig different.
1465 memset(&bp
->fw_stats_data
->storm_counters
, 0xff,
1466 sizeof(struct stats_counter
));
1468 /**** Port FW statistics data ****/
1469 cur_data_offset
= bp
->fw_stats_data_mapping
+
1470 offsetof(struct bnx2x_fw_stats_data
, port
);
1472 cur_query_entry
= &bp
->fw_stats_req
->query
[BNX2X_PORT_QUERY_IDX
];
1474 cur_query_entry
->kind
= STATS_TYPE_PORT
;
1475 /* For port query index is a DONT CARE */
1476 cur_query_entry
->index
= BP_PORT(bp
);
1477 /* For port query funcID is a DONT CARE */
1478 cur_query_entry
->funcID
= cpu_to_le16(BP_FUNC(bp
));
1479 cur_query_entry
->address
.hi
= cpu_to_le32(U64_HI(cur_data_offset
));
1480 cur_query_entry
->address
.lo
= cpu_to_le32(U64_LO(cur_data_offset
));
1482 /**** PF FW statistics data ****/
1483 cur_data_offset
= bp
->fw_stats_data_mapping
+
1484 offsetof(struct bnx2x_fw_stats_data
, pf
);
1486 cur_query_entry
= &bp
->fw_stats_req
->query
[BNX2X_PF_QUERY_IDX
];
1488 cur_query_entry
->kind
= STATS_TYPE_PF
;
1489 /* For PF query index is a DONT CARE */
1490 cur_query_entry
->index
= BP_PORT(bp
);
1491 cur_query_entry
->funcID
= cpu_to_le16(BP_FUNC(bp
));
1492 cur_query_entry
->address
.hi
= cpu_to_le32(U64_HI(cur_data_offset
));
1493 cur_query_entry
->address
.lo
= cpu_to_le32(U64_LO(cur_data_offset
));
1495 /**** FCoE FW statistics data ****/
1497 cur_data_offset
= bp
->fw_stats_data_mapping
+
1498 offsetof(struct bnx2x_fw_stats_data
, fcoe
);
1501 &bp
->fw_stats_req
->query
[BNX2X_FCOE_QUERY_IDX
];
1503 cur_query_entry
->kind
= STATS_TYPE_FCOE
;
1504 /* For FCoE query index is a DONT CARE */
1505 cur_query_entry
->index
= BP_PORT(bp
);
1506 cur_query_entry
->funcID
= cpu_to_le16(BP_FUNC(bp
));
1507 cur_query_entry
->address
.hi
=
1508 cpu_to_le32(U64_HI(cur_data_offset
));
1509 cur_query_entry
->address
.lo
=
1510 cpu_to_le32(U64_LO(cur_data_offset
));
1513 /**** Clients' queries ****/
1514 cur_data_offset
= bp
->fw_stats_data_mapping
+
1515 offsetof(struct bnx2x_fw_stats_data
, queue_stats
);
1517 /* first queue query index depends whether FCoE offloaded request will
1518 * be included in the ramrod
1521 first_queue_query_index
= BNX2X_FIRST_QUEUE_QUERY_IDX
;
1523 first_queue_query_index
= BNX2X_FIRST_QUEUE_QUERY_IDX
- 1;
1525 for_each_eth_queue(bp
, i
) {
1528 query
[first_queue_query_index
+ i
];
1530 cur_query_entry
->kind
= STATS_TYPE_QUEUE
;
1531 cur_query_entry
->index
= bnx2x_stats_id(&bp
->fp
[i
]);
1532 cur_query_entry
->funcID
= cpu_to_le16(BP_FUNC(bp
));
1533 cur_query_entry
->address
.hi
=
1534 cpu_to_le32(U64_HI(cur_data_offset
));
1535 cur_query_entry
->address
.lo
=
1536 cpu_to_le32(U64_LO(cur_data_offset
));
1538 cur_data_offset
+= sizeof(struct per_queue_stats
);
1541 /* add FCoE queue query if needed */
1545 query
[first_queue_query_index
+ i
];
1547 cur_query_entry
->kind
= STATS_TYPE_QUEUE
;
1548 cur_query_entry
->index
= bnx2x_stats_id(&bp
->fp
[FCOE_IDX(bp
)]);
1549 cur_query_entry
->funcID
= cpu_to_le16(BP_FUNC(bp
));
1550 cur_query_entry
->address
.hi
=
1551 cpu_to_le32(U64_HI(cur_data_offset
));
1552 cur_query_entry
->address
.lo
=
1553 cpu_to_le32(U64_LO(cur_data_offset
));
1557 void bnx2x_memset_stats(struct bnx2x
*bp
)
1561 /* function stats */
1562 for_each_queue(bp
, i
) {
1563 struct bnx2x_fp_stats
*fp_stats
= &bp
->fp_stats
[i
];
1565 memset(&fp_stats
->old_tclient
, 0,
1566 sizeof(fp_stats
->old_tclient
));
1567 memset(&fp_stats
->old_uclient
, 0,
1568 sizeof(fp_stats
->old_uclient
));
1569 memset(&fp_stats
->old_xclient
, 0,
1570 sizeof(fp_stats
->old_xclient
));
1571 if (bp
->stats_init
) {
1572 memset(&fp_stats
->eth_q_stats
, 0,
1573 sizeof(fp_stats
->eth_q_stats
));
1574 memset(&fp_stats
->eth_q_stats_old
, 0,
1575 sizeof(fp_stats
->eth_q_stats_old
));
1579 memset(&bp
->dev
->stats
, 0, sizeof(bp
->dev
->stats
));
1581 if (bp
->stats_init
) {
1582 memset(&bp
->net_stats_old
, 0, sizeof(bp
->net_stats_old
));
1583 memset(&bp
->fw_stats_old
, 0, sizeof(bp
->fw_stats_old
));
1584 memset(&bp
->eth_stats_old
, 0, sizeof(bp
->eth_stats_old
));
1585 memset(&bp
->eth_stats
, 0, sizeof(bp
->eth_stats
));
1586 memset(&bp
->func_stats
, 0, sizeof(bp
->func_stats
));
1589 bp
->stats_state
= STATS_STATE_DISABLED
;
1591 if (bp
->port
.pmf
&& bp
->port
.port_stx
)
1592 bnx2x_port_stats_base_init(bp
);
1594 /* mark the end of statistics initialization */
1595 bp
->stats_init
= false;
1598 void bnx2x_stats_init(struct bnx2x
*bp
)
1600 int /*abs*/port
= BP_PORT(bp
);
1601 int mb_idx
= BP_FW_MB_IDX(bp
);
1604 bnx2x_memset_stats(bp
);
1608 bp
->stats_pending
= 0;
1609 bp
->executer_idx
= 0;
1610 bp
->stats_counter
= 0;
1612 /* port and func stats for management */
1613 if (!BP_NOMCP(bp
)) {
1614 bp
->port
.port_stx
= SHMEM_RD(bp
, port_mb
[port
].port_stx
);
1615 bp
->func_stx
= SHMEM_RD(bp
, func_mb
[mb_idx
].fw_mb_param
);
1618 bp
->port
.port_stx
= 0;
1621 DP(BNX2X_MSG_STATS
, "port_stx 0x%x func_stx 0x%x\n",
1622 bp
->port
.port_stx
, bp
->func_stx
);
1624 /* pmf should retrieve port statistics from SP on a non-init*/
1625 if (!bp
->stats_init
&& bp
->port
.pmf
&& bp
->port
.port_stx
)
1626 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
1630 memset(&(bp
->port
.old_nig_stats
), 0, sizeof(struct nig_stats
));
1631 bp
->port
.old_nig_stats
.brb_discard
=
1632 REG_RD(bp
, NIG_REG_STAT0_BRB_DISCARD
+ port
*0x38);
1633 bp
->port
.old_nig_stats
.brb_truncate
=
1634 REG_RD(bp
, NIG_REG_STAT0_BRB_TRUNCATE
+ port
*0x38);
1635 if (!CHIP_IS_E3(bp
)) {
1636 REG_RD_DMAE(bp
, NIG_REG_STAT0_EGRESS_MAC_PKT0
+ port
*0x50,
1637 &(bp
->port
.old_nig_stats
.egress_mac_pkt0_lo
), 2);
1638 REG_RD_DMAE(bp
, NIG_REG_STAT0_EGRESS_MAC_PKT1
+ port
*0x50,
1639 &(bp
->port
.old_nig_stats
.egress_mac_pkt1_lo
), 2);
1642 /* Prepare statistics ramrod data */
1643 bnx2x_prep_fw_stats_req(bp
);
1645 /* Clean SP from previous statistics */
1646 if (bp
->stats_init
) {
1648 memset(bnx2x_sp(bp
, func_stats
), 0,
1649 sizeof(struct host_func_stats
));
1650 bnx2x_func_stats_init(bp
);
1651 bnx2x_hw_stats_post(bp
);
1652 bnx2x_stats_comp(bp
);
1656 bnx2x_memset_stats(bp
);
1659 void bnx2x_save_statistics(struct bnx2x
*bp
)
1662 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
1664 /* save queue statistics */
1665 for_each_eth_queue(bp
, i
) {
1666 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1667 struct bnx2x_eth_q_stats
*qstats
=
1668 &bnx2x_fp_stats(bp
, fp
)->eth_q_stats
;
1669 struct bnx2x_eth_q_stats_old
*qstats_old
=
1670 &bnx2x_fp_stats(bp
, fp
)->eth_q_stats_old
;
1672 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi
);
1673 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo
);
1674 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi
);
1675 UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo
);
1676 UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi
);
1677 UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo
);
1678 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi
);
1679 UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo
);
1680 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi
);
1681 UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo
);
1682 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi
);
1683 UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo
);
1684 UPDATE_QSTAT_OLD(total_tpa_bytes_hi
);
1685 UPDATE_QSTAT_OLD(total_tpa_bytes_lo
);
1688 /* save net_device_stats statistics */
1689 bp
->net_stats_old
.rx_dropped
= nstats
->rx_dropped
;
1691 /* store port firmware statistics */
1692 if (bp
->port
.pmf
&& IS_MF(bp
)) {
1693 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
1694 struct bnx2x_fw_port_stats_old
*fwstats
= &bp
->fw_stats_old
;
1695 UPDATE_FW_STAT_OLD(mac_filter_discard
);
1696 UPDATE_FW_STAT_OLD(mf_tag_discard
);
1697 UPDATE_FW_STAT_OLD(brb_truncate_discard
);
1698 UPDATE_FW_STAT_OLD(mac_discard
);
1702 void bnx2x_afex_collect_stats(struct bnx2x
*bp
, void *void_afex_stats
,
1706 struct afex_stats
*afex_stats
= (struct afex_stats
*)void_afex_stats
;
1707 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
1708 struct per_queue_stats
*fcoe_q_stats
=
1709 &bp
->fw_stats_data
->queue_stats
[FCOE_IDX(bp
)];
1711 struct tstorm_per_queue_stats
*fcoe_q_tstorm_stats
=
1712 &fcoe_q_stats
->tstorm_queue_statistics
;
1714 struct ustorm_per_queue_stats
*fcoe_q_ustorm_stats
=
1715 &fcoe_q_stats
->ustorm_queue_statistics
;
1717 struct xstorm_per_queue_stats
*fcoe_q_xstorm_stats
=
1718 &fcoe_q_stats
->xstorm_queue_statistics
;
1720 struct fcoe_statistics_params
*fw_fcoe_stat
=
1721 &bp
->fw_stats_data
->fcoe
;
1723 memset(afex_stats
, 0, sizeof(struct afex_stats
));
1725 for_each_eth_queue(bp
, i
) {
1726 struct bnx2x_eth_q_stats
*qstats
= &bp
->fp_stats
[i
].eth_q_stats
;
1728 ADD_64(afex_stats
->rx_unicast_bytes_hi
,
1729 qstats
->total_unicast_bytes_received_hi
,
1730 afex_stats
->rx_unicast_bytes_lo
,
1731 qstats
->total_unicast_bytes_received_lo
);
1733 ADD_64(afex_stats
->rx_broadcast_bytes_hi
,
1734 qstats
->total_broadcast_bytes_received_hi
,
1735 afex_stats
->rx_broadcast_bytes_lo
,
1736 qstats
->total_broadcast_bytes_received_lo
);
1738 ADD_64(afex_stats
->rx_multicast_bytes_hi
,
1739 qstats
->total_multicast_bytes_received_hi
,
1740 afex_stats
->rx_multicast_bytes_lo
,
1741 qstats
->total_multicast_bytes_received_lo
);
1743 ADD_64(afex_stats
->rx_unicast_frames_hi
,
1744 qstats
->total_unicast_packets_received_hi
,
1745 afex_stats
->rx_unicast_frames_lo
,
1746 qstats
->total_unicast_packets_received_lo
);
1748 ADD_64(afex_stats
->rx_broadcast_frames_hi
,
1749 qstats
->total_broadcast_packets_received_hi
,
1750 afex_stats
->rx_broadcast_frames_lo
,
1751 qstats
->total_broadcast_packets_received_lo
);
1753 ADD_64(afex_stats
->rx_multicast_frames_hi
,
1754 qstats
->total_multicast_packets_received_hi
,
1755 afex_stats
->rx_multicast_frames_lo
,
1756 qstats
->total_multicast_packets_received_lo
);
1758 /* sum to rx_frames_discarded all discraded
1759 * packets due to size, ttl0 and checksum
1761 ADD_64(afex_stats
->rx_frames_discarded_hi
,
1762 qstats
->total_packets_received_checksum_discarded_hi
,
1763 afex_stats
->rx_frames_discarded_lo
,
1764 qstats
->total_packets_received_checksum_discarded_lo
);
1766 ADD_64(afex_stats
->rx_frames_discarded_hi
,
1767 qstats
->total_packets_received_ttl0_discarded_hi
,
1768 afex_stats
->rx_frames_discarded_lo
,
1769 qstats
->total_packets_received_ttl0_discarded_lo
);
1771 ADD_64(afex_stats
->rx_frames_discarded_hi
,
1772 qstats
->etherstatsoverrsizepkts_hi
,
1773 afex_stats
->rx_frames_discarded_lo
,
1774 qstats
->etherstatsoverrsizepkts_lo
);
1776 ADD_64(afex_stats
->rx_frames_dropped_hi
,
1777 qstats
->no_buff_discard_hi
,
1778 afex_stats
->rx_frames_dropped_lo
,
1779 qstats
->no_buff_discard_lo
);
1781 ADD_64(afex_stats
->tx_unicast_bytes_hi
,
1782 qstats
->total_unicast_bytes_transmitted_hi
,
1783 afex_stats
->tx_unicast_bytes_lo
,
1784 qstats
->total_unicast_bytes_transmitted_lo
);
1786 ADD_64(afex_stats
->tx_broadcast_bytes_hi
,
1787 qstats
->total_broadcast_bytes_transmitted_hi
,
1788 afex_stats
->tx_broadcast_bytes_lo
,
1789 qstats
->total_broadcast_bytes_transmitted_lo
);
1791 ADD_64(afex_stats
->tx_multicast_bytes_hi
,
1792 qstats
->total_multicast_bytes_transmitted_hi
,
1793 afex_stats
->tx_multicast_bytes_lo
,
1794 qstats
->total_multicast_bytes_transmitted_lo
);
1796 ADD_64(afex_stats
->tx_unicast_frames_hi
,
1797 qstats
->total_unicast_packets_transmitted_hi
,
1798 afex_stats
->tx_unicast_frames_lo
,
1799 qstats
->total_unicast_packets_transmitted_lo
);
1801 ADD_64(afex_stats
->tx_broadcast_frames_hi
,
1802 qstats
->total_broadcast_packets_transmitted_hi
,
1803 afex_stats
->tx_broadcast_frames_lo
,
1804 qstats
->total_broadcast_packets_transmitted_lo
);
1806 ADD_64(afex_stats
->tx_multicast_frames_hi
,
1807 qstats
->total_multicast_packets_transmitted_hi
,
1808 afex_stats
->tx_multicast_frames_lo
,
1809 qstats
->total_multicast_packets_transmitted_lo
);
1811 ADD_64(afex_stats
->tx_frames_dropped_hi
,
1812 qstats
->total_transmitted_dropped_packets_error_hi
,
1813 afex_stats
->tx_frames_dropped_lo
,
1814 qstats
->total_transmitted_dropped_packets_error_lo
);
1817 /* now add FCoE statistics which are collected separately
1818 * (both offloaded and non offloaded)
1821 ADD_64_LE(afex_stats
->rx_unicast_bytes_hi
,
1823 afex_stats
->rx_unicast_bytes_lo
,
1824 fw_fcoe_stat
->rx_stat0
.fcoe_rx_byte_cnt
);
1826 ADD_64_LE(afex_stats
->rx_unicast_bytes_hi
,
1827 fcoe_q_tstorm_stats
->rcv_ucast_bytes
.hi
,
1828 afex_stats
->rx_unicast_bytes_lo
,
1829 fcoe_q_tstorm_stats
->rcv_ucast_bytes
.lo
);
1831 ADD_64_LE(afex_stats
->rx_broadcast_bytes_hi
,
1832 fcoe_q_tstorm_stats
->rcv_bcast_bytes
.hi
,
1833 afex_stats
->rx_broadcast_bytes_lo
,
1834 fcoe_q_tstorm_stats
->rcv_bcast_bytes
.lo
);
1836 ADD_64_LE(afex_stats
->rx_multicast_bytes_hi
,
1837 fcoe_q_tstorm_stats
->rcv_mcast_bytes
.hi
,
1838 afex_stats
->rx_multicast_bytes_lo
,
1839 fcoe_q_tstorm_stats
->rcv_mcast_bytes
.lo
);
1841 ADD_64_LE(afex_stats
->rx_unicast_frames_hi
,
1843 afex_stats
->rx_unicast_frames_lo
,
1844 fw_fcoe_stat
->rx_stat0
.fcoe_rx_pkt_cnt
);
1846 ADD_64_LE(afex_stats
->rx_unicast_frames_hi
,
1848 afex_stats
->rx_unicast_frames_lo
,
1849 fcoe_q_tstorm_stats
->rcv_ucast_pkts
);
1851 ADD_64_LE(afex_stats
->rx_broadcast_frames_hi
,
1853 afex_stats
->rx_broadcast_frames_lo
,
1854 fcoe_q_tstorm_stats
->rcv_bcast_pkts
);
1856 ADD_64_LE(afex_stats
->rx_multicast_frames_hi
,
1858 afex_stats
->rx_multicast_frames_lo
,
1859 fcoe_q_tstorm_stats
->rcv_ucast_pkts
);
1861 ADD_64_LE(afex_stats
->rx_frames_discarded_hi
,
1863 afex_stats
->rx_frames_discarded_lo
,
1864 fcoe_q_tstorm_stats
->checksum_discard
);
1866 ADD_64_LE(afex_stats
->rx_frames_discarded_hi
,
1868 afex_stats
->rx_frames_discarded_lo
,
1869 fcoe_q_tstorm_stats
->pkts_too_big_discard
);
1871 ADD_64_LE(afex_stats
->rx_frames_discarded_hi
,
1873 afex_stats
->rx_frames_discarded_lo
,
1874 fcoe_q_tstorm_stats
->ttl0_discard
);
1876 ADD_64_LE16(afex_stats
->rx_frames_dropped_hi
,
1878 afex_stats
->rx_frames_dropped_lo
,
1879 fcoe_q_tstorm_stats
->no_buff_discard
);
1881 ADD_64_LE(afex_stats
->rx_frames_dropped_hi
,
1883 afex_stats
->rx_frames_dropped_lo
,
1884 fcoe_q_ustorm_stats
->ucast_no_buff_pkts
);
1886 ADD_64_LE(afex_stats
->rx_frames_dropped_hi
,
1888 afex_stats
->rx_frames_dropped_lo
,
1889 fcoe_q_ustorm_stats
->mcast_no_buff_pkts
);
1891 ADD_64_LE(afex_stats
->rx_frames_dropped_hi
,
1893 afex_stats
->rx_frames_dropped_lo
,
1894 fcoe_q_ustorm_stats
->bcast_no_buff_pkts
);
1896 ADD_64_LE(afex_stats
->rx_frames_dropped_hi
,
1898 afex_stats
->rx_frames_dropped_lo
,
1899 fw_fcoe_stat
->rx_stat1
.fcoe_rx_drop_pkt_cnt
);
1901 ADD_64_LE(afex_stats
->rx_frames_dropped_hi
,
1903 afex_stats
->rx_frames_dropped_lo
,
1904 fw_fcoe_stat
->rx_stat2
.fcoe_rx_drop_pkt_cnt
);
1906 ADD_64_LE(afex_stats
->tx_unicast_bytes_hi
,
1908 afex_stats
->tx_unicast_bytes_lo
,
1909 fw_fcoe_stat
->tx_stat
.fcoe_tx_byte_cnt
);
1911 ADD_64_LE(afex_stats
->tx_unicast_bytes_hi
,
1912 fcoe_q_xstorm_stats
->ucast_bytes_sent
.hi
,
1913 afex_stats
->tx_unicast_bytes_lo
,
1914 fcoe_q_xstorm_stats
->ucast_bytes_sent
.lo
);
1916 ADD_64_LE(afex_stats
->tx_broadcast_bytes_hi
,
1917 fcoe_q_xstorm_stats
->bcast_bytes_sent
.hi
,
1918 afex_stats
->tx_broadcast_bytes_lo
,
1919 fcoe_q_xstorm_stats
->bcast_bytes_sent
.lo
);
1921 ADD_64_LE(afex_stats
->tx_multicast_bytes_hi
,
1922 fcoe_q_xstorm_stats
->mcast_bytes_sent
.hi
,
1923 afex_stats
->tx_multicast_bytes_lo
,
1924 fcoe_q_xstorm_stats
->mcast_bytes_sent
.lo
);
1926 ADD_64_LE(afex_stats
->tx_unicast_frames_hi
,
1928 afex_stats
->tx_unicast_frames_lo
,
1929 fw_fcoe_stat
->tx_stat
.fcoe_tx_pkt_cnt
);
1931 ADD_64_LE(afex_stats
->tx_unicast_frames_hi
,
1933 afex_stats
->tx_unicast_frames_lo
,
1934 fcoe_q_xstorm_stats
->ucast_pkts_sent
);
1936 ADD_64_LE(afex_stats
->tx_broadcast_frames_hi
,
1938 afex_stats
->tx_broadcast_frames_lo
,
1939 fcoe_q_xstorm_stats
->bcast_pkts_sent
);
1941 ADD_64_LE(afex_stats
->tx_multicast_frames_hi
,
1943 afex_stats
->tx_multicast_frames_lo
,
1944 fcoe_q_xstorm_stats
->mcast_pkts_sent
);
1946 ADD_64_LE(afex_stats
->tx_frames_dropped_hi
,
1948 afex_stats
->tx_frames_dropped_lo
,
1949 fcoe_q_xstorm_stats
->error_drop_pkts
);
1952 /* if port stats are requested, add them to the PMF
1953 * stats, as anyway they will be accumulated by the
1954 * MCP before sent to the switch
1956 if ((bp
->port
.pmf
) && (stats_type
== VICSTATST_UIF_INDEX
)) {
1957 ADD_64(afex_stats
->rx_frames_dropped_hi
,
1959 afex_stats
->rx_frames_dropped_lo
,
1960 estats
->mac_filter_discard
);
1961 ADD_64(afex_stats
->rx_frames_dropped_hi
,
1963 afex_stats
->rx_frames_dropped_lo
,
1964 estats
->brb_truncate_discard
);
1965 ADD_64(afex_stats
->rx_frames_discarded_hi
,
1967 afex_stats
->rx_frames_discarded_lo
,
1968 estats
->mac_discard
);
1972 int bnx2x_stats_safe_exec(struct bnx2x
*bp
,
1973 void (func_to_exec
)(void *cookie
),
1976 int cnt
= 10, rc
= 0;
1978 /* Wait for statistics to end [while blocking further requests],
1979 * then run supplied function 'safely'.
1981 rc
= down_timeout(&bp
->stats_lock
, HZ
/ 10);
1983 BNX2X_ERR("Failed to take statistics lock for safe execution\n");
1987 bnx2x_stats_comp(bp
);
1988 while (bp
->stats_pending
&& cnt
--)
1989 if (bnx2x_storm_stats_update(bp
))
1990 usleep_range(1000, 2000);
1991 if (bp
->stats_pending
) {
1992 BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
1997 func_to_exec(cookie
);
2000 /* No need to restart statistics - if they're enabled, the timer
2001 * will restart the statistics.
2003 up(&bp
->stats_lock
);