/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = sizeof(struct host_port_stats) >> 2;

	/* if PFC stats are not supported by the MFW, don't DMA them */
	if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
		res -= (sizeof(u32)*4) >> 2;

	return res;
}
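/* Note: DMAE transfer lengths throughout this file are expressed in
 * 32-bit words, which is why byte sizes and GRC register offsets are
 * shifted right by 2 before being programmed into a dmae_command.
 */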
/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
		   bp->fw_stats_req->hdr.drv_stats_counter);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}
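/* Note on bnx2x_storm_stats_post(): the unlocked stats_pending test is
 * only an optimization; the flag is re-checked under stats_lock before
 * the counter is advanced, so concurrent callers cannot post two
 * ramrods carrying the same drv_stats_counter value.
 */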
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
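/* Note on bnx2x_hw_stats_post(): when several dmae_command entries have
 * been queued in the slowpath area (bp->executer_idx != 0), only a
 * "loader" command is posted here.  It copies the first queued command
 * into DMAE command memory; each queued command then kicks the next one
 * through its GRC completion, and the last one completes to stats_comp
 * in host memory with DMAE_COMP_VAL, which bnx2x_stats_comp() polls for.
 */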
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}
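/* Note on bnx2x_stats_comp(): with the 10-iteration budget and
 * usleep_range(1000, 1000) per pass, the wait for the DMAE chain to
 * write DMAE_COMP_VAL is bounded at roughly 10ms before the timeout
 * message is printed.
 */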
/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
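/* Note on bnx2x_stats_pmf_update(): a newly elected PMF inherits the
 * port statistics accumulated so far by reading them back from the MCP
 * scratchpad.  The read is split in two because a single DMAE read is
 * limited to DMAE_LEN32_RD_MAX 32-bit words; the first half completes
 * to GRC (chaining to the second), the second completes to stats_comp.
 */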
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);
	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
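/* Note on bnx2x_port_stats_init(): this only builds the DMAE chain in
 * the slowpath area.  A later bnx2x_hw_stats_post() posts it through the
 * loader entry, so one pass DMAs the driver's port/function stats out to
 * the MCP and pulls the fresh MAC and NIG counters back into host memory.
 */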
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
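/* Note: in host_port_stats, mac_stx[0] holds the raw MAC counters from
 * the previous DMA pass and mac_stx[1] the accumulated values, as
 * maintained by the UPDATE_STAT64/ADD_STAT64 macros (defined in
 * bnx2x_stats.h); that is why the estats fields above are copied from
 * mac_stx[1].
 */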
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
			&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
			&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm  xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm  ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm  cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm  tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
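/* Note on bnx2x_storm_stats_update(): returning -EAGAIN when any storm
 * counter does not match cur_stats_counter leaves the previous values
 * untouched; the caller (bnx2x_stats_update) tolerates a few such
 * misses before complaining that storm stats were not updated.
 */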
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i)
		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	     bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
						&bp->fp[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.port_stx)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
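/* The table is indexed as bnx2x_stats_stm[current state][event]; each
 * entry names the handler to run and the state to move to, so
 * bnx2x_stats_handle() below is just a table lookup plus a call.
 */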
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))
		return;

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
/* This function will prepare the statistics ramrod data so that we
 * only have to increment the statistics counter and send the ramrod
 * each time we have to.
 */
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}
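/* Note: the query[] array prepared above is what a single
 * RAMROD_CMD_ID_COMMON_STAT_QUERY carries: one port entry, one PF
 * entry, an optional FCoE entry and one entry per ethernet queue, each
 * pointing the FW at the DMA address its results should be written to.
 */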
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
		if (bp->stats_init) {
			memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
			memset(&fp->eth_q_stats_old, 0,
			       sizeof(fp->eth_q_stats_old));
		}
	}

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}