2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * # of exact address filters. The first one is used for the station address,
37 * the rest are available for multicast addresses.
39 #define EXACT_ADDR_FILTERS 8
41 static inline int macidx(const struct cmac
*mac
)
43 return mac
->offset
/ (XGMAC0_1_BASE_ADDR
- XGMAC0_0_BASE_ADDR
);
/*
 * xaui_serdes_reset - power-cycle the XAUI SERDES for this MAC.
 * Writes the VPD-provided XAUI configuration with every reset and
 * power-down bit asserted, then releases the bit groups one stage at a
 * time in the order given by clear[].
 * NOTE(review): this extracted text is missing the loop-counter
 * declaration and any inter-stage delay calls -- recover them from the
 * original driver source before relying on this block.
 */
46 static void xaui_serdes_reset(struct cmac
*mac
)
48 static const unsigned int clear
[] = {
49 F_PWRDN0
| F_PWRDN1
, F_RESETPLL01
, F_RESET0
| F_RESET1
,
50 F_PWRDN2
| F_PWRDN3
, F_RESETPLL23
, F_RESET2
| F_RESET3
54 struct adapter
*adap
= mac
->adapter
;
55 u32 ctrl
= A_XGM_SERDES_CTRL0
+ mac
->offset
;
57 t3_write_reg(adap
, ctrl
, adap
->params
.vpd
.xauicfg
[macidx(mac
)] |
58 F_RESET3
| F_RESET2
| F_RESET1
| F_RESET0
|
59 F_PWRDN3
| F_PWRDN2
| F_PWRDN1
| F_PWRDN0
|
60 F_RESETPLL23
| F_RESETPLL01
);
61 t3_read_reg(adap
, ctrl
);
64 for (i
= 0; i
< ARRAY_SIZE(clear
); i
++) {
65 t3_set_reg_field(adap
, ctrl
, clear
[i
], 0);
/*
 * t3b_pcs_reset - pulse a reset field in A_XGM_RESET_CTRL for this MAC.
 * Two t3_set_reg_field() calls: the first clears a field, the second
 * sets one.  NOTE(review): the mask/value arguments (presumably
 * F_PCS_RESET_) are missing from this extracted text -- recover them
 * from the original driver source.
 */
70 void t3b_pcs_reset(struct cmac
*mac
)
72 t3_set_reg_field(mac
->adapter
, A_XGM_RESET_CTRL
+ mac
->offset
,
75 t3_set_reg_field(mac
->adapter
, A_XGM_RESET_CTRL
+ mac
->offset
, 0,
/*
 * t3_mac_reset - full reset and re-initialization of one XGMAC.
 * Asserts MAC_RESET, reprograms the RX config / hash / exact-match
 * registers from mac_reset_avp[], tunes the RX and TX FIFOs, brings the
 * XAUI SERDES back up on rev-0 XAUI boards, programs the RX framer max
 * size, then releases the block-level resets and clears the software
 * statistics.
 * NOTE(review): several lines (branch bodies, CH_ERR arguments, delay
 * and return statements) are missing from this extracted text --
 * confirm against the original driver source.
 */
79 int t3_mac_reset(struct cmac
*mac
)
81 static const struct addr_val_pair mac_reset_avp
[] = {
84 {A_XGM_RX_CFG
, F_DISPAUSEFRAMES
| F_EN1536BFRAMES
|
85 F_RMFCS
| F_ENJUMBO
| F_ENHASHMCAST
},
86 {A_XGM_RX_HASH_LOW
, 0},
87 {A_XGM_RX_HASH_HIGH
, 0},
88 {A_XGM_RX_EXACT_MATCH_LOW_1
, 0},
89 {A_XGM_RX_EXACT_MATCH_LOW_2
, 0},
90 {A_XGM_RX_EXACT_MATCH_LOW_3
, 0},
91 {A_XGM_RX_EXACT_MATCH_LOW_4
, 0},
92 {A_XGM_RX_EXACT_MATCH_LOW_5
, 0},
93 {A_XGM_RX_EXACT_MATCH_LOW_6
, 0},
94 {A_XGM_RX_EXACT_MATCH_LOW_7
, 0},
95 {A_XGM_RX_EXACT_MATCH_LOW_8
, 0},
96 {A_XGM_STAT_CTRL
, F_CLRSTATS
}
99 struct adapter
*adap
= mac
->adapter
;
100 unsigned int oft
= mac
->offset
;
102 t3_write_reg(adap
, A_XGM_RESET_CTRL
+ oft
, F_MAC_RESET_
);
103 t3_read_reg(adap
, A_XGM_RESET_CTRL
+ oft
); /* flush */
105 t3_write_regs(adap
, mac_reset_avp
, ARRAY_SIZE(mac_reset_avp
), oft
);
106 t3_set_reg_field(adap
, A_XGM_RXFIFO_CFG
+ oft
,
107 F_RXSTRFRWRD
| F_DISERRFRAMES
,
108 uses_xaui(adap
) ? 0 : F_RXSTRFRWRD
);
109 t3_set_reg_field(adap
, A_XGM_TXFIFO_CFG
+ oft
, 0, F_UNDERUNFIX
);
111 if (uses_xaui(adap
)) {
112 if (adap
->params
.rev
== 0) {
113 t3_set_reg_field(adap
, A_XGM_SERDES_CTRL
+ oft
, 0,
114 F_RXENABLE
| F_TXENABLE
);
115 if (t3_wait_op_done(adap
, A_XGM_SERDES_STATUS1
+ oft
,
116 F_CMULOCK
, 1, 5, 2)) {
118 "MAC %d XAUI SERDES CMU lock failed\n",
122 t3_set_reg_field(adap
, A_XGM_SERDES_CTRL
+ oft
, 0,
125 xaui_serdes_reset(mac
);
128 t3_set_reg_field(adap
, A_XGM_RX_MAX_PKT_SIZE
+ oft
,
129 V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE
),
130 V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE
) | F_RXENFRAMER
);
131 val
= F_MAC_RESET_
| F_XGMAC_STOP_EN
;
135 else if (uses_xaui(adap
))
136 val
|= F_PCS_RESET_
| F_XG2G_RESET_
;
138 val
|= F_RGMII_RESET_
| F_XG2G_RESET_
;
139 t3_write_reg(adap
, A_XGM_RESET_CTRL
+ oft
, val
);
140 t3_read_reg(adap
, A_XGM_RESET_CTRL
+ oft
); /* flush */
141 if ((val
& F_PCS_RESET_
) && adap
->params
.rev
) {
146 memset(&mac
->stats
, 0, sizeof(mac
->stats
));
/*
 * t3b2_mac_reset - T3 rev-B2 specific MAC reset sequence.
 * Deactivates the port in the MPS, quiesces NIC traffic and TX pause,
 * asserts MAC_RESET, saves and temporarily rewrites the per-channel
 * TP TX drop configuration (via the A_TP_PIO_ADDR/DATA indirection),
 * waits for the XGM RX FIFO to drain, releases the resets, restores the
 * RX config and drop configuration, and reactivates the port.
 * NOTE(review): several lines (branch conditions, delays, CH_ERR
 * arguments and returns) are missing from this extracted text --
 * confirm against the original driver source.
 */
150 static int t3b2_mac_reset(struct cmac
*mac
)
152 struct adapter
*adap
= mac
->adapter
;
153 unsigned int oft
= mac
->offset
, store
;
154 int idx
= macidx(mac
);
158 t3_set_reg_field(adap
, A_MPS_CFG
, F_PORT0ACTIVE
, 0);
160 t3_set_reg_field(adap
, A_MPS_CFG
, F_PORT1ACTIVE
, 0);
162 /* Stop NIC traffic to reduce the number of TXTOGGLES */
163 t3_set_reg_field(adap
, A_MPS_CFG
, F_ENFORCEPKT
, 0);
164 /* Ensure TX drains */
165 t3_set_reg_field(adap
, A_XGM_TX_CFG
+ oft
, F_TXPAUSEEN
, 0);
167 t3_write_reg(adap
, A_XGM_RESET_CTRL
+ oft
, F_MAC_RESET_
);
168 t3_read_reg(adap
, A_XGM_RESET_CTRL
+ oft
); /* flush */
170 /* Store A_TP_TX_DROP_CFG_CH0 */
171 t3_write_reg(adap
, A_TP_PIO_ADDR
, A_TP_TX_DROP_CFG_CH0
+ idx
);
172 store
= t3_read_reg(adap
, A_TP_TX_DROP_CFG_CH0
+ idx
);
176 /* Change DROP_CFG to 0xc0000011 */
177 t3_write_reg(adap
, A_TP_PIO_ADDR
, A_TP_TX_DROP_CFG_CH0
+ idx
);
178 t3_write_reg(adap
, A_TP_PIO_DATA
, 0xc0000011);
180 /* Check for xgm Rx fifo empty */
181 /* Increased loop count to 1000 from 5 cover 1G and 100Mbps case */
182 if (t3_wait_op_done(adap
, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT
+ oft
,
183 0x80000000, 1, 1000, 2)) {
184 CH_ERR(adap
, "MAC %d Rx fifo drain failed\n",
189 t3_write_reg(adap
, A_XGM_RESET_CTRL
+ oft
, 0);
190 t3_read_reg(adap
, A_XGM_RESET_CTRL
+ oft
); /* flush */
195 else if (uses_xaui(adap
))
196 val
|= F_PCS_RESET_
| F_XG2G_RESET_
;
198 val
|= F_RGMII_RESET_
| F_XG2G_RESET_
;
199 t3_write_reg(adap
, A_XGM_RESET_CTRL
+ oft
, val
);
200 t3_read_reg(adap
, A_XGM_RESET_CTRL
+ oft
); /* flush */
201 if ((val
& F_PCS_RESET_
) && adap
->params
.rev
) {
205 t3_write_reg(adap
, A_XGM_RX_CFG
+ oft
,
206 F_DISPAUSEFRAMES
| F_EN1536BFRAMES
|
207 F_RMFCS
| F_ENJUMBO
| F_ENHASHMCAST
);
209 /* Restore the DROP_CFG */
210 t3_write_reg(adap
, A_TP_PIO_ADDR
, A_TP_TX_DROP_CFG_CH0
+ idx
);
211 t3_write_reg(adap
, A_TP_PIO_DATA
, store
);
214 t3_set_reg_field(adap
, A_MPS_CFG
, 0, F_PORT0ACTIVE
);
216 t3_set_reg_field(adap
, A_MPS_CFG
, 0, F_PORT1ACTIVE
);
218 /* re-enable nic traffic */
219 t3_set_reg_field(adap
, A_MPS_CFG
, F_ENFORCEPKT
, 1);
221 /* Set: re-enable NIC traffic */
222 t3_set_reg_field(adap
, A_MPS_CFG
, F_ENFORCEPKT
, 1);
228 * Set the exact match register 'idx' to recognize the given Ethernet address.
230 static void set_addr_filter(struct cmac
*mac
, int idx
, const u8
* addr
)
232 u32 addr_lo
, addr_hi
;
233 unsigned int oft
= mac
->offset
+ idx
* 8;
235 addr_lo
= (addr
[3] << 24) | (addr
[2] << 16) | (addr
[1] << 8) | addr
[0];
236 addr_hi
= (addr
[5] << 8) | addr
[4];
238 t3_write_reg(mac
->adapter
, A_XGM_RX_EXACT_MATCH_LOW_1
+ oft
, addr_lo
);
239 t3_write_reg(mac
->adapter
, A_XGM_RX_EXACT_MATCH_HIGH_1
+ oft
, addr_hi
);
242 /* Set one of the station's unicast MAC addresses. */
/*
 * t3_mac_set_address - set one of the MAC's unicast addresses.
 * Rejects indices at or beyond mac->nucast (the count of filters
 * reserved for unicast use), otherwise programs the exact-match filter.
 * NOTE(review): the return statements are missing from this extracted
 * text -- recover them from the original driver source.
 */
243 int t3_mac_set_address(struct cmac
*mac
, unsigned int idx
, u8 addr
[6])
245 if (idx
>= mac
->nucast
)
247 set_addr_filter(mac
, idx
, addr
);
252 * Specify the number of exact address filters that should be reserved for
253 * unicast addresses. Caller should reload the unicast and multicast addresses
254 * after calling this.
/*
 * t3_mac_set_num_ucast - reserve @n exact-match filters for unicast.
 * Rejects requests beyond EXACT_ADDR_FILTERS.
 * NOTE(review): the assignment to mac->nucast and the return statements
 * are missing from this extracted text -- recover them from the
 * original driver source.
 */
256 int t3_mac_set_num_ucast(struct cmac
*mac
, int n
)
258 if (n
> EXACT_ADDR_FILTERS
)
264 void t3_mac_disable_exact_filters(struct cmac
*mac
)
266 unsigned int i
, reg
= mac
->offset
+ A_XGM_RX_EXACT_MATCH_LOW_1
;
268 for (i
= 0; i
< EXACT_ADDR_FILTERS
; i
++, reg
+= 8) {
269 u32 v
= t3_read_reg(mac
->adapter
, reg
);
270 t3_write_reg(mac
->adapter
, reg
, v
);
272 t3_read_reg(mac
->adapter
, A_XGM_RX_EXACT_MATCH_LOW_1
); /* flush */
275 void t3_mac_enable_exact_filters(struct cmac
*mac
)
277 unsigned int i
, reg
= mac
->offset
+ A_XGM_RX_EXACT_MATCH_HIGH_1
;
279 for (i
= 0; i
< EXACT_ADDR_FILTERS
; i
++, reg
+= 8) {
280 u32 v
= t3_read_reg(mac
->adapter
, reg
);
281 t3_write_reg(mac
->adapter
, reg
, v
);
283 t3_read_reg(mac
->adapter
, A_XGM_RX_EXACT_MATCH_LOW_1
); /* flush */
286 /* Calculate the RX hash filter index of an Ethernet address */
/*
 * hash_hw_addr - compute the RX hash-filter index of an Ethernet
 * address by XOR-folding each bit of the 6 address bytes into the hash.
 * NOTE(review): the update of the bit-position counter i and the return
 * statement are missing from this extracted text -- recover them from
 * the original driver source.
 */
287 static int hash_hw_addr(const u8
* addr
)
289 int hash
= 0, octet
, bit
, i
= 0, c
;
291 for (octet
= 0; octet
< 6; ++octet
)
292 for (c
= addr
[octet
], bit
= 0; bit
< 8; c
>>= 1, ++bit
) {
293 hash
^= (c
& 1) << i
;
/*
 * t3_mac_set_rx_mode - program promiscuous/multicast RX filtering.
 * Sets COPYALLFRAMES for IFF_PROMISC, fills both hash registers with
 * all-ones for IFF_ALLMULTI, otherwise places multicast addresses into
 * the spare exact-match filters (after mac->nucast) and hashes the
 * overflow into the 64-bit hash filter.
 * NOTE(review): the declaration of addr, the else branches, and the
 * return statement are missing from this extracted text -- confirm
 * against the original driver source.
 */
300 int t3_mac_set_rx_mode(struct cmac
*mac
, struct t3_rx_mode
*rm
)
302 u32 val
, hash_lo
, hash_hi
;
303 struct adapter
*adap
= mac
->adapter
;
304 unsigned int oft
= mac
->offset
;
306 val
= t3_read_reg(adap
, A_XGM_RX_CFG
+ oft
) & ~F_COPYALLFRAMES
;
307 if (rm
->dev
->flags
& IFF_PROMISC
)
308 val
|= F_COPYALLFRAMES
;
309 t3_write_reg(adap
, A_XGM_RX_CFG
+ oft
, val
);
311 if (rm
->dev
->flags
& IFF_ALLMULTI
)
312 hash_lo
= hash_hi
= 0xffffffff;
315 int exact_addr_idx
= mac
->nucast
;
317 hash_lo
= hash_hi
= 0;
318 while ((addr
= t3_get_next_mcaddr(rm
)))
319 if (exact_addr_idx
< EXACT_ADDR_FILTERS
)
320 set_addr_filter(mac
, exact_addr_idx
++, addr
);
322 int hash
= hash_hw_addr(addr
);
325 hash_lo
|= (1 << hash
);
327 hash_hi
|= (1 << (hash
- 32));
331 t3_write_reg(adap
, A_XGM_RX_HASH_LOW
+ oft
, hash_lo
);
332 t3_write_reg(adap
, A_XGM_RX_HASH_HIGH
+ oft
, hash_hi
);
336 static int rx_fifo_hwm(int mtu
)
340 hwm
= max(MAC_RXFIFO_SIZE
- 3 * mtu
, (MAC_RXFIFO_SIZE
* 38) / 100);
341 return min(hwm
, MAC_RXFIFO_SIZE
- 8192);
/*
 * t3_mac_set_mtu - set the MAC's MTU and retune the FIFOs.
 * Programs the RX max packet size (draining the RX FIFO first on
 * rev >= B2 with RX enabled, under temporarily disabled filters),
 * recomputes the PAUSE low/high watermarks, scales the TX FIFO
 * threshold and IPG from the core clock, and programs the pause timer
 * and quanta.
 * NOTE(review): declarations (e.g. ipg), error returns and delay calls
 * are missing from this extracted text -- confirm against the original
 * driver source.
 */
344 int t3_mac_set_mtu(struct cmac
*mac
, unsigned int mtu
)
346 int hwm
, lwm
, divisor
;
348 unsigned int thres
, v
, reg
;
349 struct adapter
*adap
= mac
->adapter
;
352 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
353 * packet size register includes header, but not FCS.
356 if (mtu
> MAX_FRAME_SIZE
- 4)
358 t3_write_reg(adap
, A_XGM_RX_MAX_PKT_SIZE
+ mac
->offset
, mtu
);
360 if (adap
->params
.rev
>= T3_REV_B2
&&
361 (t3_read_reg(adap
, A_XGM_RX_CTRL
+ mac
->offset
) & F_RXEN
)) {
362 t3_mac_disable_exact_filters(mac
);
363 v
= t3_read_reg(adap
, A_XGM_RX_CFG
+ mac
->offset
);
364 t3_set_reg_field(adap
, A_XGM_RX_CFG
+ mac
->offset
,
365 F_ENHASHMCAST
| F_COPYALLFRAMES
, F_DISBCAST
);
367 reg
= adap
->params
.rev
== T3_REV_B2
?
368 A_XGM_RX_MAX_PKT_SIZE_ERR_CNT
: A_XGM_RXFIFO_CFG
;
371 if (t3_wait_op_done(adap
, reg
+ mac
->offset
,
372 F_RXFIFO_EMPTY
, 1, 20, 5)) {
373 t3_write_reg(adap
, A_XGM_RX_CFG
+ mac
->offset
, v
);
374 t3_mac_enable_exact_filters(mac
);
377 t3_set_reg_field(adap
, A_XGM_RX_MAX_PKT_SIZE
+ mac
->offset
,
378 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE
),
379 V_RXMAXPKTSIZE(mtu
));
380 t3_write_reg(adap
, A_XGM_RX_CFG
+ mac
->offset
, v
);
381 t3_mac_enable_exact_filters(mac
);
383 t3_set_reg_field(adap
, A_XGM_RX_MAX_PKT_SIZE
+ mac
->offset
,
384 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE
),
385 V_RXMAXPKTSIZE(mtu
));
388 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
389 * HWM only if flow-control is enabled.
391 hwm
= rx_fifo_hwm(mtu
);
392 lwm
= min(3 * (int)mtu
, MAC_RXFIFO_SIZE
/ 4);
393 v
= t3_read_reg(adap
, A_XGM_RXFIFO_CFG
+ mac
->offset
);
394 v
&= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM
);
395 v
|= V_RXFIFOPAUSELWM(lwm
/ 8);
396 if (G_RXFIFOPAUSEHWM(v
))
397 v
= (v
& ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM
)) |
398 V_RXFIFOPAUSEHWM(hwm
/ 8);
400 t3_write_reg(adap
, A_XGM_RXFIFO_CFG
+ mac
->offset
, v
);
402 /* Adjust the TX FIFO threshold based on the MTU */
403 thres
= (adap
->params
.vpd
.cclk
* 1000) / 15625;
404 thres
= (thres
* mtu
) / 1000;
407 thres
= mtu
> thres
? (mtu
- thres
+ 7) / 8 : 0;
408 thres
= max(thres
, 8U); /* need at least 8 */
409 ipg
= (adap
->params
.rev
== T3_REV_C
) ? 0 : 1;
410 t3_set_reg_field(adap
, A_XGM_TXFIFO_CFG
+ mac
->offset
,
411 V_TXFIFOTHRESH(M_TXFIFOTHRESH
) | V_TXIPG(M_TXIPG
),
412 V_TXFIFOTHRESH(thres
) | V_TXIPG(ipg
));
414 if (adap
->params
.rev
> 0) {
415 divisor
= (adap
->params
.rev
== T3_REV_C
) ? 64 : 8;
416 t3_write_reg(adap
, A_XGM_PAUSE_TIMER
+ mac
->offset
,
417 (hwm
- lwm
) * 4 / divisor
);
419 t3_write_reg(adap
, A_XGM_TX_PAUSE_QUANTA
+ mac
->offset
,
420 MAC_RXFIFO_SIZE
* 4 * 8 / 512);
/*
 * t3_mac_set_speed_duplex_fc - set port speed, duplex and flow control.
 * Only full duplex is supported.  Maps the SPEED_* constants to the
 * PORTSPEED field, recomputes the RX FIFO pause high-watermark from the
 * currently programmed max packet size, and enables TX of pause frames
 * when PAUSE_RX is requested.
 * NOTE(review): the declaration of val, the invalid-speed/duplex error
 * returns and the surrounding branch structure are missing from this
 * extracted text -- confirm against the original driver source.
 */
424 int t3_mac_set_speed_duplex_fc(struct cmac
*mac
, int speed
, int duplex
, int fc
)
427 struct adapter
*adap
= mac
->adapter
;
428 unsigned int oft
= mac
->offset
;
430 if (duplex
>= 0 && duplex
!= DUPLEX_FULL
)
433 if (speed
== SPEED_10
)
434 val
= V_PORTSPEED(0);
435 else if (speed
== SPEED_100
)
436 val
= V_PORTSPEED(1);
437 else if (speed
== SPEED_1000
)
438 val
= V_PORTSPEED(2);
439 else if (speed
== SPEED_10000
)
440 val
= V_PORTSPEED(3);
444 t3_set_reg_field(adap
, A_XGM_PORT_CFG
+ oft
,
445 V_PORTSPEED(M_PORTSPEED
), val
);
448 val
= t3_read_reg(adap
, A_XGM_RXFIFO_CFG
+ oft
);
449 val
&= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM
);
451 u32 rx_max_pkt_size
=
452 G_RXMAXPKTSIZE(t3_read_reg(adap
,
453 A_XGM_RX_MAX_PKT_SIZE
+ oft
));
454 val
|= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size
) / 8);
456 t3_write_reg(adap
, A_XGM_RXFIFO_CFG
+ oft
, val
);
458 t3_set_reg_field(adap
, A_XGM_TX_CFG
+ oft
, F_TXPAUSEEN
,
459 (fc
& PAUSE_RX
) ? F_TXPAUSEEN
: 0);
/*
 * t3_mac_enable - enable the MAC in the requested direction(s).
 * For TX: programs the per-channel TP TX drop configuration and mode
 * (rev-dependent values, via the A_TP_PIO_ADDR/DATA indirection),
 * enables the transmitter, and snapshots the TX/RX progress counters
 * used by the watchdog.  For RX: enables the receiver.
 * NOTE(review): several continuation lines of the counter reads and the
 * return statement are missing from this extracted text -- confirm
 * against the original driver source.
 */
463 int t3_mac_enable(struct cmac
*mac
, int which
)
465 int idx
= macidx(mac
);
466 struct adapter
*adap
= mac
->adapter
;
467 unsigned int oft
= mac
->offset
;
468 struct mac_stats
*s
= &mac
->stats
;
470 if (which
& MAC_DIRECTION_TX
) {
471 t3_write_reg(adap
, A_TP_PIO_ADDR
, A_TP_TX_DROP_CFG_CH0
+ idx
);
472 t3_write_reg(adap
, A_TP_PIO_DATA
,
473 adap
->params
.rev
== T3_REV_C
?
474 0xc4ffff01 : 0xc0ede401);
475 t3_write_reg(adap
, A_TP_PIO_ADDR
, A_TP_TX_DROP_MODE
);
476 t3_set_reg_field(adap
, A_TP_PIO_DATA
, 1 << idx
,
477 adap
->params
.rev
== T3_REV_C
? 0 : 1 << idx
);
479 t3_write_reg(adap
, A_XGM_TX_CTRL
+ oft
, F_TXEN
);
481 t3_write_reg(adap
, A_TP_PIO_ADDR
, A_TP_TX_DROP_CNT_CH0
+ idx
);
482 mac
->tx_mcnt
= s
->tx_frames
;
483 mac
->tx_tcnt
= (G_TXDROPCNTCH0RCVD(t3_read_reg(adap
,
485 mac
->tx_xcnt
= (G_TXSPI4SOPCNT(t3_read_reg(adap
,
486 A_XGM_TX_SPI4_SOP_EOP_CNT
+
488 mac
->rx_mcnt
= s
->rx_frames
;
489 mac
->rx_pause
= s
->rx_pause
;
490 mac
->rx_xcnt
= (G_TXSPI4SOPCNT(t3_read_reg(adap
,
491 A_XGM_RX_SPI4_SOP_EOP_CNT
+
493 mac
->rx_ocnt
= s
->rx_fifo_ovfl
;
497 if (which
& MAC_DIRECTION_RX
)
498 t3_write_reg(adap
, A_XGM_RX_CTRL
+ oft
, F_RXEN
);
/*
 * t3_mac_disable - disable the MAC in the requested direction(s).
 * For TX: clears the TX control register.  For RX: asserts block-level
 * resets (MAC plus XAUI- or RGMII-path bits depending on the board),
 * clears the RX control register, and writes the reset value.
 * NOTE(review): the is_10G branch, delay calls and return statement are
 * missing from this extracted text -- confirm against the original
 * driver source.
 */
502 int t3_mac_disable(struct cmac
*mac
, int which
)
504 struct adapter
*adap
= mac
->adapter
;
506 if (which
& MAC_DIRECTION_TX
) {
507 t3_write_reg(adap
, A_XGM_TX_CTRL
+ mac
->offset
, 0);
510 if (which
& MAC_DIRECTION_RX
) {
511 int val
= F_MAC_RESET_
;
513 t3_set_reg_field(mac
->adapter
, A_XGM_RESET_CTRL
+ mac
->offset
,
516 t3_write_reg(adap
, A_XGM_RX_CTRL
+ mac
->offset
, 0);
519 else if (uses_xaui(adap
))
520 val
|= F_PCS_RESET_
| F_XG2G_RESET_
;
522 val
|= F_RGMII_RESET_
| F_XG2G_RESET_
;
523 t3_write_reg(mac
->adapter
, A_XGM_RESET_CTRL
+ mac
->offset
, val
);
/*
 * t3b2_mac_watchdog_task - periodic check for a hung transmitter.
 * Compares the current frame/pause counters against the snapshots taken
 * at the last invocation; when tx_frames has not advanced it samples
 * the SPI4 SOP/EOP and TP drop counters to decide whether the MAC is
 * stuck, updates the snapshots, and toggles TX_CTRL off/on to recover.
 * NOTE(review): large parts of this function (the status computation
 * and the branch bodies between original lines 547-569 and after 580)
 * are missing from this extracted text -- confirm against the original
 * driver source.
 */
528 int t3b2_mac_watchdog_task(struct cmac
*mac
)
530 struct adapter
*adap
= mac
->adapter
;
531 struct mac_stats
*s
= &mac
->stats
;
532 unsigned int tx_tcnt
, tx_xcnt
;
533 u64 tx_mcnt
= s
->tx_frames
;
537 tx_xcnt
= 1; /* By default tx_xcnt is making progress */
538 tx_tcnt
= mac
->tx_tcnt
; /* If tx_mcnt is progressing ignore tx_tcnt */
539 if (tx_mcnt
== mac
->tx_mcnt
&& mac
->rx_pause
== s
->rx_pause
) {
540 tx_xcnt
= (G_TXSPI4SOPCNT(t3_read_reg(adap
,
541 A_XGM_TX_SPI4_SOP_EOP_CNT
+
544 t3_write_reg(adap
, A_TP_PIO_ADDR
,
545 A_TP_TX_DROP_CNT_CH0
+ macidx(mac
));
546 tx_tcnt
= (G_TXDROPCNTCH0RCVD(t3_read_reg(adap
,
556 if ((tx_tcnt
!= mac
->tx_tcnt
) && (mac
->tx_xcnt
== 0)) {
557 if (mac
->toggle_cnt
> 4) {
570 mac
->tx_tcnt
= tx_tcnt
;
571 mac
->tx_xcnt
= tx_xcnt
;
572 mac
->tx_mcnt
= s
->tx_frames
;
573 mac
->rx_pause
= s
->rx_pause
;
575 t3_write_reg(adap
, A_XGM_TX_CTRL
+ mac
->offset
, 0);
576 t3_read_reg(adap
, A_XGM_TX_CTRL
+ mac
->offset
); /* flush */
577 t3_write_reg(adap
, A_XGM_TX_CTRL
+ mac
->offset
, mac
->txen
);
578 t3_read_reg(adap
, A_XGM_TX_CTRL
+ mac
->offset
); /* flush */
580 } else if (status
== 2) {
588 * This function is called periodically to accumulate the current values of the
589 * RMON counters into the port statistics. Since the packet counters are only
590 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
591 * called more frequently than that. The byte counters are 45-bit wide, they
592 * would overflow in ~7.8 hours.
594 const struct mac_stats
*t3_mac_update_stats(struct cmac
*mac
)
596 #define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
597 #define RMON_UPDATE(mac, name, reg) \
598 (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
599 #define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
600 (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
601 ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
605 RMON_UPDATE64(mac
, rx_octets
, RX_BYTES_LOW
, RX_BYTES_HIGH
);
606 RMON_UPDATE64(mac
, rx_frames
, RX_FRAMES_LOW
, RX_FRAMES_HIGH
);
607 RMON_UPDATE(mac
, rx_mcast_frames
, RX_MCAST_FRAMES
);
608 RMON_UPDATE(mac
, rx_bcast_frames
, RX_BCAST_FRAMES
);
609 RMON_UPDATE(mac
, rx_fcs_errs
, RX_CRC_ERR_FRAMES
);
610 RMON_UPDATE(mac
, rx_pause
, RX_PAUSE_FRAMES
);
611 RMON_UPDATE(mac
, rx_jabber
, RX_JABBER_FRAMES
);
612 RMON_UPDATE(mac
, rx_short
, RX_SHORT_FRAMES
);
613 RMON_UPDATE(mac
, rx_symbol_errs
, RX_SYM_CODE_ERR_FRAMES
);
615 RMON_UPDATE(mac
, rx_too_long
, RX_OVERSIZE_FRAMES
);
617 v
= RMON_READ(mac
, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT
);
618 if (mac
->adapter
->params
.rev
== T3_REV_B2
)
620 mac
->stats
.rx_too_long
+= v
;
622 RMON_UPDATE(mac
, rx_frames_64
, RX_64B_FRAMES
);
623 RMON_UPDATE(mac
, rx_frames_65_127
, RX_65_127B_FRAMES
);
624 RMON_UPDATE(mac
, rx_frames_128_255
, RX_128_255B_FRAMES
);
625 RMON_UPDATE(mac
, rx_frames_256_511
, RX_256_511B_FRAMES
);
626 RMON_UPDATE(mac
, rx_frames_512_1023
, RX_512_1023B_FRAMES
);
627 RMON_UPDATE(mac
, rx_frames_1024_1518
, RX_1024_1518B_FRAMES
);
628 RMON_UPDATE(mac
, rx_frames_1519_max
, RX_1519_MAXB_FRAMES
);
630 RMON_UPDATE64(mac
, tx_octets
, TX_BYTE_LOW
, TX_BYTE_HIGH
);
631 RMON_UPDATE64(mac
, tx_frames
, TX_FRAME_LOW
, TX_FRAME_HIGH
);
632 RMON_UPDATE(mac
, tx_mcast_frames
, TX_MCAST
);
633 RMON_UPDATE(mac
, tx_bcast_frames
, TX_BCAST
);
634 RMON_UPDATE(mac
, tx_pause
, TX_PAUSE
);
635 /* This counts error frames in general (bad FCS, underrun, etc). */
636 RMON_UPDATE(mac
, tx_underrun
, TX_ERR_FRAMES
);
638 RMON_UPDATE(mac
, tx_frames_64
, TX_64B_FRAMES
);
639 RMON_UPDATE(mac
, tx_frames_65_127
, TX_65_127B_FRAMES
);
640 RMON_UPDATE(mac
, tx_frames_128_255
, TX_128_255B_FRAMES
);
641 RMON_UPDATE(mac
, tx_frames_256_511
, TX_256_511B_FRAMES
);
642 RMON_UPDATE(mac
, tx_frames_512_1023
, TX_512_1023B_FRAMES
);
643 RMON_UPDATE(mac
, tx_frames_1024_1518
, TX_1024_1518B_FRAMES
);
644 RMON_UPDATE(mac
, tx_frames_1519_max
, TX_1519_MAXB_FRAMES
);
646 /* The next stat isn't clear-on-read. */
647 t3_write_reg(mac
->adapter
, A_TP_MIB_INDEX
, mac
->offset
? 51 : 50);
648 v
= t3_read_reg(mac
->adapter
, A_TP_MIB_RDATA
);
649 lo
= (u32
) mac
->stats
.rx_cong_drops
;
650 mac
->stats
.rx_cong_drops
+= (u64
) (v
- lo
);