/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/if_vlan.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>

#include "en_port.h"
#include "mlx4_en.h"
44 int mlx4_SET_VLAN_FLTR(struct mlx4_dev
*dev
, struct mlx4_en_priv
*priv
)
46 struct mlx4_cmd_mailbox
*mailbox
;
47 struct mlx4_set_vlan_fltr_mbox
*filter
;
54 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
56 return PTR_ERR(mailbox
);
58 filter
= mailbox
->buf
;
59 for (i
= VLAN_FLTR_SIZE
- 1; i
>= 0; i
--) {
61 for (j
= 0; j
< 32; j
++)
62 if (test_bit(index
++, priv
->active_vlans
))
64 filter
->entry
[i
] = cpu_to_be32(entry
);
66 err
= mlx4_cmd(dev
, mailbox
->dma
, priv
->port
, 0, MLX4_CMD_SET_VLAN_FLTR
,
67 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
68 mlx4_free_cmd_mailbox(dev
, mailbox
);
72 int mlx4_en_QUERY_PORT(struct mlx4_en_dev
*mdev
, u8 port
)
74 struct mlx4_en_query_port_context
*qport_context
;
75 struct mlx4_en_priv
*priv
= netdev_priv(mdev
->pndev
[port
]);
76 struct mlx4_en_port_state
*state
= &priv
->port_state
;
77 struct mlx4_cmd_mailbox
*mailbox
;
80 mailbox
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
82 return PTR_ERR(mailbox
);
83 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox
->dma
, port
, 0,
84 MLX4_CMD_QUERY_PORT
, MLX4_CMD_TIME_CLASS_B
,
88 qport_context
= mailbox
->buf
;
90 /* This command is always accessed from Ethtool context
91 * already synchronized, no need in locking */
92 state
->link_state
= !!(qport_context
->link_up
& MLX4_EN_LINK_UP_MASK
);
93 switch (qport_context
->link_speed
& MLX4_EN_SPEED_MASK
) {
94 case MLX4_EN_100M_SPEED
:
95 state
->link_speed
= SPEED_100
;
97 case MLX4_EN_1G_SPEED
:
98 state
->link_speed
= SPEED_1000
;
100 case MLX4_EN_10G_SPEED_XAUI
:
101 case MLX4_EN_10G_SPEED_XFI
:
102 state
->link_speed
= SPEED_10000
;
104 case MLX4_EN_20G_SPEED
:
105 state
->link_speed
= SPEED_20000
;
107 case MLX4_EN_40G_SPEED
:
108 state
->link_speed
= SPEED_40000
;
110 case MLX4_EN_56G_SPEED
:
111 state
->link_speed
= SPEED_56000
;
114 state
->link_speed
= -1;
118 state
->transceiver
= qport_context
->transceiver
;
120 state
->flags
= 0; /* Reset and recalculate the port flags */
121 state
->flags
|= (qport_context
->link_up
& MLX4_EN_ANC_MASK
) ?
122 MLX4_EN_PORT_ANC
: 0;
123 state
->flags
|= (qport_context
->autoneg
& MLX4_EN_AUTONEG_MASK
) ?
124 MLX4_EN_PORT_ANE
: 0;
127 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
131 /* Each counter set is located in struct mlx4_en_stat_out_mbox
132 * with a const offset between its prio components.
133 * This function runs over a counter set and sum all of it's prio components.
135 static unsigned long en_stats_adder(__be64
*start
, __be64
*next
, int num
)
137 __be64
*curr
= start
;
138 unsigned long ret
= 0;
140 int offset
= next
- start
;
142 for (i
= 0; i
< num
; i
++) {
143 ret
+= be64_to_cpu(*curr
);
150 void mlx4_en_fold_software_stats(struct net_device
*dev
)
152 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
153 struct mlx4_en_dev
*mdev
= priv
->mdev
;
154 unsigned long packets
, bytes
;
157 if (!priv
->port_up
|| mlx4_is_master(mdev
->dev
))
162 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
163 const struct mlx4_en_rx_ring
*ring
= priv
->rx_ring
[i
];
165 packets
+= READ_ONCE(ring
->packets
);
166 bytes
+= READ_ONCE(ring
->bytes
);
168 dev
->stats
.rx_packets
= packets
;
169 dev
->stats
.rx_bytes
= bytes
;
173 for (i
= 0; i
< priv
->tx_ring_num
[TX
]; i
++) {
174 const struct mlx4_en_tx_ring
*ring
= priv
->tx_ring
[TX
][i
];
176 packets
+= READ_ONCE(ring
->packets
);
177 bytes
+= READ_ONCE(ring
->bytes
);
179 dev
->stats
.tx_packets
= packets
;
180 dev
->stats
.tx_bytes
= bytes
;
183 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev
*mdev
, u8 port
, u8 reset
)
185 struct mlx4_counter tmp_counter_stats
;
186 struct mlx4_en_stat_out_mbox
*mlx4_en_stats
;
187 struct mlx4_en_stat_out_flow_control_mbox
*flowstats
;
188 struct net_device
*dev
= mdev
->pndev
[port
];
189 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
190 struct net_device_stats
*stats
= &dev
->stats
;
191 struct mlx4_cmd_mailbox
*mailbox
, *mailbox_priority
;
192 u64 in_mod
= reset
<< 8 | port
;
194 int i
, counter_index
;
195 unsigned long sw_tx_dropped
= 0;
196 unsigned long sw_rx_dropped
= 0;
198 mailbox
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
200 return PTR_ERR(mailbox
);
202 mailbox_priority
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
203 if (IS_ERR(mailbox_priority
)) {
204 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
205 return PTR_ERR(mailbox_priority
);
208 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox
->dma
, in_mod
, 0,
209 MLX4_CMD_DUMP_ETH_STATS
, MLX4_CMD_TIME_CLASS_B
,
214 mlx4_en_stats
= mailbox
->buf
;
216 memset(&tmp_counter_stats
, 0, sizeof(tmp_counter_stats
));
217 counter_index
= mlx4_get_default_counter_index(mdev
->dev
, port
);
218 err
= mlx4_get_counter_stats(mdev
->dev
, counter_index
,
219 &tmp_counter_stats
, reset
);
221 /* 0xffs indicates invalid value */
222 memset(mailbox_priority
->buf
, 0xff,
223 sizeof(*flowstats
) * MLX4_NUM_PRIORITIES
);
225 if (mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN
) {
226 memset(mailbox_priority
->buf
, 0,
227 sizeof(*flowstats
) * MLX4_NUM_PRIORITIES
);
228 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox_priority
->dma
,
229 in_mod
| MLX4_DUMP_ETH_STATS_FLOW_CONTROL
,
230 0, MLX4_CMD_DUMP_ETH_STATS
,
231 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
236 flowstats
= mailbox_priority
->buf
;
238 spin_lock_bh(&priv
->stats_lock
);
240 mlx4_en_fold_software_stats(dev
);
242 priv
->port_stats
.rx_chksum_good
= 0;
243 priv
->port_stats
.rx_chksum_none
= 0;
244 priv
->port_stats
.rx_chksum_complete
= 0;
245 priv
->port_stats
.rx_alloc_pages
= 0;
246 priv
->xdp_stats
.rx_xdp_drop
= 0;
247 priv
->xdp_stats
.rx_xdp_tx
= 0;
248 priv
->xdp_stats
.rx_xdp_tx_full
= 0;
249 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
250 const struct mlx4_en_rx_ring
*ring
= priv
->rx_ring
[i
];
252 sw_rx_dropped
+= READ_ONCE(ring
->dropped
);
253 priv
->port_stats
.rx_chksum_good
+= READ_ONCE(ring
->csum_ok
);
254 priv
->port_stats
.rx_chksum_none
+= READ_ONCE(ring
->csum_none
);
255 priv
->port_stats
.rx_chksum_complete
+= READ_ONCE(ring
->csum_complete
);
256 priv
->port_stats
.rx_alloc_pages
+= READ_ONCE(ring
->rx_alloc_pages
);
257 priv
->xdp_stats
.rx_xdp_drop
+= READ_ONCE(ring
->xdp_drop
);
258 priv
->xdp_stats
.rx_xdp_tx
+= READ_ONCE(ring
->xdp_tx
);
259 priv
->xdp_stats
.rx_xdp_tx_full
+= READ_ONCE(ring
->xdp_tx_full
);
261 priv
->port_stats
.tx_chksum_offload
= 0;
262 priv
->port_stats
.queue_stopped
= 0;
263 priv
->port_stats
.wake_queue
= 0;
264 priv
->port_stats
.tso_packets
= 0;
265 priv
->port_stats
.xmit_more
= 0;
267 for (i
= 0; i
< priv
->tx_ring_num
[TX
]; i
++) {
268 const struct mlx4_en_tx_ring
*ring
= priv
->tx_ring
[TX
][i
];
270 sw_tx_dropped
+= READ_ONCE(ring
->tx_dropped
);
271 priv
->port_stats
.tx_chksum_offload
+= READ_ONCE(ring
->tx_csum
);
272 priv
->port_stats
.queue_stopped
+= READ_ONCE(ring
->queue_stopped
);
273 priv
->port_stats
.wake_queue
+= READ_ONCE(ring
->wake_queue
);
274 priv
->port_stats
.tso_packets
+= READ_ONCE(ring
->tso_packets
);
275 priv
->port_stats
.xmit_more
+= READ_ONCE(ring
->xmit_more
);
278 if (!mlx4_is_slave(mdev
->dev
)) {
279 struct mlx4_en_phy_stats
*p_stats
= &priv
->phy_stats
;
281 p_stats
->rx_packets_phy
=
282 en_stats_adder(&mlx4_en_stats
->RTOT_prio_0
,
283 &mlx4_en_stats
->RTOT_prio_1
,
285 p_stats
->tx_packets_phy
=
286 en_stats_adder(&mlx4_en_stats
->TTOT_prio_0
,
287 &mlx4_en_stats
->TTOT_prio_1
,
289 p_stats
->rx_bytes_phy
=
290 en_stats_adder(&mlx4_en_stats
->ROCT_prio_0
,
291 &mlx4_en_stats
->ROCT_prio_1
,
293 p_stats
->tx_bytes_phy
=
294 en_stats_adder(&mlx4_en_stats
->TOCT_prio_0
,
295 &mlx4_en_stats
->TOCT_prio_1
,
297 if (mlx4_is_master(mdev
->dev
)) {
298 stats
->rx_packets
= p_stats
->rx_packets_phy
;
299 stats
->tx_packets
= p_stats
->tx_packets_phy
;
300 stats
->rx_bytes
= p_stats
->rx_bytes_phy
;
301 stats
->tx_bytes
= p_stats
->tx_bytes_phy
;
305 /* net device stats */
306 stats
->rx_errors
= be64_to_cpu(mlx4_en_stats
->PCS
) +
307 be32_to_cpu(mlx4_en_stats
->RJBBR
) +
308 be32_to_cpu(mlx4_en_stats
->RCRC
) +
309 be32_to_cpu(mlx4_en_stats
->RRUNT
) +
310 be64_to_cpu(mlx4_en_stats
->RInRangeLengthErr
) +
311 be64_to_cpu(mlx4_en_stats
->ROutRangeLengthErr
) +
312 be32_to_cpu(mlx4_en_stats
->RSHORT
) +
313 en_stats_adder(&mlx4_en_stats
->RGIANT_prio_0
,
314 &mlx4_en_stats
->RGIANT_prio_1
,
316 stats
->tx_errors
= en_stats_adder(&mlx4_en_stats
->TGIANT_prio_0
,
317 &mlx4_en_stats
->TGIANT_prio_1
,
319 stats
->multicast
= en_stats_adder(&mlx4_en_stats
->MCAST_prio_0
,
320 &mlx4_en_stats
->MCAST_prio_1
,
322 stats
->rx_dropped
= be32_to_cpu(mlx4_en_stats
->RDROP
) +
324 stats
->rx_length_errors
= be32_to_cpu(mlx4_en_stats
->RdropLength
);
325 stats
->rx_crc_errors
= be32_to_cpu(mlx4_en_stats
->RCRC
);
326 stats
->rx_fifo_errors
= be32_to_cpu(mlx4_en_stats
->RdropOvflw
);
327 stats
->tx_dropped
= be32_to_cpu(mlx4_en_stats
->TDROP
) +
331 priv
->pkstats
.rx_multicast_packets
= stats
->multicast
;
332 priv
->pkstats
.rx_broadcast_packets
=
333 en_stats_adder(&mlx4_en_stats
->RBCAST_prio_0
,
334 &mlx4_en_stats
->RBCAST_prio_1
,
336 priv
->pkstats
.rx_jabbers
= be32_to_cpu(mlx4_en_stats
->RJBBR
);
337 priv
->pkstats
.rx_in_range_length_error
=
338 be64_to_cpu(mlx4_en_stats
->RInRangeLengthErr
);
339 priv
->pkstats
.rx_out_range_length_error
=
340 be64_to_cpu(mlx4_en_stats
->ROutRangeLengthErr
);
343 priv
->pkstats
.tx_multicast_packets
=
344 en_stats_adder(&mlx4_en_stats
->TMCAST_prio_0
,
345 &mlx4_en_stats
->TMCAST_prio_1
,
347 priv
->pkstats
.tx_broadcast_packets
=
348 en_stats_adder(&mlx4_en_stats
->TBCAST_prio_0
,
349 &mlx4_en_stats
->TBCAST_prio_1
,
352 priv
->pkstats
.rx_prio
[0][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_0
);
353 priv
->pkstats
.rx_prio
[0][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_0
);
354 priv
->pkstats
.rx_prio
[1][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_1
);
355 priv
->pkstats
.rx_prio
[1][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_1
);
356 priv
->pkstats
.rx_prio
[2][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_2
);
357 priv
->pkstats
.rx_prio
[2][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_2
);
358 priv
->pkstats
.rx_prio
[3][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_3
);
359 priv
->pkstats
.rx_prio
[3][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_3
);
360 priv
->pkstats
.rx_prio
[4][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_4
);
361 priv
->pkstats
.rx_prio
[4][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_4
);
362 priv
->pkstats
.rx_prio
[5][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_5
);
363 priv
->pkstats
.rx_prio
[5][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_5
);
364 priv
->pkstats
.rx_prio
[6][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_6
);
365 priv
->pkstats
.rx_prio
[6][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_6
);
366 priv
->pkstats
.rx_prio
[7][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_7
);
367 priv
->pkstats
.rx_prio
[7][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_7
);
368 priv
->pkstats
.rx_prio
[8][0] = be64_to_cpu(mlx4_en_stats
->RTOT_novlan
);
369 priv
->pkstats
.rx_prio
[8][1] = be64_to_cpu(mlx4_en_stats
->ROCT_novlan
);
370 priv
->pkstats
.tx_prio
[0][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_0
);
371 priv
->pkstats
.tx_prio
[0][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_0
);
372 priv
->pkstats
.tx_prio
[1][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_1
);
373 priv
->pkstats
.tx_prio
[1][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_1
);
374 priv
->pkstats
.tx_prio
[2][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_2
);
375 priv
->pkstats
.tx_prio
[2][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_2
);
376 priv
->pkstats
.tx_prio
[3][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_3
);
377 priv
->pkstats
.tx_prio
[3][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_3
);
378 priv
->pkstats
.tx_prio
[4][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_4
);
379 priv
->pkstats
.tx_prio
[4][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_4
);
380 priv
->pkstats
.tx_prio
[5][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_5
);
381 priv
->pkstats
.tx_prio
[5][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_5
);
382 priv
->pkstats
.tx_prio
[6][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_6
);
383 priv
->pkstats
.tx_prio
[6][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_6
);
384 priv
->pkstats
.tx_prio
[7][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_7
);
385 priv
->pkstats
.tx_prio
[7][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_7
);
386 priv
->pkstats
.tx_prio
[8][0] = be64_to_cpu(mlx4_en_stats
->TTOT_novlan
);
387 priv
->pkstats
.tx_prio
[8][1] = be64_to_cpu(mlx4_en_stats
->TOCT_novlan
);
389 if (tmp_counter_stats
.counter_mode
== 0) {
390 priv
->pf_stats
.rx_bytes
= be64_to_cpu(tmp_counter_stats
.rx_bytes
);
391 priv
->pf_stats
.tx_bytes
= be64_to_cpu(tmp_counter_stats
.tx_bytes
);
392 priv
->pf_stats
.rx_packets
= be64_to_cpu(tmp_counter_stats
.rx_frames
);
393 priv
->pf_stats
.tx_packets
= be64_to_cpu(tmp_counter_stats
.tx_frames
);
396 for (i
= 0; i
< MLX4_NUM_PRIORITIES
; i
++) {
397 priv
->rx_priority_flowstats
[i
].rx_pause
=
398 be64_to_cpu(flowstats
[i
].rx_pause
);
399 priv
->rx_priority_flowstats
[i
].rx_pause_duration
=
400 be64_to_cpu(flowstats
[i
].rx_pause_duration
);
401 priv
->rx_priority_flowstats
[i
].rx_pause_transition
=
402 be64_to_cpu(flowstats
[i
].rx_pause_transition
);
403 priv
->tx_priority_flowstats
[i
].tx_pause
=
404 be64_to_cpu(flowstats
[i
].tx_pause
);
405 priv
->tx_priority_flowstats
[i
].tx_pause_duration
=
406 be64_to_cpu(flowstats
[i
].tx_pause_duration
);
407 priv
->tx_priority_flowstats
[i
].tx_pause_transition
=
408 be64_to_cpu(flowstats
[i
].tx_pause_transition
);
411 /* if pfc is not in use, all priorities counters have the same value */
412 priv
->rx_flowstats
.rx_pause
=
413 be64_to_cpu(flowstats
[0].rx_pause
);
414 priv
->rx_flowstats
.rx_pause_duration
=
415 be64_to_cpu(flowstats
[0].rx_pause_duration
);
416 priv
->rx_flowstats
.rx_pause_transition
=
417 be64_to_cpu(flowstats
[0].rx_pause_transition
);
418 priv
->tx_flowstats
.tx_pause
=
419 be64_to_cpu(flowstats
[0].tx_pause
);
420 priv
->tx_flowstats
.tx_pause_duration
=
421 be64_to_cpu(flowstats
[0].tx_pause_duration
);
422 priv
->tx_flowstats
.tx_pause_transition
=
423 be64_to_cpu(flowstats
[0].tx_pause_transition
);
425 spin_unlock_bh(&priv
->stats_lock
);
428 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
429 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox_priority
);