/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/err.h>
#include <linux/if_vlan.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>

#include "en_port.h"
#include "mlx4_en.h"
44 int mlx4_SET_VLAN_FLTR(struct mlx4_dev
*dev
, struct mlx4_en_priv
*priv
)
46 struct mlx4_cmd_mailbox
*mailbox
;
47 struct mlx4_set_vlan_fltr_mbox
*filter
;
54 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
56 return PTR_ERR(mailbox
);
58 filter
= mailbox
->buf
;
59 for (i
= VLAN_FLTR_SIZE
- 1; i
>= 0; i
--) {
61 for (j
= 0; j
< 32; j
++)
62 if (test_bit(index
++, priv
->active_vlans
))
64 filter
->entry
[i
] = cpu_to_be32(entry
);
66 err
= mlx4_cmd(dev
, mailbox
->dma
, priv
->port
, 0, MLX4_CMD_SET_VLAN_FLTR
,
67 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
68 mlx4_free_cmd_mailbox(dev
, mailbox
);
72 int mlx4_en_QUERY_PORT(struct mlx4_en_dev
*mdev
, u8 port
)
74 struct mlx4_en_query_port_context
*qport_context
;
75 struct mlx4_en_priv
*priv
= netdev_priv(mdev
->pndev
[port
]);
76 struct mlx4_en_port_state
*state
= &priv
->port_state
;
77 struct mlx4_cmd_mailbox
*mailbox
;
80 mailbox
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
82 return PTR_ERR(mailbox
);
83 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox
->dma
, port
, 0,
84 MLX4_CMD_QUERY_PORT
, MLX4_CMD_TIME_CLASS_B
,
88 qport_context
= mailbox
->buf
;
90 /* This command is always accessed from Ethtool context
91 * already synchronized, no need in locking */
92 state
->link_state
= !!(qport_context
->link_up
& MLX4_EN_LINK_UP_MASK
);
93 switch (qport_context
->link_speed
& MLX4_EN_SPEED_MASK
) {
94 case MLX4_EN_100M_SPEED
:
95 state
->link_speed
= SPEED_100
;
97 case MLX4_EN_1G_SPEED
:
98 state
->link_speed
= SPEED_1000
;
100 case MLX4_EN_10G_SPEED_XAUI
:
101 case MLX4_EN_10G_SPEED_XFI
:
102 state
->link_speed
= SPEED_10000
;
104 case MLX4_EN_20G_SPEED
:
105 state
->link_speed
= SPEED_20000
;
107 case MLX4_EN_40G_SPEED
:
108 state
->link_speed
= SPEED_40000
;
110 case MLX4_EN_56G_SPEED
:
111 state
->link_speed
= SPEED_56000
;
114 state
->link_speed
= -1;
118 state
->transceiver
= qport_context
->transceiver
;
120 state
->flags
= 0; /* Reset and recalculate the port flags */
121 state
->flags
|= (qport_context
->link_up
& MLX4_EN_ANC_MASK
) ?
122 MLX4_EN_PORT_ANC
: 0;
123 state
->flags
|= (qport_context
->autoneg
& MLX4_EN_AUTONEG_MASK
) ?
124 MLX4_EN_PORT_ANE
: 0;
127 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);
131 /* Each counter set is located in struct mlx4_en_stat_out_mbox
132 * with a const offset between its prio components.
133 * This function runs over a counter set and sum all of it's prio components.
135 static unsigned long en_stats_adder(__be64
*start
, __be64
*next
, int num
)
137 __be64
*curr
= start
;
138 unsigned long ret
= 0;
140 int offset
= next
- start
;
142 for (i
= 0; i
< num
; i
++) {
143 ret
+= be64_to_cpu(*curr
);
150 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev
*mdev
, u8 port
, u8 reset
)
152 struct mlx4_counter tmp_counter_stats
;
153 struct mlx4_en_stat_out_mbox
*mlx4_en_stats
;
154 struct mlx4_en_stat_out_flow_control_mbox
*flowstats
;
155 struct mlx4_en_priv
*priv
= netdev_priv(mdev
->pndev
[port
]);
156 struct net_device_stats
*stats
= &priv
->stats
;
157 struct mlx4_cmd_mailbox
*mailbox
;
158 u64 in_mod
= reset
<< 8 | port
;
160 int i
, counter_index
;
162 mailbox
= mlx4_alloc_cmd_mailbox(mdev
->dev
);
164 return PTR_ERR(mailbox
);
165 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox
->dma
, in_mod
, 0,
166 MLX4_CMD_DUMP_ETH_STATS
, MLX4_CMD_TIME_CLASS_B
,
171 mlx4_en_stats
= mailbox
->buf
;
173 spin_lock_bh(&priv
->stats_lock
);
175 stats
->rx_packets
= 0;
177 priv
->port_stats
.rx_chksum_good
= 0;
178 priv
->port_stats
.rx_chksum_none
= 0;
179 priv
->port_stats
.rx_chksum_complete
= 0;
180 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
181 stats
->rx_packets
+= priv
->rx_ring
[i
]->packets
;
182 stats
->rx_bytes
+= priv
->rx_ring
[i
]->bytes
;
183 priv
->port_stats
.rx_chksum_good
+= priv
->rx_ring
[i
]->csum_ok
;
184 priv
->port_stats
.rx_chksum_none
+= priv
->rx_ring
[i
]->csum_none
;
185 priv
->port_stats
.rx_chksum_complete
+= priv
->rx_ring
[i
]->csum_complete
;
187 stats
->tx_packets
= 0;
189 priv
->port_stats
.tx_chksum_offload
= 0;
190 priv
->port_stats
.queue_stopped
= 0;
191 priv
->port_stats
.wake_queue
= 0;
192 priv
->port_stats
.tso_packets
= 0;
193 priv
->port_stats
.xmit_more
= 0;
195 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
196 const struct mlx4_en_tx_ring
*ring
= priv
->tx_ring
[i
];
198 stats
->tx_packets
+= ring
->packets
;
199 stats
->tx_bytes
+= ring
->bytes
;
200 priv
->port_stats
.tx_chksum_offload
+= ring
->tx_csum
;
201 priv
->port_stats
.queue_stopped
+= ring
->queue_stopped
;
202 priv
->port_stats
.wake_queue
+= ring
->wake_queue
;
203 priv
->port_stats
.tso_packets
+= ring
->tso_packets
;
204 priv
->port_stats
.xmit_more
+= ring
->xmit_more
;
206 if (mlx4_is_master(mdev
->dev
)) {
207 stats
->rx_packets
= en_stats_adder(&mlx4_en_stats
->RTOT_prio_0
,
208 &mlx4_en_stats
->RTOT_prio_1
,
210 stats
->tx_packets
= en_stats_adder(&mlx4_en_stats
->TTOT_prio_0
,
211 &mlx4_en_stats
->TTOT_prio_1
,
213 stats
->rx_bytes
= en_stats_adder(&mlx4_en_stats
->ROCT_prio_0
,
214 &mlx4_en_stats
->ROCT_prio_1
,
216 stats
->tx_bytes
= en_stats_adder(&mlx4_en_stats
->TOCT_prio_0
,
217 &mlx4_en_stats
->TOCT_prio_1
,
221 /* net device stats */
222 stats
->rx_errors
= be64_to_cpu(mlx4_en_stats
->PCS
) +
223 be32_to_cpu(mlx4_en_stats
->RJBBR
) +
224 be32_to_cpu(mlx4_en_stats
->RCRC
) +
225 be32_to_cpu(mlx4_en_stats
->RRUNT
) +
226 be64_to_cpu(mlx4_en_stats
->RInRangeLengthErr
) +
227 be64_to_cpu(mlx4_en_stats
->ROutRangeLengthErr
) +
228 be32_to_cpu(mlx4_en_stats
->RSHORT
) +
229 en_stats_adder(&mlx4_en_stats
->RGIANT_prio_0
,
230 &mlx4_en_stats
->RGIANT_prio_1
,
232 stats
->tx_errors
= en_stats_adder(&mlx4_en_stats
->TGIANT_prio_0
,
233 &mlx4_en_stats
->TGIANT_prio_1
,
235 stats
->multicast
= en_stats_adder(&mlx4_en_stats
->MCAST_prio_0
,
236 &mlx4_en_stats
->MCAST_prio_1
,
238 stats
->collisions
= 0;
239 stats
->rx_dropped
= be32_to_cpu(mlx4_en_stats
->RDROP
);
240 stats
->rx_length_errors
= be32_to_cpu(mlx4_en_stats
->RdropLength
);
241 stats
->rx_over_errors
= be32_to_cpu(mlx4_en_stats
->RdropOvflw
);
242 stats
->rx_crc_errors
= be32_to_cpu(mlx4_en_stats
->RCRC
);
243 stats
->rx_frame_errors
= 0;
244 stats
->rx_fifo_errors
= be32_to_cpu(mlx4_en_stats
->RdropOvflw
);
245 stats
->rx_missed_errors
= be32_to_cpu(mlx4_en_stats
->RdropOvflw
);
246 stats
->tx_aborted_errors
= 0;
247 stats
->tx_carrier_errors
= 0;
248 stats
->tx_fifo_errors
= 0;
249 stats
->tx_heartbeat_errors
= 0;
250 stats
->tx_window_errors
= 0;
251 stats
->tx_dropped
= be32_to_cpu(mlx4_en_stats
->TDROP
);
254 priv
->pkstats
.rx_multicast_packets
= stats
->multicast
;
255 priv
->pkstats
.rx_broadcast_packets
=
256 en_stats_adder(&mlx4_en_stats
->RBCAST_prio_0
,
257 &mlx4_en_stats
->RBCAST_prio_1
,
259 priv
->pkstats
.rx_jabbers
= be32_to_cpu(mlx4_en_stats
->RJBBR
);
260 priv
->pkstats
.rx_in_range_length_error
=
261 be64_to_cpu(mlx4_en_stats
->RInRangeLengthErr
);
262 priv
->pkstats
.rx_out_range_length_error
=
263 be64_to_cpu(mlx4_en_stats
->ROutRangeLengthErr
);
266 priv
->pkstats
.tx_multicast_packets
=
267 en_stats_adder(&mlx4_en_stats
->TMCAST_prio_0
,
268 &mlx4_en_stats
->TMCAST_prio_1
,
270 priv
->pkstats
.tx_broadcast_packets
=
271 en_stats_adder(&mlx4_en_stats
->TBCAST_prio_0
,
272 &mlx4_en_stats
->TBCAST_prio_1
,
275 priv
->pkstats
.rx_prio
[0][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_0
);
276 priv
->pkstats
.rx_prio
[0][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_0
);
277 priv
->pkstats
.rx_prio
[1][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_1
);
278 priv
->pkstats
.rx_prio
[1][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_1
);
279 priv
->pkstats
.rx_prio
[2][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_2
);
280 priv
->pkstats
.rx_prio
[2][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_2
);
281 priv
->pkstats
.rx_prio
[3][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_3
);
282 priv
->pkstats
.rx_prio
[3][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_3
);
283 priv
->pkstats
.rx_prio
[4][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_4
);
284 priv
->pkstats
.rx_prio
[4][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_4
);
285 priv
->pkstats
.rx_prio
[5][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_5
);
286 priv
->pkstats
.rx_prio
[5][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_5
);
287 priv
->pkstats
.rx_prio
[6][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_6
);
288 priv
->pkstats
.rx_prio
[6][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_6
);
289 priv
->pkstats
.rx_prio
[7][0] = be64_to_cpu(mlx4_en_stats
->RTOT_prio_7
);
290 priv
->pkstats
.rx_prio
[7][1] = be64_to_cpu(mlx4_en_stats
->ROCT_prio_7
);
291 priv
->pkstats
.rx_prio
[8][0] = be64_to_cpu(mlx4_en_stats
->RTOT_novlan
);
292 priv
->pkstats
.rx_prio
[8][1] = be64_to_cpu(mlx4_en_stats
->ROCT_novlan
);
293 priv
->pkstats
.tx_prio
[0][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_0
);
294 priv
->pkstats
.tx_prio
[0][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_0
);
295 priv
->pkstats
.tx_prio
[1][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_1
);
296 priv
->pkstats
.tx_prio
[1][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_1
);
297 priv
->pkstats
.tx_prio
[2][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_2
);
298 priv
->pkstats
.tx_prio
[2][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_2
);
299 priv
->pkstats
.tx_prio
[3][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_3
);
300 priv
->pkstats
.tx_prio
[3][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_3
);
301 priv
->pkstats
.tx_prio
[4][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_4
);
302 priv
->pkstats
.tx_prio
[4][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_4
);
303 priv
->pkstats
.tx_prio
[5][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_5
);
304 priv
->pkstats
.tx_prio
[5][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_5
);
305 priv
->pkstats
.tx_prio
[6][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_6
);
306 priv
->pkstats
.tx_prio
[6][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_6
);
307 priv
->pkstats
.tx_prio
[7][0] = be64_to_cpu(mlx4_en_stats
->TTOT_prio_7
);
308 priv
->pkstats
.tx_prio
[7][1] = be64_to_cpu(mlx4_en_stats
->TOCT_prio_7
);
309 priv
->pkstats
.tx_prio
[8][0] = be64_to_cpu(mlx4_en_stats
->TTOT_novlan
);
310 priv
->pkstats
.tx_prio
[8][1] = be64_to_cpu(mlx4_en_stats
->TOCT_novlan
);
312 spin_unlock_bh(&priv
->stats_lock
);
314 memset(&tmp_counter_stats
, 0, sizeof(tmp_counter_stats
));
315 counter_index
= mlx4_get_default_counter_index(mdev
->dev
, port
);
316 err
= mlx4_get_counter_stats(mdev
->dev
, counter_index
,
317 &tmp_counter_stats
, reset
);
319 /* 0xffs indicates invalid value */
320 memset(mailbox
->buf
, 0xff, sizeof(*flowstats
) * MLX4_NUM_PRIORITIES
);
322 if (mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN
) {
323 memset(mailbox
->buf
, 0,
324 sizeof(*flowstats
) * MLX4_NUM_PRIORITIES
);
325 err
= mlx4_cmd_box(mdev
->dev
, 0, mailbox
->dma
,
326 in_mod
| MLX4_DUMP_ETH_STATS_FLOW_CONTROL
,
327 0, MLX4_CMD_DUMP_ETH_STATS
,
328 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
333 flowstats
= mailbox
->buf
;
335 spin_lock_bh(&priv
->stats_lock
);
337 if (tmp_counter_stats
.counter_mode
== 0) {
338 priv
->pf_stats
.rx_bytes
= be64_to_cpu(tmp_counter_stats
.rx_bytes
);
339 priv
->pf_stats
.tx_bytes
= be64_to_cpu(tmp_counter_stats
.tx_bytes
);
340 priv
->pf_stats
.rx_packets
= be64_to_cpu(tmp_counter_stats
.rx_frames
);
341 priv
->pf_stats
.tx_packets
= be64_to_cpu(tmp_counter_stats
.tx_frames
);
344 for (i
= 0; i
< MLX4_NUM_PRIORITIES
; i
++) {
345 priv
->rx_priority_flowstats
[i
].rx_pause
=
346 be64_to_cpu(flowstats
[i
].rx_pause
);
347 priv
->rx_priority_flowstats
[i
].rx_pause_duration
=
348 be64_to_cpu(flowstats
[i
].rx_pause_duration
);
349 priv
->rx_priority_flowstats
[i
].rx_pause_transition
=
350 be64_to_cpu(flowstats
[i
].rx_pause_transition
);
351 priv
->tx_priority_flowstats
[i
].tx_pause
=
352 be64_to_cpu(flowstats
[i
].tx_pause
);
353 priv
->tx_priority_flowstats
[i
].tx_pause_duration
=
354 be64_to_cpu(flowstats
[i
].tx_pause_duration
);
355 priv
->tx_priority_flowstats
[i
].tx_pause_transition
=
356 be64_to_cpu(flowstats
[i
].tx_pause_transition
);
359 /* if pfc is not in use, all priorities counters have the same value */
360 priv
->rx_flowstats
.rx_pause
=
361 be64_to_cpu(flowstats
[0].rx_pause
);
362 priv
->rx_flowstats
.rx_pause_duration
=
363 be64_to_cpu(flowstats
[0].rx_pause_duration
);
364 priv
->rx_flowstats
.rx_pause_transition
=
365 be64_to_cpu(flowstats
[0].rx_pause_transition
);
366 priv
->tx_flowstats
.tx_pause
=
367 be64_to_cpu(flowstats
[0].tx_pause
);
368 priv
->tx_flowstats
.tx_pause_duration
=
369 be64_to_cpu(flowstats
[0].tx_pause_duration
);
370 priv
->tx_flowstats
.tx_pause_transition
=
371 be64_to_cpu(flowstats
[0].tx_pause_transition
);
373 spin_unlock_bh(&priv
->stats_lock
);
376 mlx4_free_cmd_mailbox(mdev
->dev
, mailbox
);