/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/if_vlan.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>

#include "en_port.h"
#include "mlx4_en.h"
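
/* Program the port's VLAN filter from priv->active_vlans.  The bitmap
 * is packed into VLAN_FLTR_SIZE 32-bit words, written from the last
 * entry backwards, so entry[VLAN_FLTR_SIZE - 1] bit j covers VLAN id j
 * and entry[0] covers the highest VLAN ids.
 */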
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_vlan_fltr_mbox *filter;
	int i;
	int j;
	int index = 0;
	u32 entry;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	filter = mailbox->buf;
	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
		entry = 0;
		for (j = 0; j < 32; j++)
			if (test_bit(index++, priv->active_vlans))
				entry |= 1 << j;
		filter->entry[i] = cpu_to_be32(entry);
	}
	err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
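
/* Query link state, speed, transceiver type and autoneg flags for the
 * given port via MLX4_CMD_QUERY_PORT and cache them in
 * priv->port_state for ethtool reporting.
 */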
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
{
	struct mlx4_en_query_port_context *qport_context;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct mlx4_en_port_state *state = &priv->port_state;
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;
	qport_context = mailbox->buf;

	/* This command is always accessed from Ethtool context
	 * already synchronized, no need for locking */
	state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
	switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
	case MLX4_EN_100M_SPEED:
		state->link_speed = SPEED_100;
		break;
	case MLX4_EN_1G_SPEED:
		state->link_speed = SPEED_1000;
		break;
	case MLX4_EN_10G_SPEED_XAUI:
	case MLX4_EN_10G_SPEED_XFI:
		state->link_speed = SPEED_10000;
		break;
	case MLX4_EN_20G_SPEED:
		state->link_speed = SPEED_20000;
		break;
	case MLX4_EN_40G_SPEED:
		state->link_speed = SPEED_40000;
		break;
	case MLX4_EN_56G_SPEED:
		state->link_speed = SPEED_56000;
		break;
	default:
		state->link_speed = -1;
		break;
	}

	state->transceiver = qport_context->transceiver;

	state->flags = 0; /* Reset and recalculate the port flags */
	state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
		MLX4_EN_PORT_ANC : 0;
	state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
		MLX4_EN_PORT_ANE : 0;

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

/* Each counter set is located in struct mlx4_en_stat_out_mbox
 * with a constant offset between its prio components.
 * This function runs over a counter set and sums all of its prio components.
 */
static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
{
	__be64 *curr = start;
	unsigned long ret = 0;
	int i;
	int offset = next - start;

	for (i = 0; i < num; i++) {
		ret += be64_to_cpu(*curr);
		curr += offset;
	}

	return ret;
}
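
/* Refresh SW and HW counters for the given port: per-ring SW counters
 * are folded together with the HW counters returned by
 * MLX4_CMD_DUMP_ETH_STATS (the 'reset' request is carried in bits 15:8
 * of in_mod), all under priv->stats_lock.
 */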
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
	struct mlx4_counter tmp_counter_stats;
	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
	struct mlx4_en_stat_out_flow_control_mbox *flowstats;
	struct net_device *dev = mdev->pndev[port];
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct mlx4_cmd_mailbox *mailbox;
	u64 in_mod = reset << 8 | port;
	int err;
	int i, counter_index;
	unsigned long sw_rx_dropped = 0;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	mlx4_en_stats = mailbox->buf;

	spin_lock_bh(&priv->stats_lock);

	stats->rx_packets = 0;
	stats->rx_bytes = 0;
	priv->port_stats.rx_chksum_good = 0;
	priv->port_stats.rx_chksum_none = 0;
	priv->port_stats.rx_chksum_complete = 0;
	for (i = 0; i < priv->rx_ring_num; i++) {
		stats->rx_packets += priv->rx_ring[i]->packets;
		stats->rx_bytes += priv->rx_ring[i]->bytes;
		sw_rx_dropped += priv->rx_ring[i]->dropped;
		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
	}
	stats->tx_packets = 0;
	stats->tx_bytes = 0;
	stats->tx_dropped = 0;
	priv->port_stats.tx_chksum_offload = 0;
	priv->port_stats.queue_stopped = 0;
	priv->port_stats.wake_queue = 0;
	priv->port_stats.tso_packets = 0;
	priv->port_stats.xmit_more = 0;

	for (i = 0; i < priv->tx_ring_num; i++) {
		const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];

		stats->tx_packets += ring->packets;
		stats->tx_bytes += ring->bytes;
		stats->tx_dropped += ring->tx_dropped;
		priv->port_stats.tx_chksum_offload += ring->tx_csum;
		priv->port_stats.queue_stopped     += ring->queue_stopped;
		priv->port_stats.wake_queue        += ring->wake_queue;
		priv->port_stats.tso_packets       += ring->tso_packets;
		priv->port_stats.xmit_more         += ring->xmit_more;
	}
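	/* As mlx4 master (e.g. SR-IOV PF), prefer the port-wide HW
	 * RTOT/TTOT/ROCT/TOCT counters over the per-ring SW totals,
	 * presumably because they also account for other functions'
	 * traffic.
	 */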
	if (mlx4_is_master(mdev->dev)) {
		stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
						   &mlx4_en_stats->RTOT_prio_1,
						   NUM_PRIORITIES);
		stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
						   &mlx4_en_stats->TTOT_prio_1,
						   NUM_PRIORITIES);
		stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
						 &mlx4_en_stats->ROCT_prio_1,
						 NUM_PRIORITIES);
		stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
						 &mlx4_en_stats->TOCT_prio_1,
						 NUM_PRIORITIES);
	}

	/* net device stats */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
			   be32_to_cpu(mlx4_en_stats->RJBBR) +
			   be32_to_cpu(mlx4_en_stats->RCRC) +
			   be32_to_cpu(mlx4_en_stats->RRUNT) +
			   be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
			   be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
			   be32_to_cpu(mlx4_en_stats->RSHORT) +
			   en_stats_adder(&mlx4_en_stats->RGIANT_prio_0,
					  &mlx4_en_stats->RGIANT_prio_1,
					  NUM_PRIORITIES);
	stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0,
					  &mlx4_en_stats->TGIANT_prio_1,
					  NUM_PRIORITIES);
	stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
					  &mlx4_en_stats->MCAST_prio_1,
					  NUM_PRIORITIES);
	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
			    sw_rx_dropped;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);

	/* RX stats */
	priv->pkstats.rx_multicast_packets = stats->multicast;
	priv->pkstats.rx_broadcast_packets =
			en_stats_adder(&mlx4_en_stats->RBCAST_prio_0,
				       &mlx4_en_stats->RBCAST_prio_1,
				       NUM_PRIORITIES);
	priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
	priv->pkstats.rx_in_range_length_error =
		be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
	priv->pkstats.rx_out_range_length_error =
		be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);

	/* Tx stats */
	priv->pkstats.tx_multicast_packets =
		en_stats_adder(&mlx4_en_stats->TMCAST_prio_0,
			       &mlx4_en_stats->TMCAST_prio_1,
			       NUM_PRIORITIES);
	priv->pkstats.tx_broadcast_packets =
		en_stats_adder(&mlx4_en_stats->TBCAST_prio_0,
			       &mlx4_en_stats->TBCAST_prio_1,
			       NUM_PRIORITIES);
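
	/* Per-priority breakdown: [p][0] is the packet total (xTOT) and
	 * [p][1] the octet total (xOCT) for priority p; index 8 holds
	 * untagged (no-VLAN) traffic.
	 */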
	priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
	priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0);
	priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
	priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1);
	priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
	priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2);
	priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
	priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3);
	priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
	priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4);
	priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
	priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5);
	priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
	priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6);
	priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
	priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7);
	priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan);
	priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan);
	priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
	priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0);
	priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
	priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1);
	priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
	priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2);
	priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
	priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3);
	priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
	priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4);
	priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
	priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5);
	priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
	priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6);
	priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
	priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7);
	priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
	priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);

	spin_unlock_bh(&priv->stats_lock);

	memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
	counter_index = mlx4_get_default_counter_index(mdev->dev, port);
	err = mlx4_get_counter_stats(mdev->dev, counter_index,
				     &tmp_counter_stats, reset);

	/* 0xffs indicates invalid value */
	memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
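
	/* Flow-control (pause) statistics are only queried when the device
	 * advertises MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN; otherwise the 0xff
	 * fill above leaves them marked invalid.
	 */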
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
		memset(mailbox->buf, 0,
		       sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
		err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
				   in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
				   0, MLX4_CMD_DUMP_ETH_STATS,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
		if (err)
			goto out;
	}

	flowstats = mailbox->buf;

	spin_lock_bh(&priv->stats_lock);

	if (tmp_counter_stats.counter_mode == 0) {
		priv->pf_stats.rx_bytes   = be64_to_cpu(tmp_counter_stats.rx_bytes);
		priv->pf_stats.tx_bytes   = be64_to_cpu(tmp_counter_stats.tx_bytes);
		priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
		priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
	}

	for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
		priv->rx_priority_flowstats[i].rx_pause =
			be64_to_cpu(flowstats[i].rx_pause);
		priv->rx_priority_flowstats[i].rx_pause_duration =
			be64_to_cpu(flowstats[i].rx_pause_duration);
		priv->rx_priority_flowstats[i].rx_pause_transition =
			be64_to_cpu(flowstats[i].rx_pause_transition);
		priv->tx_priority_flowstats[i].tx_pause =
			be64_to_cpu(flowstats[i].tx_pause);
		priv->tx_priority_flowstats[i].tx_pause_duration =
			be64_to_cpu(flowstats[i].tx_pause_duration);
		priv->tx_priority_flowstats[i].tx_pause_transition =
			be64_to_cpu(flowstats[i].tx_pause_transition);
	}

	/* if PFC is not in use, all priorities' counters have the same value */
	priv->rx_flowstats.rx_pause =
			be64_to_cpu(flowstats[0].rx_pause);
	priv->rx_flowstats.rx_pause_duration =
			be64_to_cpu(flowstats[0].rx_pause_duration);
	priv->rx_flowstats.rx_pause_transition =
			be64_to_cpu(flowstats[0].rx_pause_transition);
	priv->tx_flowstats.tx_pause =
			be64_to_cpu(flowstats[0].tx_pause);
	priv->tx_flowstats.tx_pause_duration =
			be64_to_cpu(flowstats[0].tx_pause_duration);
	priv->tx_flowstats.tx_pause_transition =
			be64_to_cpu(flowstats[0].tx_pause_transition);

	spin_unlock_bh(&priv->stats_lock);

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}