/* Copyright 2008-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>

#include "dpaa_eth.h"
#include "mac.h"

static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
	/* dpa rx errors */
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

	/* demultiplexing errors */
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

	/* congestion related stats */
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};

#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	if (!net_dev->phydev) {
		netdev_dbg(net_dev, "phy device not initialized\n");
		return 0;
	}

	phy_ethtool_ksettings_get(net_dev->phydev, cmd);

	return 0;
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
	if (err < 0)
		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);

	return err;
}

static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	int len;

	strlcpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	len = snprintf(drvinfo->version, sizeof(drvinfo->version),
		       "%X", 0);
	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		       "%X", 0);

	if (len >= sizeof(drvinfo->fw_version)) {
		/* Truncated output */
		netdev_notice(net_dev, "snprintf() = %d\n", len);
	}
	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
	int err;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	err = 0;
	if (net_dev->phydev->autoneg) {
		err = phy_start_aneg(net_dev->phydev);
		if (err < 0)
			netdev_err(net_dev, "phy_start_aneg() = %d\n",
				   err);
	}

	return err;
}

static void dpaa_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	if (!net_dev->phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return;
	}

	epause->autoneg = mac_dev->autoneg_pause;
	epause->rx_pause = mac_dev->rx_pause_active;
	epause->tx_pause = mac_dev->tx_pause_active;
}

static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	u32 newadv, oldadv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	    (epause->rx_pause != epause->tx_pause)))
		return -EINVAL;

	/* The MAC should know how to handle PAUSE frame autonegotiation before
	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
	 * settings.
	 */
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

	/* Determine the sym/asym advertised PAUSE capabilities from the
	 * desired rx/tx pause settings.
	 */
	newadv = 0;
	if (epause->rx_pause)
		newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (epause->tx_pause)
		newadv |= ADVERTISED_Asym_Pause;

	oldadv = phydev->advertising &
			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	/* If there are differences between the old and the new advertised
	 * values, restart PHY autonegotiation and advertise the new values.
	 */
	if (oldadv != newadv) {
		phydev->advertising &= ~(ADVERTISED_Pause
					 | ADVERTISED_Asym_Pause);
		phydev->advertising |= newadv;
		if (phydev->autoneg) {
			err = phy_start_aneg(phydev);
			if (err < 0)
				netdev_err(net_dev, "phy_start_aneg() = %d\n",
					   err);
		}
	}

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}
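
/*
 * For reference (derived from dpaa_set_pauseparam() above, not from any
 * additional source): the requested rx/tx pause settings map to the
 * advertised 802.3 PAUSE bits as follows.
 *
 *   rx_pause  tx_pause  advertised
 *   --------  --------  --------------------------------
 *     off       off     (nothing)
 *     off       on      Asym_Pause
 *     on        off     Pause | Asym_Pause
 *     on        on      Pause | Asym_Pause
 */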

static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
	unsigned int total_stats, num_stats;

	num_stats   = num_online_cpus() + 1;
	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
			DPAA_STATS_GLOBAL_LEN;

	switch (type) {
	case ETH_SS_STATS:
		return total_stats;
	default:
		return -EOPNOTSUPP;
	}
}
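
/*
 * Worked example (the sizes below are illustrative assumptions, not taken
 * from this file): with four online CPUs, seven per-CPU counters, three
 * buffer pools (DPAA_BPS_NUM) and fifteen global counters, ETH_SS_STATS
 * reports (4 + 1) * (7 + 3) + 15 = 65 u64 values: one value per CPU plus a
 * TOTAL value for every per-CPU counter and buffer pool, followed by the
 * global counters.
 */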

static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 *bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	int crr = 0, j;

	/* update current CPU's stats and also add them to the total values */
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

	for (j = 0; j < DPAA_BPS_NUM; j++) {
		data[crr * num_values + crr_cpu] = bp_count[j];
		data[crr++ * num_values + num_cpus] += bp_count[j];
	}
}
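
/*
 * Layout sketch for the data[] area filled by copy_stats(): rows are the
 * per-CPU counters (and buffer pool counts), columns are the online CPUs
 * plus one final TOTAL column, with num_values == num_cpus + 1 entries per
 * row:
 *
 *                 CPU 0             CPU 1                 ...  TOTAL
 *   interrupts    data[0]           data[1]               ...  data[num_cpus]
 *   rx packets    data[num_values]  data[num_values + 1]  ...
 *   ...
 *
 * dpaa_get_strings() further down emits the matching "<stat> [CPU n]" /
 * "<stat> [TOTAL]" labels in the same order.
 */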

static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i, j;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		for (j = 0; j < DPAA_BPS_NUM; j++) {
			dpaa_bp = priv->dpaa_bps[j];
			if (!dpaa_bp->percpu_count)
				continue;
			bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		}
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

	/* gather congestion related counters */
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

		/* reset congestion stats (like the QMan API does) */
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}
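
/*
 * Note on the export above: copying struct dpaa_rx_errors and struct
 * dpaa_ern_cnt straight into the u64 stats area (and advancing the offset by
 * sizeof(struct ...) / sizeof(u64)) relies on both structures being plain
 * arrays of u64 counters with no padding, so that each field lines up with
 * one entry of dpaa_stats_global[].
 */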

static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings = data;
	num_cpus = num_online_cpus();
	size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	for (i = 0; i < DPAA_BPS_NUM; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN,
				 "bpool %c [CPU %d]", 'a' + i, j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
			 'a' + i);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	memcpy(strings, dpaa_stats_global, size);
}

static int dpaa_get_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *cmd)
{
	struct dpaa_priv *priv = netdev_priv(dev);

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		cmd->data = 0;
		break;
	}

	return 0;
}
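
/*
 * RXH_IP_SRC/RXH_IP_DST and RXH_L4_B_0_1/RXH_L4_B_2_3 are the generic
 * ethtool hash-field flags for the source/destination IP address and the
 * first/second pair of L4 header bytes (i.e. source/destination port).
 * They are only reported while the FMan keygen hash is enabled
 * (priv->keygen_in_use), which dpaa_set_hash() below toggles.
 */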

static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *unused)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = dpaa_get_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
	struct mac_device *mac_dev;
	struct fman_port *rxport;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	rxport = mac_dev->port[0];

	fman_port_use_kg_hash(rxport, enable);
	priv->keygen_in_use = enable;
}

static int dpaa_set_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	int ret = -EINVAL;

	/* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		dpaa_set_hash(dev, !!nfc->data);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = dpaa_set_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

const struct ethtool_ops dpaa_ethtool_ops = {
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
};
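
/*
 * Usage sketch (the interface name is hypothetical): these ops are what the
 * standard ethtool utility ends up invoking, e.g.
 *
 *   ethtool -S eth0                         -> dpaa_get_sset_count(),
 *                                              dpaa_get_strings(),
 *                                              dpaa_get_ethtool_stats()
 *   ethtool -A eth0 rx on tx off            -> dpaa_set_pauseparam()
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn  -> dpaa_set_rxnfc() (ETHTOOL_SRXFH)
 */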