/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_port.h"

struct nfp_et_stat {
        char name[ETH_GSTRING_LEN];
        int off;
};

static const struct nfp_et_stat nfp_net_et_stats[] = {
        /* Stats from the device */
        { "dev_rx_discards", NFP_NET_CFG_STATS_RX_DISCARDS },
        { "dev_rx_errors", NFP_NET_CFG_STATS_RX_ERRORS },
        { "dev_rx_bytes", NFP_NET_CFG_STATS_RX_OCTETS },
        { "dev_rx_uc_bytes", NFP_NET_CFG_STATS_RX_UC_OCTETS },
        { "dev_rx_mc_bytes", NFP_NET_CFG_STATS_RX_MC_OCTETS },
        { "dev_rx_bc_bytes", NFP_NET_CFG_STATS_RX_BC_OCTETS },
        { "dev_rx_pkts", NFP_NET_CFG_STATS_RX_FRAMES },
        { "dev_rx_mc_pkts", NFP_NET_CFG_STATS_RX_MC_FRAMES },
        { "dev_rx_bc_pkts", NFP_NET_CFG_STATS_RX_BC_FRAMES },

        { "dev_tx_discards", NFP_NET_CFG_STATS_TX_DISCARDS },
        { "dev_tx_errors", NFP_NET_CFG_STATS_TX_ERRORS },
        { "dev_tx_bytes", NFP_NET_CFG_STATS_TX_OCTETS },
        { "dev_tx_uc_bytes", NFP_NET_CFG_STATS_TX_UC_OCTETS },
        { "dev_tx_mc_bytes", NFP_NET_CFG_STATS_TX_MC_OCTETS },
        { "dev_tx_bc_bytes", NFP_NET_CFG_STATS_TX_BC_OCTETS },
        { "dev_tx_pkts", NFP_NET_CFG_STATS_TX_FRAMES },
        { "dev_tx_mc_pkts", NFP_NET_CFG_STATS_TX_MC_FRAMES },
        { "dev_tx_bc_pkts", NFP_NET_CFG_STATS_TX_BC_FRAMES },

        { "bpf_pass_pkts", NFP_NET_CFG_STATS_APP0_FRAMES },
        { "bpf_pass_bytes", NFP_NET_CFG_STATS_APP0_BYTES },
        /* see comments in outro functions in nfp_bpf_jit.c to find out
         * how different BPF modes use app-specific counters
         */
        { "bpf_app1_pkts", NFP_NET_CFG_STATS_APP1_FRAMES },
        { "bpf_app1_bytes", NFP_NET_CFG_STATS_APP1_BYTES },
        { "bpf_app2_pkts", NFP_NET_CFG_STATS_APP2_FRAMES },
        { "bpf_app2_bytes", NFP_NET_CFG_STATS_APP2_BYTES },
        { "bpf_app3_pkts", NFP_NET_CFG_STATS_APP3_FRAMES },
        { "bpf_app3_bytes", NFP_NET_CFG_STATS_APP3_BYTES },
};
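
/* The first NN_ET_SWITCH_STATS_LEN entries above are per-device RX counters
 * and the next NN_ET_SWITCH_STATS_LEN entries are the matching TX counters;
 * nfp_vnic_get_hw_stats_strings() relies on this symmetric layout when it
 * swaps the RX/TX naming for representor netdevs.
 */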

static const struct nfp_et_stat nfp_mac_et_stats[] = {
        { "rx_octets", NFP_MAC_STATS_RX_IN_OCTETS, },
        { "rx_frame_too_long_errors",
                        NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
        { "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
        { "rx_vlan_received_ok", NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
        { "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, },
        { "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
        { "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, },
        { "rx_alignment_errors", NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
        { "rx_pause_mac_ctrl_frames",
                        NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
        { "rx_frames_received_ok", NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
        { "rx_frame_check_sequence_errors",
                        NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
        { "rx_unicast_pkts", NFP_MAC_STATS_RX_UNICAST_PKTS, },
        { "rx_multicast_pkts", NFP_MAC_STATS_RX_MULTICAST_PKTS, },
        { "rx_pkts", NFP_MAC_STATS_RX_PKTS, },
        { "rx_undersize_pkts", NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
        { "rx_pkts_64_octets", NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
        { "rx_pkts_65_to_127_octets",
                        NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
        { "rx_pkts_128_to_255_octets",
                        NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
        { "rx_pkts_256_to_511_octets",
                        NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
        { "rx_pkts_512_to_1023_octets",
                        NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
        { "rx_pkts_1024_to_1518_octets",
                        NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
        { "rx_pkts_1519_to_max_octets",
                        NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
        { "rx_jabbers", NFP_MAC_STATS_RX_JABBERS, },
        { "rx_fragments", NFP_MAC_STATS_RX_FRAGMENTS, },
        { "rx_oversize_pkts", NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
        { "rx_pause_frames_class0", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
        { "rx_pause_frames_class1", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
        { "rx_pause_frames_class2", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
        { "rx_pause_frames_class3", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
        { "rx_pause_frames_class4", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
        { "rx_pause_frames_class5", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
        { "rx_pause_frames_class6", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
        { "rx_pause_frames_class7", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
        { "rx_mac_ctrl_frames_received",
                        NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
        { "rx_mac_head_drop", NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
        { "tx_queue_drop", NFP_MAC_STATS_TX_QUEUE_DROP, },
        { "tx_octets", NFP_MAC_STATS_TX_OUT_OCTETS, },
        { "tx_vlan_transmitted_ok", NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
        { "tx_errors", NFP_MAC_STATS_TX_OUT_ERRORS, },
        { "tx_broadcast_pkts", NFP_MAC_STATS_TX_BROADCAST_PKTS, },
        { "tx_pause_mac_ctrl_frames",
                        NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
        { "tx_frames_transmitted_ok",
                        NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
        { "tx_unicast_pkts", NFP_MAC_STATS_TX_UNICAST_PKTS, },
        { "tx_multicast_pkts", NFP_MAC_STATS_TX_MULTICAST_PKTS, },
        { "tx_pkts_64_octets", NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
        { "tx_pkts_65_to_127_octets",
                        NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
        { "tx_pkts_128_to_255_octets",
                        NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
        { "tx_pkts_256_to_511_octets",
                        NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
        { "tx_pkts_512_to_1023_octets",
                        NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
        { "tx_pkts_1024_to_1518_octets",
                        NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
        { "tx_pkts_1519_to_max_octets",
                        NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
        { "tx_pause_frames_class0", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
        { "tx_pause_frames_class1", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
        { "tx_pause_frames_class2", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
        { "tx_pause_frames_class3", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
        { "tx_pause_frames_class4", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
        { "tx_pause_frames_class5", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
        { "tx_pause_frames_class6", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
        { "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};

#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 8
#define NN_RVEC_PER_Q_STATS 3
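
/* NN_RVEC_PER_Q_STATS covers the three per-ring-vector counters reported for
 * each vector (rx_pkts, tx_pkts, tx_busy), while NN_RVEC_GATHER_STATS covers
 * the eight counters summed across all vectors in nfp_vnic_get_sw_stats().
 */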

static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
        struct nfp_nsp *nsp;

        if (!app)
                return;

        nsp = nfp_nsp_open(app->cpp);
        if (IS_ERR(nsp))
                return;

        snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
                 nfp_nsp_get_abi_ver_major(nsp),
                 nfp_nsp_get_abi_ver_minor(nsp));

        nfp_nsp_close(nsp);
}

static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
                const char *vnic_version, struct ethtool_drvinfo *drvinfo)
{
        char nsp_version[ETHTOOL_FWVERS_LEN] = {};

        strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));

        nfp_net_get_nspinfo(app, nsp_version);
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%s %s %s %s", vnic_version, nsp_version,
                 nfp_app_mip_name(app), nfp_app_name(app));
}

static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
        char vnic_version[ETHTOOL_FWVERS_LEN] = {};
        struct nfp_net *nn = netdev_priv(netdev);

        snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
                 nn->fw_ver.resv, nn->fw_ver.class,
                 nn->fw_ver.major, nn->fw_ver.minor);
        strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
                sizeof(drvinfo->bus_info));

        nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}

static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
        struct nfp_app *app;

        app = nfp_app_from_netdev(netdev);
        if (!app)
                return;

        nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}

static void
nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
                          struct ethtool_link_ksettings *c)
{
        unsigned int modes;

        ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
        if (!nfp_eth_can_support_fec(eth_port)) {
                ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
                return;
        }

        modes = nfp_eth_supported_fec_modes(eth_port);
        if (modes & NFP_FEC_BASER) {
                ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
                ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
        }

        if (modes & NFP_FEC_REED_SOLOMON) {
                ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
                ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
        }
}

/**
 * nfp_net_get_link_ksettings - Get Link Speed settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed settings based on info in the BAR provided by the fw.
 */
static int
nfp_net_get_link_ksettings(struct net_device *netdev,
                           struct ethtool_link_ksettings *cmd)
{
        static const u32 ls_to_ethtool[] = {
                [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
                [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = SPEED_UNKNOWN,
                [NFP_NET_CFG_STS_LINK_RATE_1G]          = SPEED_1000,
                [NFP_NET_CFG_STS_LINK_RATE_10G]         = SPEED_10000,
                [NFP_NET_CFG_STS_LINK_RATE_25G]         = SPEED_25000,
                [NFP_NET_CFG_STS_LINK_RATE_40G]         = SPEED_40000,
                [NFP_NET_CFG_STS_LINK_RATE_50G]         = SPEED_50000,
                [NFP_NET_CFG_STS_LINK_RATE_100G]        = SPEED_100000,
        };
        struct nfp_eth_table_port *eth_port;
        struct nfp_port *port;
        struct nfp_net *nn;
        u32 sts, ls;

        /* Init to unknowns */
        ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
        cmd->base.port = PORT_OTHER;
        cmd->base.speed = SPEED_UNKNOWN;
        cmd->base.duplex = DUPLEX_UNKNOWN;

        port = nfp_port_from_netdev(netdev);
        eth_port = nfp_port_get_eth_port(port);
        if (eth_port) {
                cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
                        AUTONEG_ENABLE : AUTONEG_DISABLE;
                nfp_net_set_fec_link_mode(eth_port, cmd);
        }

        if (!netif_carrier_ok(netdev))
                return 0;

        /* Use link speed from ETH table if available, otherwise try the BAR */
        if (eth_port) {
                cmd->base.port = eth_port->port_type;
                cmd->base.speed = eth_port->speed;
                cmd->base.duplex = DUPLEX_FULL;
                return 0;
        }

        if (!nfp_netdev_is_nfp_net(netdev))
                return -EOPNOTSUPP;
        nn = netdev_priv(netdev);

        sts = nn_readl(nn, NFP_NET_CFG_STS);

        ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
        if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
                return -EOPNOTSUPP;

        if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
            ls >= ARRAY_SIZE(ls_to_ethtool))
                return 0;

        cmd->base.speed = ls_to_ethtool[ls];
        cmd->base.duplex = DUPLEX_FULL;

        return 0;
}

static int
nfp_net_set_link_ksettings(struct net_device *netdev,
                           const struct ethtool_link_ksettings *cmd)
{
        struct nfp_eth_table_port *eth_port;
        struct nfp_port *port;
        struct nfp_nsp *nsp;
        int err;

        port = nfp_port_from_netdev(netdev);
        eth_port = __nfp_port_get_eth_port(port);
        if (!eth_port)
                return -EOPNOTSUPP;

        if (netif_running(netdev)) {
                netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
                return -EBUSY;
        }

        nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
        if (IS_ERR(nsp))
                return PTR_ERR(nsp);

        err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
                                 NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
        if (err)
                goto err_bad_set;
        if (cmd->base.speed != SPEED_UNKNOWN) {
                u32 speed = cmd->base.speed / eth_port->lanes;

                err = __nfp_eth_set_speed(nsp, speed);
                if (err)
                        goto err_bad_set;
        }

        err = nfp_eth_config_commit_end(nsp);
        if (err > 0)
                return 0; /* no change */

        nfp_net_refresh_port_table(port);

        return err;

err_bad_set:
        nfp_eth_config_cleanup_end(nsp);
        return err;
}

static void nfp_net_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
{
        struct nfp_net *nn = netdev_priv(netdev);

        ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
        ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
        ring->rx_pending = nn->dp.rxd_cnt;
        ring->tx_pending = nn->dp.txd_cnt;
}

static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
        struct nfp_net_dp *dp;

        dp = nfp_net_clone_dp(nn);
        if (!dp)
                return -ENOMEM;

        dp->rxd_cnt = rxd_cnt;
        dp->txd_cnt = txd_cnt;

        return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int nfp_net_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
{
        struct nfp_net *nn = netdev_priv(netdev);
        u32 rxd_cnt, txd_cnt;

        /* We don't have separate queues/rings for small/large frames. */
        if (ring->rx_mini_pending || ring->rx_jumbo_pending)
                return -EINVAL;

        /* Round up to supported values */
        rxd_cnt = roundup_pow_of_two(ring->rx_pending);
        txd_cnt = roundup_pow_of_two(ring->tx_pending);

        if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
            txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
                return -EINVAL;

        if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
                return 0;

        nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
               nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);

        return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
        va_end(args);

        return data + ETH_GSTRING_LEN;
}
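
/* Every ethtool string occupies a fixed ETH_GSTRING_LEN slot; nfp_pr_et()
 * formats into the current slot and returns a pointer to the next one so
 * callers can simply chain calls to emit consecutive strings.
 */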

static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
        struct nfp_net *nn = netdev_priv(netdev);

        return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
}

static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
        struct nfp_net *nn = netdev_priv(netdev);
        int i;

        for (i = 0; i < nn->dp.num_r_vecs; i++) {
                data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
                data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
                data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
        }

        data = nfp_pr_et(data, "hw_rx_csum_ok");
        data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
        data = nfp_pr_et(data, "hw_rx_csum_err");
        data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
        data = nfp_pr_et(data, "hw_tx_csum");
        data = nfp_pr_et(data, "hw_tx_inner_csum");
        data = nfp_pr_et(data, "tx_gather");
        data = nfp_pr_et(data, "tx_lso");

        return data;
}
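
/* The string order emitted above must match the value order produced by
 * nfp_vnic_get_sw_stats() below: the per-ring-vector triplets first, then
 * the gathered counters.
 */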

static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
        u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
        struct nfp_net *nn = netdev_priv(netdev);
        u64 tmp[NN_RVEC_GATHER_STATS];
        unsigned int i, j;

        for (i = 0; i < nn->dp.num_r_vecs; i++) {
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
                        data[0] = nn->r_vecs[i].rx_pkts;
                        tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
                        tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
                        tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
                        tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
                } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
                        data[1] = nn->r_vecs[i].tx_pkts;
                        data[2] = nn->r_vecs[i].tx_busy;
                        tmp[4] = nn->r_vecs[i].hw_csum_tx;
                        tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
                        tmp[6] = nn->r_vecs[i].tx_gather;
                        tmp[7] = nn->r_vecs[i].tx_lso;
                } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

                data += NN_RVEC_PER_Q_STATS;

                for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
                        gathered_stats[j] += tmp[j];
        }

        for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
                *data++ = gathered_stats[j];

        return data;
}

static unsigned int
nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
{
        return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
}
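
/* The "* 2" accounts for the two 64-bit counters (pkts and bytes) exported
 * per TX and per RX ring, matching the txq_%u_pkts/txq_%u_bytes and
 * rxq_%u_pkts/rxq_%u_bytes strings.
 */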

static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
                              unsigned int tx_rings, bool repr)
{
        int swap_off, i;

        BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
        /* If repr is true first add SWITCH_STATS_LEN and then subtract it
         * effectively swapping the RX and TX statistics (giving us the RX
         * and TX from perspective of the switch).
         */
        swap_off = repr * NN_ET_SWITCH_STATS_LEN;

        for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
                data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name);

        for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
                data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name);

        for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
                data = nfp_pr_et(data, nfp_net_et_stats[i].name);

        for (i = 0; i < tx_rings; i++) {
                data = nfp_pr_et(data, "txq_%u_pkts", i);
                data = nfp_pr_et(data, "txq_%u_bytes", i);
        }

        for (i = 0; i < rx_rings; i++) {
                data = nfp_pr_et(data, "rxq_%u_pkts", i);
                data = nfp_pr_et(data, "rxq_%u_bytes", i);
        }

        return data;
}

static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
                      unsigned int rx_rings, unsigned int tx_rings)
{
        unsigned int i;

        for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
                *data++ = readq(mem + nfp_net_et_stats[i].off);

        for (i = 0; i < tx_rings; i++) {
                *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
                *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
        }

        for (i = 0; i < rx_rings; i++) {
                *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
                *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
        }

        return data;
}

static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
        struct nfp_port *port;

        port = nfp_port_from_netdev(netdev);
        if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
                return 0;

        return ARRAY_SIZE(nfp_mac_et_stats);
}

static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
{
        struct nfp_port *port;
        unsigned int i;

        port = nfp_port_from_netdev(netdev);
        if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
                return data;

        for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
                data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name);

        return data;
}

static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
{
        struct nfp_port *port;
        unsigned int i;

        port = nfp_port_from_netdev(netdev);
        if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
                return data;

        for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
                *data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);

        return data;
}

static void nfp_net_get_strings(struct net_device *netdev,
                                u32 stringset, u8 *data)
{
        struct nfp_net *nn = netdev_priv(netdev);

        switch (stringset) {
        case ETH_SS_STATS:
                data = nfp_vnic_get_sw_stats_strings(netdev, data);
                data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
                                                     nn->dp.num_tx_rings,
                                                     false);
                data = nfp_mac_get_stats_strings(netdev, data);
                break;
        }
}

static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
                  u64 *data)
{
        struct nfp_net *nn = netdev_priv(netdev);

        data = nfp_vnic_get_sw_stats(netdev, data);
        data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
                                     nn->dp.num_rx_rings, nn->dp.num_tx_rings);
        data = nfp_mac_get_stats(netdev, data);
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
        struct nfp_net *nn = netdev_priv(netdev);

        switch (sset) {
        case ETH_SS_STATS:
                return nfp_vnic_get_sw_stats_count(netdev) +
                       nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
                                                   nn->dp.num_tx_rings) +
                       nfp_mac_get_stats_count(netdev);
        default:
                return -EOPNOTSUPP;
        }
}

static void nfp_port_get_strings(struct net_device *netdev,
                                 u32 stringset, u8 *data)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);

        switch (stringset) {
        case ETH_SS_STATS:
                if (nfp_port_is_vnic(port))
                        data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
                else
                        data = nfp_mac_get_stats_strings(netdev, data);
                break;
        }
}

static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
                   u64 *data)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);

        if (nfp_port_is_vnic(port))
                data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
        else
                data = nfp_mac_get_stats(netdev, data);
}

static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);
        unsigned int count;

        switch (sset) {
        case ETH_SS_STATS:
                if (nfp_port_is_vnic(port))
                        count = nfp_vnic_get_hw_stats_count(0, 0);
                else
                        count = nfp_mac_get_stats_count(netdev);
                return count;
        default:
                return -EOPNOTSUPP;
        }
}

static int nfp_port_fec_ethtool_to_nsp(u32 fec)
{
        switch (fec) {
        case ETHTOOL_FEC_AUTO:
                return NFP_FEC_AUTO_BIT;
        case ETHTOOL_FEC_OFF:
                return NFP_FEC_DISABLED_BIT;
        case ETHTOOL_FEC_RS:
                return NFP_FEC_REED_SOLOMON_BIT;
        case ETHTOOL_FEC_BASER:
                return NFP_FEC_BASER_BIT;
        default:
                /* NSP only supports a single mode at a time */
                return -EOPNOTSUPP;
        }
}

static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
{
        u32 result = 0;

        if (fec & NFP_FEC_AUTO)
                result |= ETHTOOL_FEC_AUTO;
        if (fec & NFP_FEC_BASER)
                result |= ETHTOOL_FEC_BASER;
        if (fec & NFP_FEC_REED_SOLOMON)
                result |= ETHTOOL_FEC_RS;
        if (fec & NFP_FEC_DISABLED)
                result |= ETHTOOL_FEC_OFF;

        return result ?: ETHTOOL_FEC_NONE;
}

static int
nfp_port_get_fecparam(struct net_device *netdev,
                      struct ethtool_fecparam *param)
{
        struct nfp_eth_table_port *eth_port;
        struct nfp_port *port;

        param->active_fec = ETHTOOL_FEC_NONE_BIT;
        param->fec = ETHTOOL_FEC_NONE_BIT;

        port = nfp_port_from_netdev(netdev);
        eth_port = nfp_port_get_eth_port(port);
        if (!eth_port)
                return -EOPNOTSUPP;

        if (!nfp_eth_can_support_fec(eth_port))
                return 0;

        param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
        param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);

        return 0;
}

static int
nfp_port_set_fecparam(struct net_device *netdev,
                      struct ethtool_fecparam *param)
{
        struct nfp_eth_table_port *eth_port;
        struct nfp_port *port;
        int err, fec;

        port = nfp_port_from_netdev(netdev);
        eth_port = nfp_port_get_eth_port(port);
        if (!eth_port)
                return -EOPNOTSUPP;

        if (!nfp_eth_can_support_fec(eth_port))
                return -EOPNOTSUPP;

        fec = nfp_port_fec_ethtool_to_nsp(param->fec);
        if (fec < 0)
                return fec;

        err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
        if (!err)
                /* Only refresh if we did something */
                nfp_net_refresh_port_table(port);

        return err < 0 ? err : 0;
}

/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
        static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
                [TCP_V4_FLOW]   = NFP_NET_CFG_RSS_IPV4_TCP,
                [TCP_V6_FLOW]   = NFP_NET_CFG_RSS_IPV6_TCP,
                [UDP_V4_FLOW]   = NFP_NET_CFG_RSS_IPV4_UDP,
                [UDP_V6_FLOW]   = NFP_NET_CFG_RSS_IPV6_UDP,
                [IPV4_FLOW]     = NFP_NET_CFG_RSS_IPV4,
                [IPV6_FLOW]     = NFP_NET_CFG_RSS_IPV6,
        };

        if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
                return 0;

        return xlate_ethtool_to_nfp[flow_type];
}

static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
                                     struct ethtool_rxnfc *cmd)
{
        u32 nfp_rss_flag;

        cmd->data = 0;

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
                return -EOPNOTSUPP;

        nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
        if (!nfp_rss_flag)
                return -EINVAL;

        cmd->data |= RXH_IP_SRC | RXH_IP_DST;
        if (nn->rss_cfg & nfp_rss_flag)
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

        return 0;
}

static int nfp_net_get_rxnfc(struct net_device *netdev,
                             struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
        struct nfp_net *nn = netdev_priv(netdev);

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = nn->dp.num_rx_rings;
                return 0;
        case ETHTOOL_GRXFH:
                return nfp_net_get_rss_hash_opts(nn, cmd);
        default:
                return -EOPNOTSUPP;
        }
}

static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
                                    struct ethtool_rxnfc *nfc)
{
        u32 new_rss_cfg = nn->rss_cfg;
        u32 nfp_rss_flag;
        int err;

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
                return -EOPNOTSUPP;

        /* RSS only supports IP SA/DA and L4 src/dst ports */
        if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
                          RXH_L4_B_0_1 | RXH_L4_B_2_3))
                return -EINVAL;

        /* We need at least the IP SA/DA fields for hashing */
        if (!(nfc->data & RXH_IP_SRC) ||
            !(nfc->data & RXH_IP_DST))
                return -EINVAL;

        nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
        if (!nfp_rss_flag)
                return -EINVAL;

        switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
        case 0:
                new_rss_cfg &= ~nfp_rss_flag;
                break;
        case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
                new_rss_cfg |= nfp_rss_flag;
                break;
        default:
                return -EINVAL;
        }

        new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
        new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

        if (new_rss_cfg == nn->rss_cfg)
                return 0;

        writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
        if (err)
                return err;

        nn->rss_cfg = new_rss_cfg;

        nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
        return 0;
}

static int nfp_net_set_rxnfc(struct net_device *netdev,
                             struct ethtool_rxnfc *cmd)
{
        struct nfp_net *nn = netdev_priv(netdev);

        switch (cmd->cmd) {
        case ETHTOOL_SRXFH:
                return nfp_net_set_rss_hash_opt(nn, cmd);
        default:
                return -EOPNOTSUPP;
        }
}

static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
        struct nfp_net *nn = netdev_priv(netdev);

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
                return 0;

        return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
        struct nfp_net *nn = netdev_priv(netdev);

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
                return -EOPNOTSUPP;

        return nfp_net_rss_key_sz(nn);
}

static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                            u8 *hfunc)
{
        struct nfp_net *nn = netdev_priv(netdev);
        int i;

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
                return -EOPNOTSUPP;

        if (indir)
                for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
                        indir[i] = nn->rss_itbl[i];
        if (key)
                memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
        if (hfunc) {
                *hfunc = nn->rss_hfunc;
                if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
                        *hfunc = ETH_RSS_HASH_UNKNOWN;
        }

        return 0;
}

static int nfp_net_set_rxfh(struct net_device *netdev,
                            const u32 *indir, const u8 *key,
                            const u8 hfunc)
{
        struct nfp_net *nn = netdev_priv(netdev);
        int i;

        if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
            !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
                return -EOPNOTSUPP;

        if (!key && !indir)
                return 0;

        if (key) {
                memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
                nfp_net_rss_write_key(nn);
        }
        if (indir) {
                for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
                        nn->rss_itbl[i] = indir[i];

                nfp_net_rss_write_itbl(nn);
        }

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}

/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
        return NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_regs(struct net_device *netdev,
                             struct ethtool_regs *regs, void *p)
{
        struct nfp_net *nn = netdev_priv(netdev);
        u32 *regs_buf = p;
        int i;

        regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

        for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
                regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}

static int nfp_net_get_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct nfp_net *nn = netdev_priv(netdev);

        if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
                return -EINVAL;

        ec->rx_coalesce_usecs       = nn->rx_coalesce_usecs;
        ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
        ec->tx_coalesce_usecs       = nn->tx_coalesce_usecs;
        ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

        return 0;
}

/* Other debug dumps
 */
static int
nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
{
        struct nfp_resource *res;
        int ret;

        if (!app)
                return -EOPNOTSUPP;

        dump->flag = NFP_DUMP_NSP_DIAG;

        res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
        if (IS_ERR(res))
                return PTR_ERR(res);

        if (buffer) {
                if (dump->len != nfp_resource_size(res)) {
                        ret = -EINVAL;
                        goto exit_release;
                }

                ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
                                   nfp_resource_address(res),
                                   buffer, dump->len);
                if (ret != dump->len)
                        ret = ret < 0 ? ret : -EIO;
                else
                        ret = 0;
        } else {
                dump->len = nfp_resource_size(res);
                ret = 0;
        }
exit_release:
        nfp_resource_release(res);

        return ret;
}

/* Set the dump flag/level. Calculate the dump length for flag > 0 only
 * (new TLV based dumps), since flag 0 (default) calculates the length in
 * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
 * without setting the flag first, for backward compatibility.
 */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
        struct nfp_app *app = nfp_app_from_netdev(netdev);
        s64 len;

        if (!app)
                return -EOPNOTSUPP;

        if (val->flag == NFP_DUMP_NSP_DIAG) {
                app->pf->dump_flag = val->flag;
                return 0;
        }

        if (!app->pf->dumpspec)
                return -EOPNOTSUPP;

        len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
                                          val->flag);
        if (len < 0)
                return len;

        app->pf->dump_flag = val->flag;
        app->pf->dump_len = len;

        return 0;
}

static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
        struct nfp_app *app = nfp_app_from_netdev(netdev);

        if (!app)
                return -EOPNOTSUPP;

        if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
                return nfp_dump_nsp_diag(app, dump, NULL);

        dump->flag = app->pf->dump_flag;
        dump->len = app->pf->dump_len;

        return 0;
}

static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
                      void *buffer)
{
        struct nfp_app *app = nfp_app_from_netdev(netdev);

        if (!app)
                return -EOPNOTSUPP;

        if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
                return nfp_dump_nsp_diag(app, dump, buffer);

        dump->flag = app->pf->dump_flag;
        dump->len = app->pf->dump_len;

        return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
                                            buffer);
}

static int nfp_net_set_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct nfp_net *nn = netdev_priv(netdev);
        unsigned int factor;

        if (ec->rx_coalesce_usecs_irq ||
            ec->rx_max_coalesced_frames_irq ||
            ec->tx_coalesce_usecs_irq ||
            ec->tx_max_coalesced_frames_irq ||
            ec->stats_block_coalesce_usecs ||
            ec->use_adaptive_rx_coalesce ||
            ec->use_adaptive_tx_coalesce ||
            ec->pkt_rate_low ||
            ec->rx_coalesce_usecs_low ||
            ec->rx_max_coalesced_frames_low ||
            ec->tx_coalesce_usecs_low ||
            ec->tx_max_coalesced_frames_low ||
            ec->pkt_rate_high ||
            ec->rx_coalesce_usecs_high ||
            ec->rx_max_coalesced_frames_high ||
            ec->tx_coalesce_usecs_high ||
            ec->tx_max_coalesced_frames_high ||
            ec->rate_sample_interval)
                return -EOPNOTSUPP;

        /* Compute factor used to convert coalesce '_usecs' parameters to
         * ME timestamp ticks.  There are 16 ME clock cycles for each
         * timestamp count.
         */
        factor = nn->me_freq_mhz / 16;
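
        /* Worked example, with an assumed ME clock for illustration only:
         * if nn->me_freq_mhz were 1200, factor would be 75 and the largest
         * rx/tx_coalesce_usecs accepted by the range checks below would be
         * 873, since 874 * 75 exceeds the 16-bit limit.
         */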

        /* Each pair of (usecs, max_frames) fields specifies that interrupts
         * should be coalesced until
         *      (usecs > 0 && time_since_first_completion >= usecs) ||
         *      (max_frames > 0 && completed_frames >= max_frames)
         *
         * It is illegal to set both usecs and max_frames to zero as this
         * would cause interrupts to never be generated. To disable
         * coalescing, set usecs = 0 and max_frames = 1.
         *
         * Some implementations ignore the value of max_frames and use the
         * condition time_since_first_completion >= usecs
         */

        if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
                return -EINVAL;

        /* ensure valid configuration */
        if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
                return -EINVAL;

        if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
                return -EINVAL;

        if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
                return -EINVAL;

        if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
                return -EINVAL;

        if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
                return -EINVAL;

        if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
                return -EINVAL;

        /* configuration is valid */
        nn->rx_coalesce_usecs      = ec->rx_coalesce_usecs;
        nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
        nn->tx_coalesce_usecs      = ec->tx_coalesce_usecs;
        nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;

        /* write configuration to device */
        nfp_net_coalesce_write_cfg(nn);
        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}

static void nfp_net_get_channels(struct net_device *netdev,
                                 struct ethtool_channels *channel)
{
        struct nfp_net *nn = netdev_priv(netdev);
        unsigned int num_tx_rings;

        num_tx_rings = nn->dp.num_tx_rings;
        if (nn->dp.xdp_prog)
                num_tx_rings -= nn->dp.num_rx_rings;

        channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
        channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
        channel->max_combined = min(channel->max_rx, channel->max_tx);
        channel->max_other = NFP_NET_NON_Q_VECTORS;
        channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
        channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
        channel->tx_count = num_tx_rings - channel->combined_count;
        channel->other_count = NFP_NET_NON_Q_VECTORS;
}

static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
                                 unsigned int total_tx)
{
        struct nfp_net_dp *dp;

        dp = nfp_net_clone_dp(nn);
        if (!dp)
                return -ENOMEM;

        dp->num_rx_rings = total_rx;
        dp->num_tx_rings = total_tx;
        /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
        if (dp->xdp_prog)
                dp->num_tx_rings += total_rx;

        return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int nfp_net_set_channels(struct net_device *netdev,
                                struct ethtool_channels *channel)
{
        struct nfp_net *nn = netdev_priv(netdev);
        unsigned int total_rx, total_tx;

        /* Reject unsupported */
        if (!channel->combined_count ||
            channel->other_count != NFP_NET_NON_Q_VECTORS ||
            (channel->rx_count && channel->tx_count))
                return -EINVAL;

        total_rx = channel->combined_count + channel->rx_count;
        total_tx = channel->combined_count + channel->tx_count;

        if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
            total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
                return -EINVAL;

        return nfp_net_set_num_rings(nn, total_rx, total_tx);
}

static int
nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash)
{
        const struct firmware *fw;
        struct nfp_app *app;
        struct nfp_nsp *nsp;
        struct device *dev;
        int err;

        if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
                return -EOPNOTSUPP;

        app = nfp_app_from_netdev(netdev);
        if (!app)
                return -EOPNOTSUPP;

        dev = &app->pdev->dev;

        nsp = nfp_nsp_open(app->cpp);
        if (IS_ERR(nsp)) {
                err = PTR_ERR(nsp);
                dev_err(dev, "Failed to access the NSP: %d\n", err);
                return err;
        }

        err = request_firmware_direct(&fw, flash->data, dev);
        if (err)
                goto exit_close_nsp;

        dev_info(dev, "Please be patient while writing flash image: %s\n",
                 flash->data);
        dev_hold(netdev);
        rtnl_unlock();

        err = nfp_nsp_write_flash(nsp, fw);
        if (err < 0) {
                dev_err(dev, "Flash write failed: %d\n", err);
                goto exit_rtnl_lock;
        }

        dev_info(dev, "Finished writing flash image\n");

exit_rtnl_lock:
        rtnl_lock();
        dev_put(netdev);
        release_firmware(fw);

exit_close_nsp:
        nfp_nsp_close(nsp);
        return err;
}

static const struct ethtool_ops nfp_net_ethtool_ops = {
        .get_drvinfo            = nfp_net_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = nfp_net_get_ringparam,
        .set_ringparam          = nfp_net_set_ringparam,
        .get_strings            = nfp_net_get_strings,
        .get_ethtool_stats      = nfp_net_get_stats,
        .get_sset_count         = nfp_net_get_sset_count,
        .get_rxnfc              = nfp_net_get_rxnfc,
        .set_rxnfc              = nfp_net_set_rxnfc,
        .flash_device           = nfp_net_flash_device,
        .get_rxfh_indir_size    = nfp_net_get_rxfh_indir_size,
        .get_rxfh_key_size      = nfp_net_get_rxfh_key_size,
        .get_rxfh               = nfp_net_get_rxfh,
        .set_rxfh               = nfp_net_set_rxfh,
        .get_regs_len           = nfp_net_get_regs_len,
        .get_regs               = nfp_net_get_regs,
        .set_dump               = nfp_app_set_dump,
        .get_dump_flag          = nfp_app_get_dump_flag,
        .get_dump_data          = nfp_app_get_dump_data,
        .get_coalesce           = nfp_net_get_coalesce,
        .set_coalesce           = nfp_net_set_coalesce,
        .get_channels           = nfp_net_get_channels,
        .set_channels           = nfp_net_set_channels,
        .get_link_ksettings     = nfp_net_get_link_ksettings,
        .set_link_ksettings     = nfp_net_set_link_ksettings,
        .get_fecparam           = nfp_port_get_fecparam,
        .set_fecparam           = nfp_port_set_fecparam,
};

const struct ethtool_ops nfp_port_ethtool_ops = {
        .get_drvinfo            = nfp_app_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = nfp_port_get_strings,
        .get_ethtool_stats      = nfp_port_get_stats,
        .get_sset_count         = nfp_port_get_sset_count,
        .flash_device           = nfp_net_flash_device,
        .set_dump               = nfp_app_set_dump,
        .get_dump_flag          = nfp_app_get_dump_flag,
        .get_dump_data          = nfp_app_get_dump_data,
        .get_link_ksettings     = nfp_net_get_link_ksettings,
        .set_link_ksettings     = nfp_net_set_link_ksettings,
        .get_fecparam           = nfp_port_get_fecparam,
        .set_fecparam           = nfp_port_set_fecparam,
};

void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &nfp_net_ethtool_ops;
}