/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_port.h"

struct nfp_et_stat {
	char name[ETH_GSTRING_LEN];
	int off;
};

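/* Each nfp_et_stat entry pairs an ethtool string with an offset; for the
 * vNIC stats below the offset is relative to the control BAR and is read
 * with readq() in nfp_vnic_get_hw_stats(), while for the MAC stats it is
 * relative to the port's memory-mapped stats area (port->eth_stats).
 */
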
static const struct nfp_et_stat nfp_net_et_stats[] = {
	/* Stats from the device */
	{ "dev_rx_discards",	NFP_NET_CFG_STATS_RX_DISCARDS },
	{ "dev_rx_errors",	NFP_NET_CFG_STATS_RX_ERRORS },
	{ "dev_rx_bytes",	NFP_NET_CFG_STATS_RX_OCTETS },
	{ "dev_rx_uc_bytes",	NFP_NET_CFG_STATS_RX_UC_OCTETS },
	{ "dev_rx_mc_bytes",	NFP_NET_CFG_STATS_RX_MC_OCTETS },
	{ "dev_rx_bc_bytes",	NFP_NET_CFG_STATS_RX_BC_OCTETS },
	{ "dev_rx_pkts",	NFP_NET_CFG_STATS_RX_FRAMES },
	{ "dev_rx_mc_pkts",	NFP_NET_CFG_STATS_RX_MC_FRAMES },
	{ "dev_rx_bc_pkts",	NFP_NET_CFG_STATS_RX_BC_FRAMES },

	{ "dev_tx_discards",	NFP_NET_CFG_STATS_TX_DISCARDS },
	{ "dev_tx_errors",	NFP_NET_CFG_STATS_TX_ERRORS },
	{ "dev_tx_bytes",	NFP_NET_CFG_STATS_TX_OCTETS },
	{ "dev_tx_uc_bytes",	NFP_NET_CFG_STATS_TX_UC_OCTETS },
	{ "dev_tx_mc_bytes",	NFP_NET_CFG_STATS_TX_MC_OCTETS },
	{ "dev_tx_bc_bytes",	NFP_NET_CFG_STATS_TX_BC_OCTETS },
	{ "dev_tx_pkts",	NFP_NET_CFG_STATS_TX_FRAMES },
	{ "dev_tx_mc_pkts",	NFP_NET_CFG_STATS_TX_MC_FRAMES },
	{ "dev_tx_bc_pkts",	NFP_NET_CFG_STATS_TX_BC_FRAMES },

	{ "bpf_pass_pkts",	NFP_NET_CFG_STATS_APP0_FRAMES },
	{ "bpf_pass_bytes",	NFP_NET_CFG_STATS_APP0_BYTES },
	/* see comments in outro functions in nfp_bpf_jit.c to find out
	 * how different BPF modes use app-specific counters
	 */
	{ "bpf_app1_pkts",	NFP_NET_CFG_STATS_APP1_FRAMES },
	{ "bpf_app1_bytes",	NFP_NET_CFG_STATS_APP1_BYTES },
	{ "bpf_app2_pkts",	NFP_NET_CFG_STATS_APP2_FRAMES },
	{ "bpf_app2_bytes",	NFP_NET_CFG_STATS_APP2_BYTES },
	{ "bpf_app3_pkts",	NFP_NET_CFG_STATS_APP3_FRAMES },
	{ "bpf_app3_bytes",	NFP_NET_CFG_STATS_APP3_BYTES },
};

static const struct nfp_et_stat nfp_mac_et_stats[] = {
	{ "rx_octets",			NFP_MAC_STATS_RX_IN_OCTETS, },
	{ "rx_frame_too_long_errors",
			NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
	{ "rx_range_length_errors",	NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
	{ "rx_vlan_received_ok",	NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
	{ "rx_errors",			NFP_MAC_STATS_RX_IN_ERRORS, },
	{ "rx_broadcast_pkts",		NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
	{ "rx_drop_events",		NFP_MAC_STATS_RX_DROP_EVENTS, },
	{ "rx_alignment_errors",	NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
	{ "rx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
	{ "rx_frames_received_ok",	NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
	{ "rx_frame_check_sequence_errors",
			NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
	{ "rx_unicast_pkts",		NFP_MAC_STATS_RX_UNICAST_PKTS, },
	{ "rx_multicast_pkts",		NFP_MAC_STATS_RX_MULTICAST_PKTS, },
	{ "rx_pkts",			NFP_MAC_STATS_RX_PKTS, },
	{ "rx_undersize_pkts",		NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
	{ "rx_pkts_64_octets",		NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
	{ "rx_pkts_65_to_127_octets",
			NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
	{ "rx_pkts_128_to_255_octets",
			NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
	{ "rx_pkts_256_to_511_octets",
			NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
	{ "rx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
	{ "rx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
	{ "rx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
	{ "rx_jabbers",			NFP_MAC_STATS_RX_JABBERS, },
	{ "rx_fragments",		NFP_MAC_STATS_RX_FRAGMENTS, },
	{ "rx_oversize_pkts",		NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
	{ "rx_pause_frames_class0",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
	{ "rx_pause_frames_class1",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
	{ "rx_pause_frames_class2",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
	{ "rx_pause_frames_class3",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
	{ "rx_pause_frames_class4",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
	{ "rx_pause_frames_class5",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
	{ "rx_pause_frames_class6",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
	{ "rx_pause_frames_class7",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
	{ "rx_mac_ctrl_frames_received",
			NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
	{ "rx_mac_head_drop",		NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
	{ "tx_queue_drop",		NFP_MAC_STATS_TX_QUEUE_DROP, },
	{ "tx_octets",			NFP_MAC_STATS_TX_OUT_OCTETS, },
	{ "tx_vlan_transmitted_ok",	NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
	{ "tx_errors",			NFP_MAC_STATS_TX_OUT_ERRORS, },
	{ "tx_broadcast_pkts",		NFP_MAC_STATS_TX_BROADCAST_PKTS, },
	{ "tx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
	{ "tx_frames_transmitted_ok",
			NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
	{ "tx_unicast_pkts",		NFP_MAC_STATS_TX_UNICAST_PKTS, },
	{ "tx_multicast_pkts",		NFP_MAC_STATS_TX_MULTICAST_PKTS, },
	{ "tx_pkts_64_octets",		NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
	{ "tx_pkts_65_to_127_octets",
			NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
	{ "tx_pkts_128_to_255_octets",
			NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
	{ "tx_pkts_256_to_511_octets",
			NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
	{ "tx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
	{ "tx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
	{ "tx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
	{ "tx_pause_frames_class0",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
	{ "tx_pause_frames_class1",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
	{ "tx_pause_frames_class2",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
	{ "tx_pause_frames_class3",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
	{ "tx_pause_frames_class4",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
	{ "tx_pause_frames_class5",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
	{ "tx_pause_frames_class6",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
	{ "tx_pause_frames_class7",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};

#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 9
#define NN_RVEC_PER_Q_STATS 3

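/* NN_RVEC_PER_Q_STATS counts the three per-ring-vector counters reported
 * below (rvec_N_rx_pkts, rvec_N_tx_pkts, rvec_N_tx_busy);
 * NN_RVEC_GATHER_STATS counts the nine software counters that
 * nfp_vnic_get_sw_stats() accumulates across all ring vectors
 * (hw_rx_csum_ok ... tx_lso).
 */
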
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
	struct nfp_nsp *nsp;

	if (!app)
		return;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp))
		return;

	snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
		 nfp_nsp_get_abi_ver_major(nsp),
		 nfp_nsp_get_abi_ver_minor(nsp));

	nfp_nsp_close(nsp);
}

static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
		const char *vnic_version, struct ethtool_drvinfo *drvinfo)
{
	char nsp_version[ETHTOOL_FWVERS_LEN] = {};

	strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));

	nfp_net_get_nspinfo(app, nsp_version);
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%s %s %s %s", vnic_version, nsp_version,
		 nfp_app_mip_name(app), nfp_app_name(app));
}

static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	char vnic_version[ETHTOOL_FWVERS_LEN] = {};
	struct nfp_net *nn = netdev_priv(netdev);

	snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
		 nn->fw_ver.resv, nn->fw_ver.class,
		 nn->fw_ver.major, nn->fw_ver.minor);
	strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
		sizeof(drvinfo->bus_info));

	nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}

static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	strlcpy(drvinfo->bus_info, pci_name(app->pdev),
		sizeof(drvinfo->bus_info));
	nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}

static void
nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
			  struct ethtool_link_ksettings *c)
{
	unsigned int modes;

	ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
	if (!nfp_eth_can_support_fec(eth_port)) {
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
		return;
	}

	modes = nfp_eth_supported_fec_modes(eth_port);
	if (modes & NFP_FEC_BASER) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
	}

	if (modes & NFP_FEC_REED_SOLOMON) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
	}
}

/**
 * nfp_net_get_link_ksettings - Get Link Speed settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed settings based on info in the BAR provided by the fw.
 */
static int
nfp_net_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *cmd)
{
	static const u32 ls_to_ethtool[] = {
		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED]	= 0,
		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]	= SPEED_UNKNOWN,
		[NFP_NET_CFG_STS_LINK_RATE_1G]		= SPEED_1000,
		[NFP_NET_CFG_STS_LINK_RATE_10G]		= SPEED_10000,
		[NFP_NET_CFG_STS_LINK_RATE_25G]		= SPEED_25000,
		[NFP_NET_CFG_STS_LINK_RATE_40G]		= SPEED_40000,
		[NFP_NET_CFG_STS_LINK_RATE_50G]		= SPEED_50000,
		[NFP_NET_CFG_STS_LINK_RATE_100G]	= SPEED_100000,
	};
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_net *nn;
	u32 sts, ls;

	/* Init to unknowns */
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (eth_port) {
		cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
			AUTONEG_ENABLE : AUTONEG_DISABLE;
		nfp_net_set_fec_link_mode(eth_port, cmd);
	}

	if (!netif_carrier_ok(netdev))
		return 0;

	/* Use link speed from ETH table if available, otherwise try the BAR */
	if (eth_port) {
		cmd->base.port = eth_port->port_type;
		cmd->base.speed = eth_port->speed;
		cmd->base.duplex = DUPLEX_FULL;
		return 0;
	}

	if (!nfp_netdev_is_nfp_net(netdev))
		return -EOPNOTSUPP;
	nn = netdev_priv(netdev);

	sts = nn_readl(nn, NFP_NET_CFG_STS);

	ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
	if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
		return -EOPNOTSUPP;

	if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
	    ls >= ARRAY_SIZE(ls_to_ethtool))
		return 0;

	cmd->base.speed = ls_to_ethtool[ls];
	cmd->base.duplex = DUPLEX_FULL;

	return 0;
}

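/* Writing link settings goes through the NSP: autoneg is configured first,
 * then the requested total speed is divided by the port's lane count before
 * being handed to __nfp_eth_set_speed(), and the change only takes effect
 * once nfp_eth_config_commit_end() succeeds.
 */
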
static int
nfp_net_set_link_ksettings(struct net_device *netdev,
			   const struct ethtool_link_ksettings *cmd)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (netif_running(netdev)) {
		netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
		return -EBUSY;
	}

	nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
				 NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
	if (err)
		goto err_bad_set;
	if (cmd->base.speed != SPEED_UNKNOWN) {
		u32 speed = cmd->base.speed / eth_port->lanes;

		err = __nfp_eth_set_speed(nsp, speed);
		if (err)
			goto err_bad_set;
	}

	err = nfp_eth_config_commit_end(nsp);
	if (err > 0)
		return 0; /* no change */
	if (err)
		return err;

	nfp_net_refresh_port_table(port);

	return 0;

err_bad_set:
	nfp_eth_config_cleanup_end(nsp);
	return err;
}

static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);

	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
	ring->rx_pending = nn->dp.rxd_cnt;
	ring->tx_pending = nn->dp.txd_cnt;
}

static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->rxd_cnt = rxd_cnt;
	dp->txd_cnt = txd_cnt;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

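/* nfp_net_set_ringparam() below rounds the requested descriptor counts up to
 * the next power of two before validating them; e.g. a request for 1000 RX
 * descriptors becomes 1024, which is then checked against
 * NFP_NET_MIN/MAX_RX_DESCS.
 */
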
static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 rxd_cnt, txd_cnt;

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
		return -EINVAL;

	if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	return data + ETH_GSTRING_LEN;
}

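/* nfp_pr_et() formats a single ethtool string into the strings buffer and
 * returns the buffer advanced by ETH_GSTRING_LEN, so the *_strings() helpers
 * below can simply chain calls: data = nfp_pr_et(data, ...).
 */
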
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
}

static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->max_r_vecs; i++) {
		data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
		data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
		data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
	}

	data = nfp_pr_et(data, "hw_rx_csum_ok");
	data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
	data = nfp_pr_et(data, "hw_rx_csum_complete");
	data = nfp_pr_et(data, "hw_rx_csum_err");
	data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
	data = nfp_pr_et(data, "hw_tx_csum");
	data = nfp_pr_et(data, "hw_tx_inner_csum");
	data = nfp_pr_et(data, "tx_gather");
	data = nfp_pr_et(data, "tx_lso");

	return data;
}

static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
	u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	u64 tmp[NN_RVEC_GATHER_STATS];
	unsigned int i, j;

	for (i = 0; i < nn->max_r_vecs; i++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
			data[0] = nn->r_vecs[i].rx_pkts;
			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
			tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
			tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
			data[1] = nn->r_vecs[i].tx_pkts;
			data[2] = nn->r_vecs[i].tx_busy;
			tmp[5] = nn->r_vecs[i].hw_csum_tx;
			tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
			tmp[7] = nn->r_vecs[i].tx_gather;
			tmp[8] = nn->r_vecs[i].tx_lso;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

		data += NN_RVEC_PER_Q_STATS;

		/* Accumulate the gathered counters outside the retry loops */
		for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
			gathered_stats[j] += tmp[j];
	}

	for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
		*data++ = gathered_stats[j];

	return data;
}

static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
	return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}

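/* Worked example for the repr swap below: NN_ET_SWITCH_STATS_LEN is 9, so
 * with repr == true string 0 is taken from nfp_net_et_stats[9]
 * ("dev_tx_discards") and string 9 from nfp_net_et_stats[0]
 * ("dev_rx_discards"), i.e. RX and TX are reported from the switch's point
 * of view.
 */
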
static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
	int swap_off, i;

	BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
	/* If repr is true first add SWITCH_STATS_LEN and then subtract it
	 * effectively swapping the RX and TX statistics (giving us the RX
	 * and TX from perspective of the switch).
	 */
	swap_off = repr * NN_ET_SWITCH_STATS_LEN;

	for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i].name);

	for (i = 0; i < num_vecs; i++) {
		data = nfp_pr_et(data, "rxq_%u_pkts", i);
		data = nfp_pr_et(data, "rxq_%u_bytes", i);
		data = nfp_pr_et(data, "txq_%u_pkts", i);
		data = nfp_pr_et(data, "txq_%u_bytes", i);
	}

	return data;
}

static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
	unsigned int i;

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
		*data++ = readq(mem + nfp_net_et_stats[i].off);

	for (i = 0; i < num_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}

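/* The MAC statistics below are only available when the netdev is backed by a
 * physical port with an ETH table entry and a mapped stats area
 * (port->eth_stats); otherwise nfp_mac_get_stats_count() reports zero and the
 * string/stat helpers pass the buffer through unchanged.
 */
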
static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return 0;

	return ARRAY_SIZE(nfp_mac_et_stats);
}

static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name);

	return data;
}

static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		*data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);

	return data;
}

static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		data = nfp_vnic_get_sw_stats_strings(netdev, data);
		data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
						     false);
		data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(nn->port, data);
		break;
	}
}

static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		  u64 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	data = nfp_vnic_get_sw_stats(netdev, data);
	data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
	data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(nn->port, data);
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return nfp_vnic_get_sw_stats_count(netdev) +
		       nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
		       nfp_mac_get_stats_count(netdev) +
		       nfp_app_port_get_stats_count(nn->port);
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_port_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			data = nfp_vnic_get_hw_stats_strings(data, 0, true);
		else
			data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(port, data);
		break;
	}
}

static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		   u64 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (nfp_port_is_vnic(port))
		data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
	else
		data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(port, data);
}

static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	unsigned int count;

	switch (sset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			count = nfp_vnic_get_hw_stats_count(0);
		else
			count = nfp_mac_get_stats_count(netdev);
		count += nfp_app_port_get_stats_count(port);
		return count;
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_port_fec_ethtool_to_nsp(u32 fec)
{
	switch (fec) {
	case ETHTOOL_FEC_AUTO:
		return NFP_FEC_AUTO_BIT;
	case ETHTOOL_FEC_OFF:
		return NFP_FEC_DISABLED_BIT;
	case ETHTOOL_FEC_RS:
		return NFP_FEC_REED_SOLOMON_BIT;
	case ETHTOOL_FEC_BASER:
		return NFP_FEC_BASER_BIT;
	default:
		/* NSP only supports a single mode at a time */
		return -EOPNOTSUPP;
	}
}

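/* The reverse translation below maps the NSP FEC capability bitmap to the
 * ethtool bitmap; several bits may be set at once, and ETHTOOL_FEC_NONE is
 * reported when the NSP advertises no FEC modes at all.
 */
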
static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
{
	u32 result = 0;

	if (fec & NFP_FEC_AUTO)
		result |= ETHTOOL_FEC_AUTO;
	if (fec & NFP_FEC_BASER)
		result |= ETHTOOL_FEC_BASER;
	if (fec & NFP_FEC_REED_SOLOMON)
		result |= ETHTOOL_FEC_RS;
	if (fec & NFP_FEC_DISABLED)
		result |= ETHTOOL_FEC_OFF;

	return result ?: ETHTOOL_FEC_NONE;
}

static int
nfp_port_get_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;

	param->active_fec = ETHTOOL_FEC_NONE_BIT;
	param->fec = ETHTOOL_FEC_NONE_BIT;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return 0;

	param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
	param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);

	return 0;
}

static int
nfp_port_set_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err, fec;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return -EOPNOTSUPP;

	fec = nfp_port_fec_ethtool_to_nsp(param->fec);
	if (fec < 0)
		return fec;

	err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
	if (!err)
		/* Only refresh if we did something */
		nfp_net_refresh_port_table(port);

	return err < 0 ? err : 0;
}

/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}

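/* Flow types not present in the table above (or beyond IPV6_FLOW) translate
 * to 0, which the RSS get/set helpers below treat as "unsupported" and turn
 * into an error for the caller.
 */
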
static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
{
	u32 nfp_rss_flag;

	cmd->data = 0;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	if (nn->rss_cfg & nfp_rss_flag)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

	return 0;
}

static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nn->dp.num_rx_rings;
		return 0;
	case ETHTOOL_GRXFH:
		return nfp_net_get_rss_hash_opts(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}

static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return nfp_net_set_rss_hash_opt(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return 0;

	return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return 0;

	return nfp_net_rss_key_sz(nn);
}

static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	if (indir)
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			indir[i] = nn->rss_itbl[i];
	if (key)
		memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
	if (hfunc) {
		*hfunc = nn->rss_hfunc;
		if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
			*hfunc = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int nfp_net_set_rxfh(struct net_device *netdev,
			    const u32 *indir, const u8 *key,
			    const u8 hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
	    !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
		return -EOPNOTSUPP;

	if (!key && !indir)
		return 0;

	if (key) {
		memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
		nfp_net_rss_write_key(nn);
	}
	if (indir) {
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			nn->rss_itbl[i] = indir[i];

		nfp_net_rss_write_itbl(nn);
	}

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}

/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
	return NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 *regs_buf = p;
	int i;

	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
		regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}

static int nfp_net_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	ec->rx_coalesce_usecs = nn->rx_coalesce_usecs;
	ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
	ec->tx_coalesce_usecs = nn->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

	return 0;
}

/* Other debug dumps
 */
static int
nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
{
	struct nfp_resource *res;
	int ret;

	if (!app)
		return -EOPNOTSUPP;

	dump->flag = NFP_DUMP_NSP_DIAG;

	res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
	if (IS_ERR(res))
		return PTR_ERR(res);

	if (buffer) {
		if (dump->len != nfp_resource_size(res)) {
			ret = -EINVAL;
			goto exit_release;
		}

		ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
				   nfp_resource_address(res),
				   buffer, dump->len);
		if (ret != dump->len)
			ret = ret < 0 ? ret : -EIO;
		else
			ret = 0;
	} else {
		dump->len = nfp_resource_size(res);
		ret = 0;
	}

exit_release:
	nfp_resource_release(res);
	return ret;
}

/* Set the dump flag/level. Calculate the dump length for flag > 0 only (new TLV
 * based dumps), since flag 0 (default) calculates the length in
 * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
 * without setting the flag first, for backward compatibility.
 */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	s64 len;

	if (!app)
		return -EOPNOTSUPP;

	if (val->flag == NFP_DUMP_NSP_DIAG) {
		app->pf->dump_flag = val->flag;
		return 0;
	}

	if (!app->pf->dumpspec)
		return -EOPNOTSUPP;

	len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
					  val->flag);
	if (len < 0)
		return len;

	app->pf->dump_flag = val->flag;
	app->pf->dump_len = len;

	return 0;
}

static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
		return nfp_dump_nsp_diag(app, dump, NULL);

	dump->flag = app->pf->dump_flag;
	dump->len = app->pf->dump_len;

	return 0;
}

static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
		      void *buffer)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
		return nfp_dump_nsp_diag(app, dump, buffer);

	dump->flag = app->pf->dump_flag;
	dump->len = app->pf->dump_len;

	return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
					    buffer);
}

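/* Illustration of the '_usecs' conversion done in nfp_net_set_coalesce():
 * with a hypothetical ME frequency of 1200 MHz the factor is 1200 / 16 = 75
 * timestamp ticks per microsecond, so rx/tx_coalesce_usecs must stay below
 * (2^16 - 1) / 75 (roughly 873 us) to fit the 16-bit hardware field.
 */
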
static int nfp_net_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int factor;

	if (ec->rx_coalesce_usecs_irq ||
	    ec->rx_max_coalesced_frames_irq ||
	    ec->tx_coalesce_usecs_irq ||
	    ec->tx_max_coalesced_frames_irq ||
	    ec->stats_block_coalesce_usecs ||
	    ec->use_adaptive_rx_coalesce ||
	    ec->use_adaptive_tx_coalesce ||
	    ec->pkt_rate_low ||
	    ec->rx_coalesce_usecs_low ||
	    ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low ||
	    ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high ||
	    ec->rx_coalesce_usecs_high ||
	    ec->rx_max_coalesced_frames_high ||
	    ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high ||
	    ec->rate_sample_interval)
		return -EOPNOTSUPP;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->me_freq_mhz / 16;

	/* Each pair of (usecs, max_frames) fields specifies that interrupts
	 * should be coalesced until
	 *      (usecs > 0 && time_since_first_completion >= usecs) ||
	 *      (max_frames > 0 && completed_frames >= max_frames)
	 *
	 * It is illegal to set both usecs and max_frames to zero as this would
	 * cause interrupts to never be generated. To disable coalescing, set
	 * usecs = 0 and max_frames = 1.
	 *
	 * Some implementations ignore the value of max_frames and use the
	 * condition time_since_first_completion >= usecs
	 */

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	/* ensure valid configuration */
	if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
		return -EINVAL;

	if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
		return -EINVAL;

	if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	/* configuration is valid */
	nn->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
	nn->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;

	/* write configuration to device */
	nfp_net_coalesce_write_cfg(nn);
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}

static void nfp_net_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int num_tx_rings;

	num_tx_rings = nn->dp.num_tx_rings;
	if (nn->dp.xdp_prog)
		num_tx_rings -= nn->dp.num_rx_rings;

	channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
	channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
	channel->max_combined = min(channel->max_rx, channel->max_tx);
	channel->max_other = NFP_NET_NON_Q_VECTORS;
	channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
	channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
	channel->tx_count = num_tx_rings - channel->combined_count;
	channel->other_count = NFP_NET_NON_Q_VECTORS;
}

static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
				 unsigned int total_tx)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->num_rx_rings = total_rx;
	dp->num_tx_rings = total_tx;
	/* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
	if (dp->xdp_prog)
		dp->num_tx_rings += total_rx;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

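/* ethtool channel accounting used below: every "combined" channel pairs one
 * RX and one TX ring on a ring vector, while rx_count/tx_count describe extra
 * RX-only or TX-only rings; requesting both kinds of extras at the same time
 * is rejected as unsupported.
 */
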
static int nfp_net_set_channels(struct net_device *netdev,
				struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int total_rx, total_tx;

	/* Reject unsupported */
	if (!channel->combined_count ||
	    channel->other_count != NFP_NET_NON_Q_VECTORS ||
	    (channel->rx_count && channel->tx_count))
		return -EINVAL;

	total_rx = channel->combined_count + channel->rx_count;
	total_tx = channel->combined_count + channel->tx_count;

	if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
	    total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
		return -EINVAL;

	return nfp_net_set_num_rings(nn, total_rx, total_tx);
}

static int
nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash)
{
	const struct firmware *fw;
	struct nfp_app *app;
	struct nfp_nsp *nsp;
	struct device *dev;
	int err;

	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
		return -EOPNOTSUPP;

	app = nfp_app_from_netdev(netdev);
	if (!app)
		return -EOPNOTSUPP;

	dev = &app->pdev->dev;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		dev_err(dev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	err = request_firmware_direct(&fw, flash->data, dev);
	if (err)
		goto exit_close_nsp;

	dev_info(dev, "Please be patient while writing flash image: %s\n",
		 flash->data);
	dev_hold(netdev);
	rtnl_unlock();

	err = nfp_nsp_write_flash(nsp, fw);
	if (err < 0) {
		dev_err(dev, "Flash write failed: %d\n", err);
		goto exit_rtnl_lock;
	}

	dev_info(dev, "Finished writing flash image\n");

exit_rtnl_lock:
	rtnl_lock();
	dev_put(netdev);
	release_firmware(fw);

exit_close_nsp:
	nfp_nsp_close(nsp);
	return err;
}

static const struct ethtool_ops nfp_net_ethtool_ops = {
	.get_drvinfo		= nfp_net_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= nfp_net_get_ringparam,
	.set_ringparam		= nfp_net_set_ringparam,
	.get_strings		= nfp_net_get_strings,
	.get_ethtool_stats	= nfp_net_get_stats,
	.get_sset_count		= nfp_net_get_sset_count,
	.get_rxnfc		= nfp_net_get_rxnfc,
	.set_rxnfc		= nfp_net_set_rxnfc,
	.flash_device		= nfp_net_flash_device,
	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
	.get_rxfh		= nfp_net_get_rxfh,
	.set_rxfh		= nfp_net_set_rxfh,
	.get_regs_len		= nfp_net_get_regs_len,
	.get_regs		= nfp_net_get_regs,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_coalesce		= nfp_net_get_coalesce,
	.set_coalesce		= nfp_net_set_coalesce,
	.get_channels		= nfp_net_get_channels,
	.set_channels		= nfp_net_set_channels,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
	.get_fecparam		= nfp_port_get_fecparam,
	.set_fecparam		= nfp_port_set_fecparam,
};

const struct ethtool_ops nfp_port_ethtool_ops = {
	.get_drvinfo		= nfp_app_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= nfp_port_get_strings,
	.get_ethtool_stats	= nfp_port_get_stats,
	.get_sset_count		= nfp_port_get_sset_count,
	.flash_device		= nfp_net_flash_device,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
	.get_fecparam		= nfp_port_get_fecparam,
	.set_fecparam		= nfp_port_set_fecparam,
};

void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nfp_net_ethtool_ops;
}