// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/* nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/sfp.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_port.h"

struct nfp_et_stat {
	char name[ETH_GSTRING_LEN];
	int off;
};

static const struct nfp_et_stat nfp_net_et_stats[] = {
	/* Stats from the device */
	{ "dev_rx_discards",	NFP_NET_CFG_STATS_RX_DISCARDS },
	{ "dev_rx_errors",	NFP_NET_CFG_STATS_RX_ERRORS },
	{ "dev_rx_bytes",	NFP_NET_CFG_STATS_RX_OCTETS },
	{ "dev_rx_uc_bytes",	NFP_NET_CFG_STATS_RX_UC_OCTETS },
	{ "dev_rx_mc_bytes",	NFP_NET_CFG_STATS_RX_MC_OCTETS },
	{ "dev_rx_bc_bytes",	NFP_NET_CFG_STATS_RX_BC_OCTETS },
	{ "dev_rx_pkts",	NFP_NET_CFG_STATS_RX_FRAMES },
	{ "dev_rx_mc_pkts",	NFP_NET_CFG_STATS_RX_MC_FRAMES },
	{ "dev_rx_bc_pkts",	NFP_NET_CFG_STATS_RX_BC_FRAMES },

	{ "dev_tx_discards",	NFP_NET_CFG_STATS_TX_DISCARDS },
	{ "dev_tx_errors",	NFP_NET_CFG_STATS_TX_ERRORS },
	{ "dev_tx_bytes",	NFP_NET_CFG_STATS_TX_OCTETS },
	{ "dev_tx_uc_bytes",	NFP_NET_CFG_STATS_TX_UC_OCTETS },
	{ "dev_tx_mc_bytes",	NFP_NET_CFG_STATS_TX_MC_OCTETS },
	{ "dev_tx_bc_bytes",	NFP_NET_CFG_STATS_TX_BC_OCTETS },
	{ "dev_tx_pkts",	NFP_NET_CFG_STATS_TX_FRAMES },
	{ "dev_tx_mc_pkts",	NFP_NET_CFG_STATS_TX_MC_FRAMES },
	{ "dev_tx_bc_pkts",	NFP_NET_CFG_STATS_TX_BC_FRAMES },

	{ "bpf_pass_pkts",	NFP_NET_CFG_STATS_APP0_FRAMES },
	{ "bpf_pass_bytes",	NFP_NET_CFG_STATS_APP0_BYTES },
	/* see comments in outro functions in nfp_bpf_jit.c to find out
	 * how different BPF modes use app-specific counters
	 */
	{ "bpf_app1_pkts",	NFP_NET_CFG_STATS_APP1_FRAMES },
	{ "bpf_app1_bytes",	NFP_NET_CFG_STATS_APP1_BYTES },
	{ "bpf_app2_pkts",	NFP_NET_CFG_STATS_APP2_FRAMES },
	{ "bpf_app2_bytes",	NFP_NET_CFG_STATS_APP2_BYTES },
	{ "bpf_app3_pkts",	NFP_NET_CFG_STATS_APP3_FRAMES },
	{ "bpf_app3_bytes",	NFP_NET_CFG_STATS_APP3_BYTES },
};

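/* Port MAC statistics; each entry is read with readq() from the
 * memory-mapped MAC stats area at port->eth_stats + off.
 */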
static const struct nfp_et_stat nfp_mac_et_stats[] = {
	{ "rx_octets",			NFP_MAC_STATS_RX_IN_OCTETS },
	{ "rx_frame_too_long_errors",	NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS },
	{ "rx_range_length_errors",	NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS },
	{ "rx_vlan_received_ok",	NFP_MAC_STATS_RX_VLAN_RECEIVED_OK },
	{ "rx_errors",			NFP_MAC_STATS_RX_IN_ERRORS },
	{ "rx_broadcast_pkts",		NFP_MAC_STATS_RX_IN_BROADCAST_PKTS },
	{ "rx_drop_events",		NFP_MAC_STATS_RX_DROP_EVENTS },
	{ "rx_alignment_errors",	NFP_MAC_STATS_RX_ALIGNMENT_ERRORS },
	{ "rx_pause_mac_ctrl_frames",	NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES },
	{ "rx_frames_received_ok",	NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK },
	{ "rx_frame_check_sequence_errors",
			NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS },
	{ "rx_unicast_pkts",		NFP_MAC_STATS_RX_UNICAST_PKTS },
	{ "rx_multicast_pkts",		NFP_MAC_STATS_RX_MULTICAST_PKTS },
	{ "rx_pkts",			NFP_MAC_STATS_RX_PKTS },
	{ "rx_undersize_pkts",		NFP_MAC_STATS_RX_UNDERSIZE_PKTS },
	{ "rx_pkts_64_octets",		NFP_MAC_STATS_RX_PKTS_64_OCTETS },
	{ "rx_pkts_65_to_127_octets",	NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS },
	{ "rx_pkts_128_to_255_octets",	NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS },
	{ "rx_pkts_256_to_511_octets",	NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS },
	{ "rx_pkts_512_to_1023_octets",	NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS },
	{ "rx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS },
	{ "rx_pkts_1519_to_max_octets",	NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS },
	{ "rx_jabbers",			NFP_MAC_STATS_RX_JABBERS },
	{ "rx_fragments",		NFP_MAC_STATS_RX_FRAGMENTS },
	{ "rx_oversize_pkts",		NFP_MAC_STATS_RX_OVERSIZE_PKTS },
	{ "rx_pause_frames_class0",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0 },
	{ "rx_pause_frames_class1",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1 },
	{ "rx_pause_frames_class2",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2 },
	{ "rx_pause_frames_class3",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3 },
	{ "rx_pause_frames_class4",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4 },
	{ "rx_pause_frames_class5",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5 },
	{ "rx_pause_frames_class6",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6 },
	{ "rx_pause_frames_class7",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7 },
	{ "rx_mac_ctrl_frames_received",
			NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED },
	{ "rx_mac_head_drop",		NFP_MAC_STATS_RX_MAC_HEAD_DROP },
	{ "tx_queue_drop",		NFP_MAC_STATS_TX_QUEUE_DROP },
	{ "tx_octets",			NFP_MAC_STATS_TX_OUT_OCTETS },
	{ "tx_vlan_transmitted_ok",	NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK },
	{ "tx_errors",			NFP_MAC_STATS_TX_OUT_ERRORS },
	{ "tx_broadcast_pkts",		NFP_MAC_STATS_TX_BROADCAST_PKTS },
	{ "tx_pause_mac_ctrl_frames",	NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES },
	{ "tx_frames_transmitted_ok",	NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK },
	{ "tx_unicast_pkts",		NFP_MAC_STATS_TX_UNICAST_PKTS },
	{ "tx_multicast_pkts",		NFP_MAC_STATS_TX_MULTICAST_PKTS },
	{ "tx_pkts_64_octets",		NFP_MAC_STATS_TX_PKTS_64_OCTETS },
	{ "tx_pkts_65_to_127_octets",	NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS },
	{ "tx_pkts_128_to_255_octets",	NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS },
	{ "tx_pkts_256_to_511_octets",	NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS },
	{ "tx_pkts_512_to_1023_octets",	NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS },
	{ "tx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS },
	{ "tx_pkts_1519_to_max_octets",	NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS },
	{ "tx_pause_frames_class0",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0 },
	{ "tx_pause_frames_class1",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1 },
	{ "tx_pause_frames_class2",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2 },
	{ "tx_pause_frames_class3",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3 },
	{ "tx_pause_frames_class4",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4 },
	{ "tx_pause_frames_class5",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5 },
	{ "tx_pause_frames_class6",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6 },
	{ "tx_pause_frames_class7",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7 },
};

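/* Names for device stats reported through the vnic_stats TLV, indexed by
 * the stat ID advertised by the firmware.
 */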
static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
	[1]	= "dev_rx_discards",
	[2]	= "dev_rx_errors",
	[3]	= "dev_rx_bytes",
	[4]	= "dev_rx_uc_bytes",
	[5]	= "dev_rx_mc_bytes",
	[6]	= "dev_rx_bc_bytes",
	[7]	= "dev_rx_pkts",
	[8]	= "dev_rx_mc_pkts",
	[9]	= "dev_rx_bc_pkts",

	[10]	= "dev_tx_discards",
	[11]	= "dev_tx_errors",
	[12]	= "dev_tx_bytes",
	[13]	= "dev_tx_uc_bytes",
	[14]	= "dev_tx_mc_bytes",
	[15]	= "dev_tx_bc_bytes",
	[16]	= "dev_tx_pkts",
	[17]	= "dev_tx_mc_pkts",
	[18]	= "dev_tx_bc_pkts",
};

#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
#define NN_CTRL_PATH_STATS 4

#define SFP_SFF_REV_COMPLIANCE	1

static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
	struct nfp_nsp *nsp;

	if (!app)
		return;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp))
		return;

	snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
		 nfp_nsp_get_abi_ver_major(nsp),
		 nfp_nsp_get_abi_ver_minor(nsp));

	nfp_nsp_close(nsp);
}

static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
		const char *vnic_version, struct ethtool_drvinfo *drvinfo)
{
	char nsp_version[ETHTOOL_FWVERS_LEN] = {};

	strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
	nfp_net_get_nspinfo(app, nsp_version);
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%s %s %s %s", vnic_version, nsp_version,
		 nfp_app_mip_name(app), nfp_app_name(app));
}

static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	char vnic_version[ETHTOOL_FWVERS_LEN] = {};
	struct nfp_net *nn = netdev_priv(netdev);

	snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
		 nn->fw_ver.resv, nn->fw_ver.class,
		 nn->fw_ver.major, nn->fw_ver.minor);
	strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
		sizeof(drvinfo->bus_info));

	nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}

static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	strlcpy(drvinfo->bus_info, pci_name(app->pdev),
		sizeof(drvinfo->bus_info));
	nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}

static void
nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
			  struct ethtool_link_ksettings *c)
{
	unsigned int modes;

	ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
	if (!nfp_eth_can_support_fec(eth_port)) {
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
		return;
	}

	modes = nfp_eth_supported_fec_modes(eth_port);
	if (modes & NFP_FEC_BASER) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
	}

	if (modes & NFP_FEC_REED_SOLOMON) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
	}
}

/**
 * nfp_net_get_link_ksettings - Get Link Speed settings
 * @netdev:	network interface device structure
 * @cmd:	ethtool command
 *
 * Reports speed settings based on info in the BAR provided by the fw.
 */
static int
nfp_net_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *cmd)
{
	static const u32 ls_to_ethtool[] = {
		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED]	= 0,
		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]	= SPEED_UNKNOWN,
		[NFP_NET_CFG_STS_LINK_RATE_1G]		= SPEED_1000,
		[NFP_NET_CFG_STS_LINK_RATE_10G]		= SPEED_10000,
		[NFP_NET_CFG_STS_LINK_RATE_25G]		= SPEED_25000,
		[NFP_NET_CFG_STS_LINK_RATE_40G]		= SPEED_40000,
		[NFP_NET_CFG_STS_LINK_RATE_50G]		= SPEED_50000,
		[NFP_NET_CFG_STS_LINK_RATE_100G]	= SPEED_100000,
	};
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_net *nn;
	u32 sts, ls;

	/* Init to unknowns */
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (eth_port) {
		cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
			AUTONEG_ENABLE : AUTONEG_DISABLE;
		nfp_net_set_fec_link_mode(eth_port, cmd);
	}

	if (!netif_carrier_ok(netdev))
		return 0;

	/* Use link speed from ETH table if available, otherwise try the BAR */
	if (eth_port) {
		cmd->base.port = eth_port->port_type;
		cmd->base.speed = eth_port->speed;
		cmd->base.duplex = DUPLEX_FULL;
		return 0;
	}

	if (!nfp_netdev_is_nfp_net(netdev))
		return -EOPNOTSUPP;
	nn = netdev_priv(netdev);

	sts = nn_readl(nn, NFP_NET_CFG_STS);

	ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
	if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
		return -EOPNOTSUPP;

	if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
	    ls >= ARRAY_SIZE(ls_to_ethtool))
		return 0;

	cmd->base.speed = ls_to_ethtool[ls];
	cmd->base.duplex = DUPLEX_FULL;

	return 0;
}

static int
nfp_net_set_link_ksettings(struct net_device *netdev,
			   const struct ethtool_link_ksettings *cmd)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (netif_running(netdev)) {
		netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
		return -EBUSY;
	}

	nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
				 NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
	if (err)
		goto err_bad_set;
	if (cmd->base.speed != SPEED_UNKNOWN) {
		u32 speed = cmd->base.speed / eth_port->lanes;

		err = __nfp_eth_set_speed(nsp, speed);
		if (err)
			goto err_bad_set;
	}

	err = nfp_eth_config_commit_end(nsp);
	if (err > 0)
		return 0; /* no change */

	nfp_net_refresh_port_table(port);

	return err;

err_bad_set:
	nfp_eth_config_cleanup_end(nsp);
	return err;
}

static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);

	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
	ring->rx_pending = nn->dp.rxd_cnt;
	ring->tx_pending = nn->dp.txd_cnt;
}

static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->rxd_cnt = rxd_cnt;
	dp->txd_cnt = txd_cnt;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 rxd_cnt, txd_cnt;

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
		return -EINVAL;

	if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

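/* Format one ethtool string into its fixed-width ETH_GSTRING_LEN slot and
 * return a pointer to the next slot.
 */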
__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	return data + ETH_GSTRING_LEN;
}

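/* Software stats exported per vNIC: NN_RVEC_PER_Q_STATS strings for every
 * ring vector, NN_RVEC_GATHER_STATS totals summed across vectors, and
 * NN_CTRL_PATH_STATS control path (kTLS) counters read from atomics.
 */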
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
	       NN_CTRL_PATH_STATS;
}

static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->max_r_vecs; i++) {
		data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
		data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
		data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
	}

	data = nfp_pr_et(data, "hw_rx_csum_ok");
	data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
	data = nfp_pr_et(data, "hw_rx_csum_complete");
	data = nfp_pr_et(data, "hw_rx_csum_err");
	data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
	data = nfp_pr_et(data, "rx_tls_decrypted_packets");
	data = nfp_pr_et(data, "hw_tx_csum");
	data = nfp_pr_et(data, "hw_tx_inner_csum");
	data = nfp_pr_et(data, "tx_gather");
	data = nfp_pr_et(data, "tx_lso");
	data = nfp_pr_et(data, "tx_tls_encrypted_packets");
	data = nfp_pr_et(data, "tx_tls_ooo");
	data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");

	data = nfp_pr_et(data, "hw_tls_no_space");
	data = nfp_pr_et(data, "rx_tls_resync_req_ok");
	data = nfp_pr_et(data, "rx_tls_resync_req_ign");
	data = nfp_pr_et(data, "rx_tls_resync_sent");

	return data;
}

static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
	u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	u64 tmp[NN_RVEC_GATHER_STATS];
	unsigned int start;
	int i, j;

	for (i = 0; i < nn->max_r_vecs; i++) {
		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
			data[0] = nn->r_vecs[i].rx_pkts;
			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
			tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
			tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
			tmp[5] = nn->r_vecs[i].hw_tls_rx;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
			data[1] = nn->r_vecs[i].tx_pkts;
			data[2] = nn->r_vecs[i].tx_busy;
			tmp[6] = nn->r_vecs[i].hw_csum_tx;
			tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
			tmp[8] = nn->r_vecs[i].tx_gather;
			tmp[9] = nn->r_vecs[i].tx_lso;
			tmp[10] = nn->r_vecs[i].hw_tls_tx;
			tmp[11] = nn->r_vecs[i].tls_tx_fallback;
			tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

		data += NN_RVEC_PER_Q_STATS;

		for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
			gathered_stats[j] += tmp[j];
	}

	for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
		*data++ = gathered_stats[j];

	*data++ = atomic_read(&nn->ktls_no_space);
	*data++ = atomic_read(&nn->ktls_rx_resync_req);
	*data++ = atomic_read(&nn->ktls_rx_resync_ign);
	*data++ = atomic_read(&nn->ktls_rx_resync_sent);

	return data;
}

static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
	return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}

static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
	int swap_off, i;

	BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
	/* If repr is true first add SWITCH_STATS_LEN and then subtract it
	 * effectively swapping the RX and TX statistics (giving us the RX
	 * and TX from perspective of the switch).
	 */
	swap_off = repr * NN_ET_SWITCH_STATS_LEN;

	for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i].name);

	for (i = 0; i < num_vecs; i++) {
		data = nfp_pr_et(data, "rxq_%u_pkts", i);
		data = nfp_pr_et(data, "rxq_%u_bytes", i);
		data = nfp_pr_et(data, "txq_%u_pkts", i);
		data = nfp_pr_et(data, "txq_%u_bytes", i);
	}

	return data;
}

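/* Per-queue device stats in the control BAR: each RX/TX ring exposes a pair
 * of 64-bit counters, packets at the ring's stats offset and bytes 8 bytes
 * after it.
 */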
static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
	unsigned int i;

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
		*data++ = readq(mem + nfp_net_et_stats[i].off);

	for (i = 0; i < num_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}

static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
{
	return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
}

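/* Layout of the vnic_stats TLV area in the control BAR: an array of 16-bit
 * stat IDs (vnic_stats_cnt entries), padded to an 8 byte boundary, followed
 * by one 64-bit counter per ID in the same order.
 */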
static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
{
	unsigned int i, id;
	u8 __iomem *mem;
	u64 id_word = 0;

	mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
	for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
		if (!(i % 4))
			id_word = readq(mem + i * 2);

		id = (u16)id_word;
		id_word >>= 16;

		if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
		    nfp_tlv_stat_names[id][0]) {
			memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		} else {
			data = nfp_pr_et(data, "dev_unknown_stat%u", id);
		}
	}

	for (i = 0; i < nn->max_r_vecs; i++) {
		data = nfp_pr_et(data, "rxq_%u_pkts", i);
		data = nfp_pr_et(data, "rxq_%u_bytes", i);
		data = nfp_pr_et(data, "txq_%u_pkts", i);
		data = nfp_pr_et(data, "txq_%u_bytes", i);
	}

	return data;
}

static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
{
	u8 __iomem *mem;
	unsigned int i;

	mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
	mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
	for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
		*data++ = readq(mem + i * 8);

	mem = nn->dp.ctrl_bar;
	for (i = 0; i < nn->max_r_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}

static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return 0;

	return ARRAY_SIZE(nfp_mac_et_stats);
}

static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name);

	return data;
}

static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		*data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);

	return data;
}

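/* ethtool stats are assembled from up to four sources: per-vNIC software
 * stats, device stats from either the fixed BAR layout or the vnic_stats
 * TLV area, MAC stats, and app/port stats.
 */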
static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		data = nfp_vnic_get_sw_stats_strings(netdev, data);
		if (!nn->tlv_caps.vnic_stats_off)
			data = nfp_vnic_get_hw_stats_strings(data,
							     nn->max_r_vecs,
							     false);
		else
			data = nfp_vnic_get_tlv_stats_strings(nn, data);
		data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(nn->port, data);
		break;
	}
}

static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		  u64 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	data = nfp_vnic_get_sw_stats(netdev, data);
	if (!nn->tlv_caps.vnic_stats_off)
		data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
					     nn->max_r_vecs);
	else
		data = nfp_vnic_get_tlv_stats(nn, data);
	data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(nn->port, data);
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int cnt;

	switch (sset) {
	case ETH_SS_STATS:
		cnt = nfp_vnic_get_sw_stats_count(netdev);
		if (!nn->tlv_caps.vnic_stats_off)
			cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
		else
			cnt += nfp_vnic_get_tlv_stats_count(nn);
		cnt += nfp_mac_get_stats_count(netdev);
		cnt += nfp_app_port_get_stats_count(nn->port);
		return cnt;
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_port_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			data = nfp_vnic_get_hw_stats_strings(data, 0, true);
		else
			data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(port, data);
		break;
	}
}

static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		   u64 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (nfp_port_is_vnic(port))
		data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
	else
		data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(port, data);
}

static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	unsigned int count;

	switch (sset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			count = nfp_vnic_get_hw_stats_count(0);
		else
			count = nfp_mac_get_stats_count(netdev);
		count += nfp_app_port_get_stats_count(port);
		return count;
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_port_fec_ethtool_to_nsp(u32 fec)
{
	switch (fec) {
	case ETHTOOL_FEC_AUTO:
		return NFP_FEC_AUTO_BIT;
	case ETHTOOL_FEC_OFF:
		return NFP_FEC_DISABLED_BIT;
	case ETHTOOL_FEC_RS:
		return NFP_FEC_REED_SOLOMON_BIT;
	case ETHTOOL_FEC_BASER:
		return NFP_FEC_BASER_BIT;
	default:
		/* NSP only supports a single mode at a time */
		return -EOPNOTSUPP;
	}
}

static u32
nfp_port_fec_nsp_to_ethtool(u32 fec)
{
	u32 result = 0;

	if (fec & NFP_FEC_AUTO)
		result |= ETHTOOL_FEC_AUTO;
	if (fec & NFP_FEC_BASER)
		result |= ETHTOOL_FEC_BASER;
	if (fec & NFP_FEC_REED_SOLOMON)
		result |= ETHTOOL_FEC_RS;
	if (fec & NFP_FEC_DISABLED)
		result |= ETHTOOL_FEC_OFF;

	return result ?: ETHTOOL_FEC_NONE;
}

static int
nfp_port_get_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;

	param->active_fec = ETHTOOL_FEC_NONE;
	param->fec = ETHTOOL_FEC_NONE;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return 0;

	param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
	param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);

	return 0;
}

static int
nfp_port_set_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err, fec;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return -EOPNOTSUPP;

	fec = nfp_port_fec_ethtool_to_nsp(param->fec);
	if (fec < 0)
		return fec;

	err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
	if (!err)
		/* Only refresh if we did something */
		nfp_net_refresh_port_table(port);

	return err < 0 ? err : 0;
}

/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}

static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
{
	u32 nfp_rss_flag;

	cmd->data = 0;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	if (nn->rss_cfg & nfp_rss_flag)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

	return 0;
}

static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nn->dp.num_rx_rings;
		return 0;
	case ETHTOOL_GRXFH:
		return nfp_net_get_rss_hash_opts(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}

static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return nfp_net_set_rss_hash_opt(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return 0;

	return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	return nfp_net_rss_key_sz(nn);
}

static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	if (indir)
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			indir[i] = nn->rss_itbl[i];
	if (key)
		memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
	if (hfunc) {
		*hfunc = nn->rss_hfunc;
		if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
			*hfunc = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int nfp_net_set_rxfh(struct net_device *netdev,
			    const u32 *indir, const u8 *key,
			    const u8 hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
	    !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
		return -EOPNOTSUPP;

	if (!key && !indir)
		return 0;

	if (key) {
		memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
		nfp_net_rss_write_key(nn);
	}
	if (indir) {
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			nn->rss_itbl[i] = indir[i];

		nfp_net_rss_write_itbl(nn);
	}

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}

/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
	return NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 *regs_buf = p;
	int i;

	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
		regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}

static int nfp_net_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	ec->rx_coalesce_usecs       = nn->rx_coalesce_usecs;
	ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
	ec->tx_coalesce_usecs       = nn->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

	return 0;
}

/* Other debug dumps
 */
static int
nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
{
	struct nfp_resource *res;
	int ret;

	if (!app)
		return -EOPNOTSUPP;

	dump->version = 1;
	dump->flag = NFP_DUMP_NSP_DIAG;

	res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
	if (IS_ERR(res))
		return PTR_ERR(res);

	if (buffer) {
		if (dump->len != nfp_resource_size(res)) {
			ret = -EINVAL;
			goto exit_release;
		}

		ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
				   nfp_resource_address(res),
				   buffer, dump->len);
		if (ret != dump->len)
			ret = ret < 0 ? ret : -EIO;
		else
			ret = 0;
	} else {
		dump->len = nfp_resource_size(res);
		ret = 0;
	}
exit_release:
	nfp_resource_release(res);

	return ret;
}

/* Set the dump flag/level. Calculate the dump length for flag > 0 only (new TLV
 * based dumps), since flag 0 (default) calculates the length in
 * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
 * without setting the flag first, for backward compatibility.
 */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	s64 len;

	if (!app)
		return -EOPNOTSUPP;

	if (val->flag == NFP_DUMP_NSP_DIAG) {
		app->pf->dump_flag = val->flag;
		return 0;
	}

	if (!app->pf->dumpspec)
		return -EOPNOTSUPP;

	len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
					  val->flag);
	if (len < 0)
		return len;

	app->pf->dump_flag = val->flag;
	app->pf->dump_len = len;

	return 0;
}

static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
		return nfp_dump_nsp_diag(app, dump, NULL);

	dump->flag = app->pf->dump_flag;
	dump->len = app->pf->dump_len;

	return 0;
}

static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
		      void *buffer)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
		return nfp_dump_nsp_diag(app, dump, buffer);

	dump->flag = app->pf->dump_flag;
	dump->len = app->pf->dump_len;

	return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
					    buffer);
}

static int
nfp_port_get_module_info(struct net_device *netdev,
			 struct ethtool_modinfo *modinfo)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	unsigned int read_len;
	struct nfp_nsp *nsp;
	int err = 0;
	u8 data;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	nsp = nfp_nsp_open(port->app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
		netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
		err = -EOPNOTSUPP;
		goto exit_close_nsp;
	}

	switch (eth_port->interface) {
	case NFP_INTERFACE_SFP:
	case NFP_INTERFACE_SFP28:
		err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
						 SFP_SFF8472_COMPLIANCE, &data,
						 1, &read_len);
		if (err < 0)
			goto exit_close_nsp;

		if (!data) {
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		}
		break;
	case NFP_INTERFACE_QSFP:
		err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
						 SFP_SFF_REV_COMPLIANCE, &data,
						 1, &read_len);
		if (err < 0)
			goto exit_close_nsp;

		if (data < 0x3) {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		}
		break;
	case NFP_INTERFACE_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	default:
		netdev_err(netdev, "Unsupported module 0x%x detected\n",
			   eth_port->interface);
		err = -EINVAL;
	}

exit_close_nsp:
	nfp_nsp_close(nsp);
	return err;
}

static int
nfp_port_get_module_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *data)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	nsp = nfp_nsp_open(port->app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
		netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
		err = -EOPNOTSUPP;
		goto exit_close_nsp;
	}

	err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
					 eeprom->offset, data, eeprom->len,
					 &eeprom->len);
	if (err < 0) {
		if (eeprom->len) {
			netdev_warn(netdev,
				    "Incomplete read from module EEPROM: %d\n",
				    err);
			err = 0;
		} else {
			netdev_err(netdev,
				   "Reading from module EEPROM failed: %d\n",
				   err);
		}
	}

exit_close_nsp:
	nfp_nsp_close(nsp);
	return err;
}

static int nfp_net_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int factor;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->me_freq_mhz / 16;

	/* Each pair of (usecs, max_frames) fields specifies that interrupts
	 * should be coalesced until
	 *      (usecs > 0 && time_since_first_completion >= usecs) ||
	 *      (max_frames > 0 && completed_frames >= max_frames)
	 *
	 * It is illegal to set both usecs and max_frames to zero as this would
	 * cause interrupts to never be generated.  To disable coalescing, set
	 * usecs = 0 and max_frames = 1.
	 *
	 * Some implementations ignore the value of max_frames and use the
	 * condition time_since_first_completion >= usecs
	 */
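	/* Illustrative example (assumed numbers, not from the datasheet):
	 * with an ME frequency of 1200 MHz the factor above is 75, so the
	 * 16-bit tick field limits rx/tx_coalesce_usecs to roughly
	 * 65535 / 75 ~= 873 us; larger values are rejected below.
	 */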
	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	/* ensure valid configuration */
	if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
		return -EINVAL;

	if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
		return -EINVAL;

	if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	/* configuration is valid */
	nn->rx_coalesce_usecs      = ec->rx_coalesce_usecs;
	nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
	nn->tx_coalesce_usecs      = ec->tx_coalesce_usecs;
	nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;

	/* write configuration to device */
	nfp_net_coalesce_write_cfg(nn);
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}

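/* When an XDP program is attached, each RX ring gets its own XDP TX ring, so
 * those extra TX rings are not reported in the ethtool channel counts.
 */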
static void nfp_net_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int num_tx_rings;

	num_tx_rings = nn->dp.num_tx_rings;
	if (nn->dp.xdp_prog)
		num_tx_rings -= nn->dp.num_rx_rings;

	channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
	channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
	channel->max_combined = min(channel->max_rx, channel->max_tx);
	channel->max_other = NFP_NET_NON_Q_VECTORS;
	channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
	channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
	channel->tx_count = num_tx_rings - channel->combined_count;
	channel->other_count = NFP_NET_NON_Q_VECTORS;
}

static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
				 unsigned int total_tx)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->num_rx_rings = total_rx;
	dp->num_tx_rings = total_tx;
	/* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
	if (dp->xdp_prog)
		dp->num_tx_rings += total_rx;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int nfp_net_set_channels(struct net_device *netdev,
				struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int total_rx, total_tx;

	/* Reject unsupported */
	if (channel->other_count != NFP_NET_NON_Q_VECTORS ||
	    (channel->rx_count && channel->tx_count))
		return -EINVAL;

	total_rx = channel->combined_count + channel->rx_count;
	total_tx = channel->combined_count + channel->tx_count;

	if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
	    total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
		return -EINVAL;

	return nfp_net_set_num_rings(nn, total_rx, total_tx);
}

static const struct ethtool_ops nfp_net_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= nfp_net_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= nfp_net_get_ringparam,
	.set_ringparam		= nfp_net_set_ringparam,
	.get_strings		= nfp_net_get_strings,
	.get_ethtool_stats	= nfp_net_get_stats,
	.get_sset_count		= nfp_net_get_sset_count,
	.get_rxnfc		= nfp_net_get_rxnfc,
	.set_rxnfc		= nfp_net_set_rxnfc,
	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
	.get_rxfh		= nfp_net_get_rxfh,
	.set_rxfh		= nfp_net_set_rxfh,
	.get_regs_len		= nfp_net_get_regs_len,
	.get_regs		= nfp_net_get_regs,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_module_info	= nfp_port_get_module_info,
	.get_module_eeprom	= nfp_port_get_module_eeprom,
	.get_coalesce		= nfp_net_get_coalesce,
	.set_coalesce		= nfp_net_set_coalesce,
	.get_channels		= nfp_net_get_channels,
	.set_channels		= nfp_net_set_channels,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
	.get_fecparam		= nfp_port_get_fecparam,
	.set_fecparam		= nfp_port_set_fecparam,
};

const struct ethtool_ops nfp_port_ethtool_ops = {
	.get_drvinfo		= nfp_app_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= nfp_port_get_strings,
	.get_ethtool_stats	= nfp_port_get_stats,
	.get_sset_count		= nfp_port_get_sset_count,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_module_info	= nfp_port_get_module_info,
	.get_module_eeprom	= nfp_port_get_module_eeprom,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
	.get_fecparam		= nfp_port_get_fecparam,
	.set_fecparam		= nfp_port_set_fecparam,
};

void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nfp_net_ethtool_ops;
}