// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/sfp.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_dp.h"
#include "nfp_net.h"
#include "nfp_port.h"
#include "nfpcore/nfp_cpp.h"
struct nfp_et_stat {
	char name[ETH_GSTRING_LEN];
	int off;
};
static const struct nfp_et_stat nfp_net_et_stats[] = {
	/* Stats from the device */
	{ "dev_rx_discards",	NFP_NET_CFG_STATS_RX_DISCARDS },
	{ "dev_rx_errors",	NFP_NET_CFG_STATS_RX_ERRORS },
	{ "dev_rx_bytes",	NFP_NET_CFG_STATS_RX_OCTETS },
	{ "dev_rx_uc_bytes",	NFP_NET_CFG_STATS_RX_UC_OCTETS },
	{ "dev_rx_mc_bytes",	NFP_NET_CFG_STATS_RX_MC_OCTETS },
	{ "dev_rx_bc_bytes",	NFP_NET_CFG_STATS_RX_BC_OCTETS },
	{ "dev_rx_pkts",	NFP_NET_CFG_STATS_RX_FRAMES },
	{ "dev_rx_mc_pkts",	NFP_NET_CFG_STATS_RX_MC_FRAMES },
	{ "dev_rx_bc_pkts",	NFP_NET_CFG_STATS_RX_BC_FRAMES },

	{ "dev_tx_discards",	NFP_NET_CFG_STATS_TX_DISCARDS },
	{ "dev_tx_errors",	NFP_NET_CFG_STATS_TX_ERRORS },
	{ "dev_tx_bytes",	NFP_NET_CFG_STATS_TX_OCTETS },
	{ "dev_tx_uc_bytes",	NFP_NET_CFG_STATS_TX_UC_OCTETS },
	{ "dev_tx_mc_bytes",	NFP_NET_CFG_STATS_TX_MC_OCTETS },
	{ "dev_tx_bc_bytes",	NFP_NET_CFG_STATS_TX_BC_OCTETS },
	{ "dev_tx_pkts",	NFP_NET_CFG_STATS_TX_FRAMES },
	{ "dev_tx_mc_pkts",	NFP_NET_CFG_STATS_TX_MC_FRAMES },
	{ "dev_tx_bc_pkts",	NFP_NET_CFG_STATS_TX_BC_FRAMES },

	{ "bpf_pass_pkts",	NFP_NET_CFG_STATS_APP0_FRAMES },
	{ "bpf_pass_bytes",	NFP_NET_CFG_STATS_APP0_BYTES },
	/* see comments in outro functions in nfp_bpf_jit.c to find out
	 * how different BPF modes use app-specific counters
	 */
	{ "bpf_app1_pkts",	NFP_NET_CFG_STATS_APP1_FRAMES },
	{ "bpf_app1_bytes",	NFP_NET_CFG_STATS_APP1_BYTES },
	{ "bpf_app2_pkts",	NFP_NET_CFG_STATS_APP2_FRAMES },
	{ "bpf_app2_bytes",	NFP_NET_CFG_STATS_APP2_BYTES },
	{ "bpf_app3_pkts",	NFP_NET_CFG_STATS_APP3_FRAMES },
	{ "bpf_app3_bytes",	NFP_NET_CFG_STATS_APP3_BYTES },
};
static const struct nfp_et_stat nfp_mac_et_stats[] = {
	{ "rx_octets",			NFP_MAC_STATS_RX_IN_OCTETS, },
	{ "rx_frame_too_long_errors",
			NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
	{ "rx_range_length_errors",	NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
	{ "rx_vlan_received_ok",	NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
	{ "rx_errors",			NFP_MAC_STATS_RX_IN_ERRORS, },
	{ "rx_broadcast_pkts",		NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
	{ "rx_drop_events",		NFP_MAC_STATS_RX_DROP_EVENTS, },
	{ "rx_alignment_errors",	NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
	{ "rx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
	{ "rx_frames_received_ok",	NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
	{ "rx_frame_check_sequence_errors",
			NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
	{ "rx_unicast_pkts",		NFP_MAC_STATS_RX_UNICAST_PKTS, },
	{ "rx_multicast_pkts",		NFP_MAC_STATS_RX_MULTICAST_PKTS, },
	{ "rx_pkts",			NFP_MAC_STATS_RX_PKTS, },
	{ "rx_undersize_pkts",		NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
	{ "rx_pkts_64_octets",		NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
	{ "rx_pkts_65_to_127_octets",
			NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
	{ "rx_pkts_128_to_255_octets",
			NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
	{ "rx_pkts_256_to_511_octets",
			NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
	{ "rx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
	{ "rx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
	{ "rx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
	{ "rx_jabbers",			NFP_MAC_STATS_RX_JABBERS, },
	{ "rx_fragments",		NFP_MAC_STATS_RX_FRAGMENTS, },
	{ "rx_oversize_pkts",		NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
	{ "rx_pause_frames_class0",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
	{ "rx_pause_frames_class1",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
	{ "rx_pause_frames_class2",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
	{ "rx_pause_frames_class3",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
	{ "rx_pause_frames_class4",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
	{ "rx_pause_frames_class5",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
	{ "rx_pause_frames_class6",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
	{ "rx_pause_frames_class7",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
	{ "rx_mac_ctrl_frames_received",
			NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
	{ "rx_mac_head_drop",		NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
	{ "tx_queue_drop",		NFP_MAC_STATS_TX_QUEUE_DROP, },
	{ "tx_octets",			NFP_MAC_STATS_TX_OUT_OCTETS, },
	{ "tx_vlan_transmitted_ok",	NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
	{ "tx_errors",			NFP_MAC_STATS_TX_OUT_ERRORS, },
	{ "tx_broadcast_pkts",		NFP_MAC_STATS_TX_BROADCAST_PKTS, },
	{ "tx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
	{ "tx_frames_transmitted_ok",
			NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
	{ "tx_unicast_pkts",		NFP_MAC_STATS_TX_UNICAST_PKTS, },
	{ "tx_multicast_pkts",		NFP_MAC_STATS_TX_MULTICAST_PKTS, },
	{ "tx_pkts_64_octets",		NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
	{ "tx_pkts_65_to_127_octets",
			NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
	{ "tx_pkts_128_to_255_octets",
			NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
	{ "tx_pkts_256_to_511_octets",
			NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
	{ "tx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
	{ "tx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
	{ "tx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
	{ "tx_pause_frames_class0",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
	{ "tx_pause_frames_class1",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
	{ "tx_pause_frames_class2",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
	{ "tx_pause_frames_class3",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
	{ "tx_pause_frames_class4",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
	{ "tx_pause_frames_class5",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
	{ "tx_pause_frames_class6",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
	{ "tx_pause_frames_class7",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};
static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
	[1]	= "dev_rx_discards",
	[2]	= "dev_rx_errors",
	[3]	= "dev_rx_bytes",
	[4]	= "dev_rx_uc_bytes",
	[5]	= "dev_rx_mc_bytes",
	[6]	= "dev_rx_bc_bytes",
	[7]	= "dev_rx_pkts",
	[8]	= "dev_rx_mc_pkts",
	[9]	= "dev_rx_bc_pkts",

	[10]	= "dev_tx_discards",
	[11]	= "dev_tx_errors",
	[12]	= "dev_tx_bytes",
	[13]	= "dev_tx_uc_bytes",
	[14]	= "dev_tx_mc_bytes",
	[15]	= "dev_tx_bc_bytes",
	[16]	= "dev_tx_pkts",
	[17]	= "dev_tx_mc_pkts",
	[18]	= "dev_tx_bc_pkts",
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
#define NN_CTRL_PATH_STATS 4

#define SFP_SFF_REV_COMPLIANCE	1
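
/* Descriptive note (added): the helper below queries the NSP for its ABI
 * version so it can be included in the firmware version string reported by
 * "ethtool -i".
 */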
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
	struct nfp_nsp *nsp;

	if (!app)
		return;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp))
		return;

	snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
		 nfp_nsp_get_abi_ver_major(nsp),
		 nfp_nsp_get_abi_ver_minor(nsp));

	nfp_nsp_close(nsp);
}
static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
		const char *vnic_version, struct ethtool_drvinfo *drvinfo)
{
	char nsp_version[ETHTOOL_FWVERS_LEN] = {};

	strscpy(drvinfo->driver, dev_driver_string(&pdev->dev),
		sizeof(drvinfo->driver));
	nfp_net_get_nspinfo(app, nsp_version);
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%s %s %s %s", vnic_version, nsp_version,
		 nfp_app_mip_name(app), nfp_app_name(app));
}
static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	char vnic_version[ETHTOOL_FWVERS_LEN] = {};
	struct nfp_net *nn = netdev_priv(netdev);

	snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
		 nn->fw_ver.extend, nn->fw_ver.class,
		 nn->fw_ver.major, nn->fw_ver.minor);
	strscpy(drvinfo->bus_info, pci_name(nn->pdev),
		sizeof(drvinfo->bus_info));

	nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}
static int
nfp_net_nway_reset(struct net_device *netdev)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!netif_running(netdev))
		return 0;

	err = nfp_eth_set_configured(port->app->cpp, eth_port->index, false);
	if (err) {
		netdev_info(netdev, "Link down failed: %d\n", err);
		return err;
	}

	err = nfp_eth_set_configured(port->app->cpp, eth_port->index, true);
	if (err) {
		netdev_info(netdev, "Link up failed: %d\n", err);
		return err;
	}

	netdev_info(netdev, "Link reset succeeded\n");
	return 0;
}
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	strscpy(drvinfo->bus_info, pci_name(app->pdev),
		sizeof(drvinfo->bus_info));
	nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
static void
nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
			  struct ethtool_link_ksettings *c)
{
	unsigned int modes;

	ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
	if (!nfp_eth_can_support_fec(eth_port)) {
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
		return;
	}

	modes = nfp_eth_supported_fec_modes(eth_port);
	if (modes & NFP_FEC_BASER) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
	}

	if (modes & NFP_FEC_REED_SOLOMON) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
	}
}
static const struct nfp_eth_media_link_mode {
	u16 ethtool_link_mode;
	u16 speed;
} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
	[NFP_MEDIA_1000BASE_CX] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = NFP_SPEED_1G,
	},
	[NFP_MEDIA_1000BASE_KX] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = NFP_SPEED_1G,
	},
	[NFP_MEDIA_10GBASE_KX4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_KR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_LR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
		.speed = NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_CX4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed = NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_CR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
		.speed = NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_SR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
		.speed = NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_ER] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
		.speed = NFP_SPEED_10G,
	},
	[NFP_MEDIA_25GBASE_KR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_KR_S] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_CR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_CR_S] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_SR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_LR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_ER] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed = NFP_SPEED_25G,
	},
	[NFP_MEDIA_40GBASE_CR4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = NFP_SPEED_40G,
	},
	[NFP_MEDIA_40GBASE_KR4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = NFP_SPEED_40G,
	},
	[NFP_MEDIA_40GBASE_SR4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed = NFP_SPEED_40G,
	},
	[NFP_MEDIA_40GBASE_LR4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed = NFP_SPEED_40G,
	},
	[NFP_MEDIA_50GBASE_KR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		.speed = NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_SR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
		.speed = NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_CR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
		.speed = NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_LR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		.speed = NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_ER] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		.speed = NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_FR] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		.speed = NFP_SPEED_50G,
	},
	[NFP_MEDIA_100GBASE_KR4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = NFP_SPEED_100G,
	},
	[NFP_MEDIA_100GBASE_SR4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed = NFP_SPEED_100G,
	},
	[NFP_MEDIA_100GBASE_CR4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = NFP_SPEED_100G,
	},
	[NFP_MEDIA_100GBASE_KP4] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = NFP_SPEED_100G,
	},
	[NFP_MEDIA_100GBASE_CR10] = {
		.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = NFP_SPEED_100G,
	},
};
static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
	[NFP_SPEED_1G]		= SPEED_1000,
	[NFP_SPEED_10G]		= SPEED_10000,
	[NFP_SPEED_25G]		= SPEED_25000,
	[NFP_SPEED_40G]		= SPEED_40000,
	[NFP_SPEED_50G]		= SPEED_50000,
	[NFP_SPEED_100G]	= SPEED_100000,
};
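
/* Descriptive note (added): translate the firmware's two 64-bit media
 * link-mode bitmaps into ethtool link modes and record which speeds the
 * port supports in port->speed_bitmap.
 */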
static void nfp_add_media_link_mode(struct nfp_port *port,
				    struct nfp_eth_table_port *eth_port,
				    struct ethtool_link_ksettings *cmd)
{
	bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);

	for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
		if (i < 64) {
			if (eth_port->link_modes_supp[0] & BIT_ULL(i)) {
				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
					  cmd->link_modes.supported);
				__set_bit(nfp_eth_media_table[i].speed,
					  port->speed_bitmap);
			}

			if (eth_port->link_modes_ad[0] & BIT_ULL(i))
				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
					  cmd->link_modes.advertising);
		} else {
			if (eth_port->link_modes_supp[1] & BIT_ULL(i - 64)) {
				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
					  cmd->link_modes.supported);
				__set_bit(nfp_eth_media_table[i].speed,
					  port->speed_bitmap);
			}

			if (eth_port->link_modes_ad[1] & BIT_ULL(i - 64))
				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
					  cmd->link_modes.advertising);
		}
	}

	/* Take all speeds as supported when reading the link modes fails
	 * (old management firmware without link-mode reading, or a read
	 * error), so that speed changes on this port are still allowed.
	 */
	if (bitmap_empty(port->speed_bitmap, NFP_SUP_SPEED_NUMBER))
		bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
}
/**
 * nfp_net_get_link_ksettings - Get Link Speed settings
 * @netdev:	network interface device structure
 * @cmd:	ethtool command
 *
 * Reports speed settings based on info in the BAR provided by the fw.
 */
static int
nfp_net_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *cmd)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_net *nn;
	unsigned int speed;
	u16 sts;

	/* Init to unknowns */
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (eth_port) {
		ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		nfp_add_media_link_mode(port, eth_port, cmd);
		if (eth_port->supp_aneg) {
			ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
			if (eth_port->aneg == NFP_ANEG_AUTO) {
				ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
				cmd->base.autoneg = AUTONEG_ENABLE;
			}
		}
		nfp_net_set_fec_link_mode(eth_port, cmd);
	}

	if (!netif_carrier_ok(netdev))
		return 0;

	/* Use link speed from ETH table if available, otherwise try the BAR */
	if (eth_port) {
		cmd->base.port = eth_port->port_type;
		cmd->base.speed = eth_port->speed;
		cmd->base.duplex = DUPLEX_FULL;
		return 0;
	}

	if (!nfp_netdev_is_nfp_net(netdev))
		return -EOPNOTSUPP;
	nn = netdev_priv(netdev);

	sts = nn_readw(nn, NFP_NET_CFG_STS);
	speed = nfp_net_lr2speed(FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts));
	if (!speed)
		return -EOPNOTSUPP;

	if (speed != SPEED_UNKNOWN) {
		cmd->base.speed = speed;
		cmd->base.duplex = DUPLEX_FULL;
	}

	return 0;
}
static int
nfp_net_set_link_ksettings(struct net_device *netdev,
			   const struct ethtool_link_ksettings *cmd)
{
	bool req_aneg = (cmd->base.autoneg == AUTONEG_ENABLE);
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (netif_running(netdev)) {
		netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
		return -EBUSY;
	}

	nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	if (req_aneg && !eth_port->supp_aneg) {
		netdev_warn(netdev, "Autoneg is not supported.\n");
		err = -EOPNOTSUPP;
		goto err_bad_set;
	}

	err = __nfp_eth_set_aneg(nsp, req_aneg ? NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
	if (err)
		goto err_bad_set;

	if (cmd->base.speed != SPEED_UNKNOWN) {
		u32 speed = cmd->base.speed / eth_port->lanes;
		bool is_supported = false;

		for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
			if (cmd->base.speed == nfp_eth_speed_map[i] &&
			    test_bit(i, port->speed_bitmap)) {
				is_supported = true;
				break;
			}
		}

		if (!is_supported) {
			netdev_err(netdev, "Speed %u is not supported.\n",
				   cmd->base.speed);
			err = -EINVAL;
			goto err_bad_set;
		}

		if (req_aneg) {
			netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
			err = -EINVAL;
			goto err_bad_set;
		}

		err = __nfp_eth_set_speed(nsp, speed);
		if (err)
			goto err_bad_set;
	}

	err = nfp_eth_config_commit_end(nsp);
	if (err > 0)
		return 0; /* no change */
	if (err)
		return err;

	nfp_net_refresh_port_table(port);

	return 0;

err_bad_set:
	nfp_eth_config_cleanup_end(nsp);
	return err;
}
static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 qc_max = nn->dev_info->max_qc_size;

	ring->rx_max_pending = qc_max;
	ring->tx_max_pending = qc_max / nn->dp.ops->tx_min_desc_per_pkt;
	ring->rx_pending = nn->dp.rxd_cnt;
	ring->tx_pending = nn->dp.txd_cnt;
}
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt,
				 struct netlink_ext_ack *extack)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->rxd_cnt = rxd_cnt;
	dp->txd_cnt = txd_cnt;

	return nfp_net_ring_reconfig(nn, dp, extack);
}
static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	u32 tx_dpp, qc_min, qc_max, rxd_cnt, txd_cnt;
	struct nfp_net *nn = netdev_priv(netdev);

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	qc_min = nn->dev_info->min_qc_size;
	qc_max = nn->dev_info->max_qc_size;
	tx_dpp = nn->dp.ops->tx_min_desc_per_pkt;
	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < qc_min || rxd_cnt > qc_max) {
		NL_SET_ERR_MSG_MOD(extack, "rx parameter out of bounds");
		return -EINVAL;
	}

	if (txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp) {
		NL_SET_ERR_MSG_MOD(extack, "tx parameter out of bounds");
		return -EINVAL;
	}

	if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt, extack);
}
static int nfp_test_link(struct net_device *netdev)
{
	if (!netif_carrier_ok(netdev) || !(netdev->flags & IFF_UP))
		return 1;

	return 0;
}
static int nfp_test_nsp(struct net_device *netdev)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	struct nfp_nsp_identify *nspi;
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_info(netdev, "NSP Test: failed to access the NSP: %d\n", err);
		goto exit;
	}

	if (nfp_nsp_get_abi_ver_minor(nsp) < 15) {
		err = -EOPNOTSUPP;
		goto exit_close_nsp;
	}

	nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
	if (!nspi) {
		err = -ENOMEM;
		goto exit_close_nsp;
	}

	err = nfp_nsp_read_identify(nsp, nspi, sizeof(*nspi));
	if (err < 0)
		netdev_info(netdev, "NSP Test: reading bsp version failed %d\n", err);

	kfree(nspi);
exit_close_nsp:
	nfp_nsp_close(nsp);
exit:
	return err;
}
static int nfp_test_fw(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		netdev_info(netdev, "FW Test: update failed %d\n", err);

	return err;
}
static int nfp_test_reg(struct net_device *netdev)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	struct nfp_cpp *cpp = app->cpp;
	u32 model = nfp_cpp_model(cpp);
	u32 value;
	int err;

	err = nfp_cpp_model_autodetect(cpp, &value);
	if (err < 0) {
		netdev_info(netdev, "REG Test: NFP model detection failed %d\n", err);
		return err;
	}

	return (value == model) ? 0 : 1;
}
static bool link_test_supported(struct net_device *netdev)
{
	return true;
}

static bool nsp_test_supported(struct net_device *netdev)
{
	if (nfp_app_from_netdev(netdev))
		return true;

	return false;
}

static bool fw_test_supported(struct net_device *netdev)
{
	if (nfp_netdev_is_nfp_net(netdev))
		return true;

	return false;
}

static bool reg_test_supported(struct net_device *netdev)
{
	if (nfp_app_from_netdev(netdev))
		return true;

	return false;
}
static struct nfp_self_test_item {
	char name[ETH_GSTRING_LEN];
	bool (*is_supported)(struct net_device *dev);
	int (*func)(struct net_device *dev);
} nfp_self_test[] = {
	{"Link Test", link_test_supported, nfp_test_link},
	{"NSP Test", nsp_test_supported, nfp_test_nsp},
	{"Firmware Test", fw_test_supported, nfp_test_fw},
	{"Register Test", reg_test_supported, nfp_test_reg}
};

#define NFP_TEST_TOTAL_NUM ARRAY_SIZE(nfp_self_test)
static void nfp_get_self_test_strings(struct net_device *netdev, u8 *data)
{
	int i;

	for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
		if (nfp_self_test[i].is_supported(netdev))
			ethtool_puts(&data, nfp_self_test[i].name);
}

static int nfp_get_self_test_count(struct net_device *netdev)
{
	int i, count = 0;

	for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
		if (nfp_self_test[i].is_supported(netdev))
			count++;

	return count;
}
static void nfp_net_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
			      u64 *data)
{
	int i, ret, count = 0;

	netdev_info(netdev, "Start self test\n");

	for (i = 0; i < NFP_TEST_TOTAL_NUM; i++) {
		if (nfp_self_test[i].is_supported(netdev)) {
			ret = nfp_self_test[i].func(netdev);
			if (ret)
				eth_test->flags |= ETH_TEST_FL_FAILED;
			data[count++] = ret;
		}
	}

	netdev_info(netdev, "Test end\n");
}
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
	       NN_CTRL_PATH_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->max_r_vecs; i++) {
		ethtool_sprintf(&data, "rvec_%u_rx_pkts", i);
		ethtool_sprintf(&data, "rvec_%u_tx_pkts", i);
		ethtool_sprintf(&data, "rvec_%u_tx_busy", i);
	}

	ethtool_puts(&data, "hw_rx_csum_ok");
	ethtool_puts(&data, "hw_rx_csum_inner_ok");
	ethtool_puts(&data, "hw_rx_csum_complete");
	ethtool_puts(&data, "hw_rx_csum_err");
	ethtool_puts(&data, "rx_replace_buf_alloc_fail");
	ethtool_puts(&data, "rx_tls_decrypted_packets");
	ethtool_puts(&data, "hw_tx_csum");
	ethtool_puts(&data, "hw_tx_inner_csum");
	ethtool_puts(&data, "tx_gather");
	ethtool_puts(&data, "tx_lso");
	ethtool_puts(&data, "tx_tls_encrypted_packets");
	ethtool_puts(&data, "tx_tls_ooo");
	ethtool_puts(&data, "tx_tls_drop_no_sync_data");

	ethtool_puts(&data, "hw_tls_no_space");
	ethtool_puts(&data, "rx_tls_resync_req_ok");
	ethtool_puts(&data, "rx_tls_resync_req_ign");
	ethtool_puts(&data, "rx_tls_resync_sent");

	return data;
}
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
	u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	u64 tmp[NN_RVEC_GATHER_STATS];
	unsigned int i, j;

	for (i = 0; i < nn->max_r_vecs; i++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
			data[0] = nn->r_vecs[i].rx_pkts;
			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
			tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
			tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
			tmp[5] = nn->r_vecs[i].hw_tls_rx;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
			data[1] = nn->r_vecs[i].tx_pkts;
			data[2] = nn->r_vecs[i].tx_busy;
			tmp[6] = nn->r_vecs[i].hw_csum_tx;
			tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
			tmp[8] = nn->r_vecs[i].tx_gather;
			tmp[9] = nn->r_vecs[i].tx_lso;
			tmp[10] = nn->r_vecs[i].hw_tls_tx;
			tmp[11] = nn->r_vecs[i].tls_tx_fallback;
			tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

		data += NN_RVEC_PER_Q_STATS;

		for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
			gathered_stats[j] += tmp[j];
	}

	for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
		*data++ = gathered_stats[j];

	*data++ = atomic_read(&nn->ktls_no_space);
	*data++ = atomic_read(&nn->ktls_rx_resync_req);
	*data++ = atomic_read(&nn->ktls_rx_resync_ign);
	*data++ = atomic_read(&nn->ktls_rx_resync_sent);

	return data;
}
static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
	return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}

static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
	int swap_off, i;

	BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
	/* If repr is true first add SWITCH_STATS_LEN and then subtract it
	 * effectively swapping the RX and TX statistics (giving us the RX
	 * and TX from perspective of the switch).
	 */
	swap_off = repr * NN_ET_SWITCH_STATS_LEN;

	for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
		ethtool_puts(&data, nfp_net_et_stats[i + swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
		ethtool_puts(&data, nfp_net_et_stats[i - swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
		ethtool_puts(&data, nfp_net_et_stats[i].name);

	for (i = 0; i < num_vecs; i++) {
		ethtool_sprintf(&data, "rxq_%u_pkts", i);
		ethtool_sprintf(&data, "rxq_%u_bytes", i);
		ethtool_sprintf(&data, "txq_%u_pkts", i);
		ethtool_sprintf(&data, "txq_%u_bytes", i);
	}

	return data;
}
static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
	unsigned int i;

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
		*data++ = readq(mem + nfp_net_et_stats[i].off);

	for (i = 0; i < num_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}
static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
{
	return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
}
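
/* Descriptive note (added): TLV vNIC stats live in the control BAR at
 * vnic_stats_off as an array of 16-bit stat IDs (packed into 64-bit words),
 * followed, 8-byte aligned, by the 64-bit counter values.  Known IDs map to
 * names in nfp_tlv_stat_names; unknown IDs are reported generically.
 */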
static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
{
	unsigned int i, id;
	u8 __iomem *mem;
	u64 id_word = 0;

	mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
	for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
		if (!(i % 4))
			id_word = readq(mem + i * 2);

		id = (u16)id_word;
		id_word >>= 16;

		if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
		    nfp_tlv_stat_names[id][0]) {
			memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		} else {
			ethtool_sprintf(&data, "dev_unknown_stat%u", id);
		}
	}

	for (i = 0; i < nn->max_r_vecs; i++) {
		ethtool_sprintf(&data, "rxq_%u_pkts", i);
		ethtool_sprintf(&data, "rxq_%u_bytes", i);
		ethtool_sprintf(&data, "txq_%u_pkts", i);
		ethtool_sprintf(&data, "txq_%u_bytes", i);
	}

	return data;
}
static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
{
	u8 __iomem *mem;
	unsigned int i;

	mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
	mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
	for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
		*data++ = readq(mem + i * 8);

	mem = nn->dp.ctrl_bar;
	for (i = 0; i < nn->max_r_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}
static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return 0;

	return ARRAY_SIZE(nfp_mac_et_stats);
}
static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		ethtool_sprintf(&data, "mac.%s", nfp_mac_et_stats[i].name);

	return data;
}
static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		*data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);

	return data;
}
static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		data = nfp_vnic_get_sw_stats_strings(netdev, data);
		if (!nn->tlv_caps.vnic_stats_off)
			data = nfp_vnic_get_hw_stats_strings(data,
							     nn->max_r_vecs,
							     false);
		else
			data = nfp_vnic_get_tlv_stats_strings(nn, data);
		data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(nn->port, data);
		break;
	case ETH_SS_TEST:
		nfp_get_self_test_strings(netdev, data);
		break;
	}
}
static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		  u64 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	data = nfp_vnic_get_sw_stats(netdev, data);
	if (!nn->tlv_caps.vnic_stats_off)
		data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
					     nn->max_r_vecs);
	else
		data = nfp_vnic_get_tlv_stats(nn, data);
	data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(nn->port, data);
}
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int cnt;

	switch (sset) {
	case ETH_SS_STATS:
		cnt = nfp_vnic_get_sw_stats_count(netdev);
		if (!nn->tlv_caps.vnic_stats_off)
			cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
		else
			cnt += nfp_vnic_get_tlv_stats_count(nn);
		cnt += nfp_mac_get_stats_count(netdev);
		cnt += nfp_app_port_get_stats_count(nn->port);
		return cnt;
	case ETH_SS_TEST:
		return nfp_get_self_test_count(netdev);
	default:
		return -EOPNOTSUPP;
	}
}
static void nfp_port_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			data = nfp_vnic_get_hw_stats_strings(data, 0, true);
		else
			data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(port, data);
		break;
	case ETH_SS_TEST:
		nfp_get_self_test_strings(netdev, data);
		break;
	}
}
static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		   u64 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (nfp_port_is_vnic(port))
		data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
	else
		data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(port, data);
}
static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	unsigned int count;

	switch (sset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			count = nfp_vnic_get_hw_stats_count(0);
		else
			count = nfp_mac_get_stats_count(netdev);
		count += nfp_app_port_get_stats_count(port);
		return count;
	case ETH_SS_TEST:
		return nfp_get_self_test_count(netdev);
	default:
		return -EOPNOTSUPP;
	}
}
static int nfp_port_fec_ethtool_to_nsp(u32 fec)
{
	switch (fec) {
	case ETHTOOL_FEC_AUTO:
		return NFP_FEC_AUTO_BIT;
	case ETHTOOL_FEC_OFF:
		return NFP_FEC_DISABLED_BIT;
	case ETHTOOL_FEC_RS:
		return NFP_FEC_REED_SOLOMON_BIT;
	case ETHTOOL_FEC_BASER:
		return NFP_FEC_BASER_BIT;
	default:
		/* NSP only supports a single mode at a time */
		return -EOPNOTSUPP;
	}
}
static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
{
	u32 result = 0;

	if (fec & NFP_FEC_AUTO)
		result |= ETHTOOL_FEC_AUTO;
	if (fec & NFP_FEC_BASER)
		result |= ETHTOOL_FEC_BASER;
	if (fec & NFP_FEC_REED_SOLOMON)
		result |= ETHTOOL_FEC_RS;
	if (fec & NFP_FEC_DISABLED)
		result |= ETHTOOL_FEC_OFF;

	return result ?: ETHTOOL_FEC_NONE;
}
static int
nfp_port_get_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;

	param->active_fec = ETHTOOL_FEC_NONE;
	param->fec = ETHTOOL_FEC_NONE;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return 0;

	param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
	param->active_fec = nfp_port_fec_nsp_to_ethtool(BIT(eth_port->act_fec));

	return 0;
}
static int
nfp_port_set_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err, fec;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return -EOPNOTSUPP;

	fec = nfp_port_fec_ethtool_to_nsp(param->fec);
	if (fec < 0)
		return fec;

	err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
	if (!err)
		/* Only refresh if we did something */
		nfp_net_refresh_port_table(port);

	return err < 0 ? err : 0;
}
/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}
static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
{
	u32 nfp_rss_flag;

	cmd->data = 0;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	if (nn->rss_cfg & nfp_rss_flag)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

	return 0;
}
#define NFP_FS_MAX_ENTRY	1024

static int nfp_net_fs_to_ethtool(struct nfp_fs_entry *entry, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	unsigned int i;

	switch (entry->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src = entry->key.sip4;
		fs->h_u.tcp_ip4_spec.ip4dst = entry->key.dip4;
		fs->h_u.tcp_ip4_spec.psrc   = entry->key.sport;
		fs->h_u.tcp_ip4_spec.pdst   = entry->key.dport;
		fs->m_u.tcp_ip4_spec.ip4src = entry->msk.sip4;
		fs->m_u.tcp_ip4_spec.ip4dst = entry->msk.dip4;
		fs->m_u.tcp_ip4_spec.psrc   = entry->msk.sport;
		fs->m_u.tcp_ip4_spec.pdst   = entry->msk.dport;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		for (i = 0; i < 4; i++) {
			fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
			fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
			fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
			fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
		}
		fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
		fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
		fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
		fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
		break;
	case IPV4_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fs->h_u.usr_ip4_spec.ip4src = entry->key.sip4;
		fs->h_u.usr_ip4_spec.ip4dst = entry->key.dip4;
		fs->h_u.usr_ip4_spec.proto  = entry->key.l4_proto;
		fs->m_u.usr_ip4_spec.ip4src = entry->msk.sip4;
		fs->m_u.usr_ip4_spec.ip4dst = entry->msk.dip4;
		fs->m_u.usr_ip4_spec.proto  = entry->msk.l4_proto;
		break;
	case IPV6_USER_FLOW:
		for (i = 0; i < 4; i++) {
			fs->h_u.usr_ip6_spec.ip6src[i] = entry->key.sip6[i];
			fs->h_u.usr_ip6_spec.ip6dst[i] = entry->key.dip6[i];
			fs->m_u.usr_ip6_spec.ip6src[i] = entry->msk.sip6[i];
			fs->m_u.usr_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
		}
		fs->h_u.usr_ip6_spec.l4_proto = entry->key.l4_proto;
		fs->m_u.usr_ip6_spec.l4_proto = entry->msk.l4_proto;
		break;
	case ETHER_FLOW:
		fs->h_u.ether_spec.h_proto = entry->key.l3_proto;
		fs->m_u.ether_spec.h_proto = entry->msk.l3_proto;
		break;
	default:
		return -EINVAL;
	}

	fs->flow_type   = entry->flow_type;
	fs->ring_cookie = entry->action;

	if (fs->flow_type & FLOW_RSS) {
		/* Only rss_context of 0 is supported. */
		cmd->rss_context = 0;
		/* RSS is used, mask the ring. */
		fs->ring_cookie |= ETHTOOL_RX_FLOW_SPEC_RING;
	}

	return 0;
}
static int nfp_net_get_fs_rule(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
{
	struct nfp_fs_entry *entry;

	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
		return -EOPNOTSUPP;

	if (cmd->fs.location >= NFP_FS_MAX_ENTRY)
		return -EINVAL;

	list_for_each_entry(entry, &nn->fs.list, node) {
		if (entry->loc == cmd->fs.location)
			return nfp_net_fs_to_ethtool(entry, cmd);

		if (entry->loc > cmd->fs.location)
			/* no need to continue */
			return -ENOENT;
	}

	return -ENOENT;
}
*nn
, u32
*rule_locs
)
1425 struct nfp_fs_entry
*entry
;
1428 if (!(nn
->cap_w1
& NFP_NET_CFG_CTRL_FLOW_STEER
))
1431 list_for_each_entry(entry
, &nn
->fs
.list
, node
)
1432 rule_locs
[count
++] = entry
->loc
;
static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nn->dp.num_rx_rings;
		return 0;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = nn->fs.count;
		return 0;
	case ETHTOOL_GRXCLSRULE:
		return nfp_net_get_fs_rule(nn, cmd);
	case ETHTOOL_GRXCLSRLALL:
		cmd->data = NFP_FS_MAX_ENTRY;
		return nfp_net_get_fs_loc(nn, rule_locs);
	case ETHTOOL_GRXFH:
		return nfp_net_get_rss_hash_opts(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
*nn
,
1462 struct ethtool_rxnfc
*nfc
)
1464 u32 new_rss_cfg
= nn
->rss_cfg
;
1468 if (!(nn
->cap
& NFP_NET_CFG_CTRL_RSS_ANY
))
1471 /* RSS only supports IP SA/DA and L4 src/dst ports */
1472 if (nfc
->data
& ~(RXH_IP_SRC
| RXH_IP_DST
|
1473 RXH_L4_B_0_1
| RXH_L4_B_2_3
))
1476 /* We need at least the IP SA/DA fields for hashing */
1477 if (!(nfc
->data
& RXH_IP_SRC
) ||
1478 !(nfc
->data
& RXH_IP_DST
))
1481 nfp_rss_flag
= ethtool_flow_to_nfp_flag(nfc
->flow_type
);
1485 switch (nfc
->data
& (RXH_L4_B_0_1
| RXH_L4_B_2_3
)) {
1487 new_rss_cfg
&= ~nfp_rss_flag
;
1489 case (RXH_L4_B_0_1
| RXH_L4_B_2_3
):
1490 new_rss_cfg
|= nfp_rss_flag
;
1496 new_rss_cfg
|= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC
, nn
->rss_hfunc
);
1497 new_rss_cfg
|= NFP_NET_CFG_RSS_MASK
;
1499 if (new_rss_cfg
== nn
->rss_cfg
)
1502 writel(new_rss_cfg
, nn
->dp
.ctrl_bar
+ NFP_NET_CFG_RSS_CTRL
);
1503 err
= nfp_net_reconfig(nn
, NFP_NET_CFG_UPDATE_RSS
);
1507 nn
->rss_cfg
= new_rss_cfg
;
1509 nn_dbg(nn
, "Changed RSS config to 0x%x\n", nn
->rss_cfg
);
static int nfp_net_fs_from_ethtool(struct nfp_fs_entry *entry, struct ethtool_rx_flow_spec *fs)
{
	unsigned int i;

	/* FLOW_EXT/FLOW_MAC_EXT is not supported. */
	switch (fs->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		entry->msk.sip4 = fs->m_u.tcp_ip4_spec.ip4src;
		entry->msk.dip4 = fs->m_u.tcp_ip4_spec.ip4dst;
		entry->msk.sport = fs->m_u.tcp_ip4_spec.psrc;
		entry->msk.dport = fs->m_u.tcp_ip4_spec.pdst;
		entry->key.sip4 = fs->h_u.tcp_ip4_spec.ip4src & entry->msk.sip4;
		entry->key.dip4 = fs->h_u.tcp_ip4_spec.ip4dst & entry->msk.dip4;
		entry->key.sport = fs->h_u.tcp_ip4_spec.psrc & entry->msk.sport;
		entry->key.dport = fs->h_u.tcp_ip4_spec.pdst & entry->msk.dport;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		for (i = 0; i < 4; i++) {
			entry->msk.sip6[i] = fs->m_u.tcp_ip6_spec.ip6src[i];
			entry->msk.dip6[i] = fs->m_u.tcp_ip6_spec.ip6dst[i];
			entry->key.sip6[i] = fs->h_u.tcp_ip6_spec.ip6src[i] & entry->msk.sip6[i];
			entry->key.dip6[i] = fs->h_u.tcp_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
		}
		entry->msk.sport = fs->m_u.tcp_ip6_spec.psrc;
		entry->msk.dport = fs->m_u.tcp_ip6_spec.pdst;
		entry->key.sport = fs->h_u.tcp_ip6_spec.psrc & entry->msk.sport;
		entry->key.dport = fs->h_u.tcp_ip6_spec.pdst & entry->msk.dport;
		break;
	case IPV4_USER_FLOW:
		entry->msk.sip4 = fs->m_u.usr_ip4_spec.ip4src;
		entry->msk.dip4 = fs->m_u.usr_ip4_spec.ip4dst;
		entry->msk.l4_proto = fs->m_u.usr_ip4_spec.proto;
		entry->key.sip4 = fs->h_u.usr_ip4_spec.ip4src & entry->msk.sip4;
		entry->key.dip4 = fs->h_u.usr_ip4_spec.ip4dst & entry->msk.dip4;
		entry->key.l4_proto = fs->h_u.usr_ip4_spec.proto & entry->msk.l4_proto;
		break;
	case IPV6_USER_FLOW:
		for (i = 0; i < 4; i++) {
			entry->msk.sip6[i] = fs->m_u.usr_ip6_spec.ip6src[i];
			entry->msk.dip6[i] = fs->m_u.usr_ip6_spec.ip6dst[i];
			entry->key.sip6[i] = fs->h_u.usr_ip6_spec.ip6src[i] & entry->msk.sip6[i];
			entry->key.dip6[i] = fs->h_u.usr_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
		}
		entry->msk.l4_proto = fs->m_u.usr_ip6_spec.l4_proto;
		entry->key.l4_proto = fs->h_u.usr_ip6_spec.l4_proto & entry->msk.l4_proto;
		break;
	case ETHER_FLOW:
		entry->msk.l3_proto = fs->m_u.ether_spec.h_proto;
		entry->key.l3_proto = fs->h_u.ether_spec.h_proto & entry->msk.l3_proto;
		break;
	default:
		return -EINVAL;
	}

	switch (fs->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		entry->key.l4_proto = IPPROTO_TCP;
		entry->msk.l4_proto = 0xff;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		entry->key.l4_proto = IPPROTO_UDP;
		entry->msk.l4_proto = 0xff;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		entry->key.l4_proto = IPPROTO_SCTP;
		entry->msk.l4_proto = 0xff;
		break;
	}

	entry->flow_type = fs->flow_type;
	entry->action = fs->ring_cookie;
	entry->loc = fs->location;

	return 0;
}
static int nfp_net_fs_check_existing(struct nfp_net *nn, struct nfp_fs_entry *new)
{
	struct nfp_fs_entry *entry;

	list_for_each_entry(entry, &nn->fs.list, node) {
		if (new->loc != entry->loc &&
		    !((new->flow_type ^ entry->flow_type) & ~FLOW_RSS) &&
		    !memcmp(&new->key, &entry->key, sizeof(new->key)) &&
		    !memcmp(&new->msk, &entry->msk, sizeof(new->msk)))
			return entry->loc;
	}

	/* -1 means no duplicates */
	return -1;
}
*nn
, struct ethtool_rxnfc
*cmd
)
1614 struct ethtool_rx_flow_spec
*fs
= &cmd
->fs
;
1615 struct nfp_fs_entry
*new, *entry
;
1619 if (!(nn
->cap_w1
& NFP_NET_CFG_CTRL_FLOW_STEER
))
1622 /* Only default RSS context(0) is supported. */
1623 if ((fs
->flow_type
& FLOW_RSS
) && cmd
->rss_context
)
1626 if (fs
->location
>= NFP_FS_MAX_ENTRY
)
1629 if (fs
->ring_cookie
!= RX_CLS_FLOW_DISC
&&
1630 fs
->ring_cookie
>= nn
->dp
.num_rx_rings
)
1633 /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
1634 switch (fs
->flow_type
& ~FLOW_RSS
) {
1638 unsupp_mask
= !!fs
->m_u
.tcp_ip4_spec
.tos
;
1643 unsupp_mask
= !!fs
->m_u
.tcp_ip6_spec
.tclass
;
1645 case IPV4_USER_FLOW
:
1646 unsupp_mask
= !!fs
->m_u
.usr_ip4_spec
.l4_4_bytes
||
1647 !!fs
->m_u
.usr_ip4_spec
.tos
||
1648 !!fs
->m_u
.usr_ip4_spec
.ip_ver
;
1649 /* ip_ver must be ETH_RX_NFC_IP4. */
1650 unsupp_mask
|= fs
->h_u
.usr_ip4_spec
.ip_ver
!= ETH_RX_NFC_IP4
;
1652 case IPV6_USER_FLOW
:
1653 unsupp_mask
= !!fs
->m_u
.usr_ip6_spec
.l4_4_bytes
||
1654 !!fs
->m_u
.usr_ip6_spec
.tclass
;
1657 if (fs
->h_u
.ether_spec
.h_proto
== htons(ETH_P_IP
) ||
1658 fs
->h_u
.ether_spec
.h_proto
== htons(ETH_P_IPV6
)) {
1659 nn_err(nn
, "Please use ip4/ip6 flow type instead.\n");
1662 /* Only unmasked ethtype is supported. */
1663 unsupp_mask
= !is_zero_ether_addr(fs
->m_u
.ether_spec
.h_dest
) ||
1664 !is_zero_ether_addr(fs
->m_u
.ether_spec
.h_source
) ||
1665 (fs
->m_u
.ether_spec
.h_proto
!= htons(0xffff));
1674 new = kzalloc(sizeof(*new), GFP_KERNEL
);
1678 nfp_net_fs_from_ethtool(new, fs
);
1680 id
= nfp_net_fs_check_existing(nn
, new);
1682 nn_err(nn
, "Identical rule is existing in %d.\n", id
);
1687 /* Insert to list in ascending order of location. */
1688 list_for_each_entry(entry
, &nn
->fs
.list
, node
) {
1689 if (entry
->loc
== fs
->location
) {
1690 err
= nfp_net_fs_del_hw(nn
, entry
);
1695 err
= nfp_net_fs_add_hw(nn
, new);
1700 list_replace(&entry
->node
, &new->node
);
1706 if (entry
->loc
> fs
->location
)
1710 if (nn
->fs
.count
== NFP_FS_MAX_ENTRY
) {
1715 err
= nfp_net_fs_add_hw(nn
, new);
1719 list_add_tail(&new->node
, &entry
->node
);
static int nfp_net_fs_del(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
{
	struct nfp_fs_entry *entry;
	int err = -ENOENT;

	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
		return -EOPNOTSUPP;

	if (!nn->fs.count || cmd->fs.location >= NFP_FS_MAX_ENTRY)
		return -EINVAL;

	list_for_each_entry(entry, &nn->fs.list, node) {
		if (entry->loc == cmd->fs.location) {
			err = nfp_net_fs_del_hw(nn, entry);
			if (err)
				return err;

			list_del(&entry->node);
			kfree(entry);
			nn->fs.count--;

			return 0;
		} else if (entry->loc > cmd->fs.location) {
			/* no need to continue */
			break;
		}
	}

	return err;
}
static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return nfp_net_set_rss_hash_opt(nn, cmd);
	case ETHTOOL_SRXCLSRLINS:
		return nfp_net_fs_add(nn, cmd);
	case ETHTOOL_SRXCLSRLDEL:
		return nfp_net_fs_del(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return 0;

	return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	return nfp_net_rss_key_sz(nn);
}
static int nfp_net_get_rxfh(struct net_device *netdev,
			    struct ethtool_rxfh_param *rxfh)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	if (rxfh->indir)
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			rxfh->indir[i] = nn->rss_itbl[i];
	if (rxfh->key)
		memcpy(rxfh->key, nn->rss_key, nfp_net_rss_key_sz(nn));

	rxfh->hfunc = nn->rss_hfunc;
	if (rxfh->hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
		rxfh->hfunc = ETH_RSS_HASH_UNKNOWN;

	return 0;
}
static int nfp_net_set_rxfh(struct net_device *netdev,
			    struct ethtool_rxfh_param *rxfh,
			    struct netlink_ext_ack *extack)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
	    !(rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE ||
	      rxfh->hfunc == nn->rss_hfunc))
		return -EOPNOTSUPP;

	if (!rxfh->key && !rxfh->indir)
		return 0;

	if (rxfh->key) {
		memcpy(nn->rss_key, rxfh->key, nfp_net_rss_key_sz(nn));
		nfp_net_rss_write_key(nn);
	}

	if (rxfh->indir) {
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			nn->rss_itbl[i] = rxfh->indir[i];

		nfp_net_rss_write_itbl(nn);
	}

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}
/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
	return NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 *regs_buf = p;
	int i;

	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
		regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}
static int nfp_net_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EOPNOTSUPP;

	ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on;
	ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on;

	ec->rx_coalesce_usecs       = nn->rx_coalesce_usecs;
	ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
	ec->tx_coalesce_usecs       = nn->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

	return 0;
}
/* Other debug dumps
 */
static int
nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
{
	struct nfp_resource *res;
	int ret;

	if (!app)
		return -EOPNOTSUPP;

	dump->version = 1;
	dump->flag = NFP_DUMP_NSP_DIAG;

	res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
	if (IS_ERR(res))
		return PTR_ERR(res);

	if (buffer) {
		if (dump->len != nfp_resource_size(res)) {
			ret = -EINVAL;
			goto exit_release;
		}

		ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
				   nfp_resource_address(res),
				   buffer, dump->len);
		if (ret != dump->len)
			ret = ret < 0 ? ret : -EIO;
		else
			ret = 0;
	} else {
		dump->len = nfp_resource_size(res);
		ret = 0;
	}
exit_release:
	nfp_resource_release(res);

	return ret;
}
/* Set the dump flag/level. Calculate the dump length for flag > 0 only
 * (new TLV based dumps), since flag 0 (default) calculates the length in
 * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
 * without setting the flag first, for backward compatibility.
 */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	s64 len;

	if (!app)
		return -EOPNOTSUPP;

	if (val->flag == NFP_DUMP_NSP_DIAG) {
		app->pf->dump_flag = val->flag;
		return 0;
	}

	if (!app->pf->dumpspec)
		return -EOPNOTSUPP;

	len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
					  val->flag);
	if (len < 0)
		return len;

	app->pf->dump_flag = val->flag;
	app->pf->dump_len = len;

	return 0;
}
static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
		return nfp_dump_nsp_diag(app, dump, NULL);

	dump->flag = app->pf->dump_flag;
	dump->len = app->pf->dump_len;

	return 0;
}
static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
		      void *buffer)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
		return nfp_dump_nsp_diag(app, dump, buffer);

	dump->flag = app->pf->dump_flag;
	dump->len = app->pf->dump_len;

	return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
					    buffer);
}
static int
nfp_port_get_module_info(struct net_device *netdev,
			 struct ethtool_modinfo *modinfo)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	unsigned int read_len;
	struct nfp_nsp *nsp;
	int err = 0;
	u8 data;

	port = nfp_port_from_netdev(netdev);
	if (!port)
		return -EOPNOTSUPP;

	/* update port state to get latest interface */
	set_bit(NFP_PORT_CHANGED, &port->flags);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	nsp = nfp_nsp_open(port->app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
		netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
		err = -EOPNOTSUPP;
		goto exit_close_nsp;
	}

	switch (eth_port->interface) {
	case NFP_INTERFACE_SFP:
	case NFP_INTERFACE_SFP28:
		err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
						 SFP_SFF8472_COMPLIANCE, &data,
						 1, &read_len);
		if (err < 0)
			goto exit_close_nsp;

		if (!data) {
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		}
		break;
	case NFP_INTERFACE_QSFP:
		err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
						 SFP_SFF_REV_COMPLIANCE, &data,
						 1, &read_len);
		if (err < 0)
			goto exit_close_nsp;

		if (data < 0x3) {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		}
		break;
	case NFP_INTERFACE_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		netdev_err(netdev, "Unsupported module 0x%x detected\n",
			   eth_port->interface);
		err = -EINVAL;
	}

exit_close_nsp:
	nfp_nsp_close(nsp);
	return err;
}
static int
nfp_port_get_module_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *data)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	nsp = nfp_nsp_open(port->app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
		netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
		err = -EOPNOTSUPP;
		goto exit_close_nsp;
	}

	err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
					 eeprom->offset, data, eeprom->len,
					 &eeprom->len);
	if (err < 0) {
		if (eeprom->len) {
			netdev_warn(netdev,
				    "Incomplete read from module EEPROM: %d\n",
				    err);
			err = 0;
		} else {
			netdev_err(netdev,
				   "Reading from module EEPROM failed: %d\n",
				   err);
		}
	}

exit_close_nsp:
	nfp_nsp_close(nsp);
	return err;
}
static int nfp_net_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int factor;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;

	/* Each pair of (usecs, max_frames) fields specifies that interrupts
	 * should be coalesced until
	 *      (usecs > 0 && time_since_first_completion >= usecs) ||
	 *      (max_frames > 0 && completed_frames >= max_frames)
	 *
	 * It is illegal to set both usecs and max_frames to zero as this would
	 * cause interrupts to never be generated.  To disable coalescing, set
	 * usecs = 0 and max_frames = 1.
	 *
	 * Some implementations ignore the value of max_frames and use the
	 * condition time_since_first_completion >= usecs
	 */

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	/* ensure valid configuration */
	if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rx-usecs and rx-frames cannot both be zero");
		return -EINVAL;
	}

	if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames) {
		NL_SET_ERR_MSG_MOD(extack,
				   "tx-usecs and tx-frames cannot both be zero");
		return -EINVAL;
	}

	if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor)) {
		NL_SET_ERR_MSG_MOD(extack, "rx-usecs too large");
		return -EINVAL;
	}

	if (nfp_net_coalesce_para_check(ec->rx_max_coalesced_frames)) {
		NL_SET_ERR_MSG_MOD(extack, "rx-frames too large");
		return -EINVAL;
	}

	if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor)) {
		NL_SET_ERR_MSG_MOD(extack, "tx-usecs too large");
		return -EINVAL;
	}

	if (nfp_net_coalesce_para_check(ec->tx_max_coalesced_frames)) {
		NL_SET_ERR_MSG_MOD(extack, "tx-frames too large");
		return -EINVAL;
	}

	/* configuration is valid */
	nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce;
	nn->tx_coalesce_adapt_on = !!ec->use_adaptive_tx_coalesce;

	nn->rx_coalesce_usecs      = ec->rx_coalesce_usecs;
	nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
	nn->tx_coalesce_usecs      = ec->tx_coalesce_usecs;
	nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;

	/* write configuration to device */
	nfp_net_coalesce_write_cfg(nn);
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}
static void nfp_net_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int num_tx_rings;

	num_tx_rings = nn->dp.num_tx_rings;
	if (nn->dp.xdp_prog)
		num_tx_rings -= nn->dp.num_rx_rings;

	channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
	channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
	channel->max_combined = min(channel->max_rx, channel->max_tx);
	channel->max_other = NFP_NET_NON_Q_VECTORS;
	channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
	channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
	channel->tx_count = num_tx_rings - channel->combined_count;
	channel->other_count = NFP_NET_NON_Q_VECTORS;
}
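
/* Worked example (illustrative ring counts): with 8 RX rings, 8 TX rings and
 * no XDP program, combined_count = min(8, 8) = 8 and both rx_count and
 * tx_count are reported as 0.  With XDP attached, one extra TX ring backs
 * each RX ring, so those XDP TX rings are subtracted above before the split
 * is reported to userspace.
 */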
static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
				 unsigned int total_tx)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->num_rx_rings = total_rx;
	dp->num_tx_rings = total_tx;
	/* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
	if (dp->xdp_prog)
		dp->num_tx_rings += total_rx;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}
static int nfp_net_set_channels(struct net_device *netdev,
				struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int total_rx, total_tx;

	/* Reject unsupported */
	if (channel->other_count != NFP_NET_NON_Q_VECTORS ||
	    (channel->rx_count && channel->tx_count))
		return -EINVAL;

	total_rx = channel->combined_count + channel->rx_count;
	total_tx = channel->combined_count + channel->tx_count;

	if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
	    total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
		return -EINVAL;

	return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
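
/* Usage note (illustrative, assuming an interface named eth0):
 *   ethtool -L eth0 combined 4
 * requests total_rx = total_tx = 4.  Requests that mix dedicated rx and tx
 * channels in the same call are rejected above.
 */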
static int nfp_port_set_pauseparam(struct net_device *netdev,
				   struct ethtool_pauseparam *pause)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	/* Currently pause frame autoneg is fixed */
	if (pause->autoneg != AUTONEG_DISABLE)
		return -EOPNOTSUPP;

	err = nfp_eth_set_pauseparam(port->app->cpp, eth_port->index,
				     pause->tx_pause, pause->rx_pause);
	if (!err)
		/* Only refresh if we did something */
		nfp_net_refresh_port_table(port);

	return err < 0 ? err : 0;
}
static void nfp_port_get_pauseparam(struct net_device *netdev,
				    struct ethtool_pauseparam *pause)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return;

	/* Currently pause frame autoneg is fixed */
	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = eth_port->rx_pause;
	pause->tx_pause = eth_port->tx_pause;
}
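
/* Usage note (illustrative, assuming an interface named eth0):
 *   ethtool -a eth0                            reads the pause settings;
 *   ethtool -A eth0 autoneg off rx on tx on    updates them.
 * Requests with pause autoneg enabled are rejected above with -EOPNOTSUPP.
 */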
static int nfp_net_set_phys_id(struct net_device *netdev,
			       enum ethtool_phys_id_state state)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Control LED to blink */
		err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 1);
		break;
	case ETHTOOL_ID_INACTIVE:
		/* Control LED to normal mode */
		err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 0);
		break;
	case ETHTOOL_ID_OFF:
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
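
/* Usage note (illustrative, assuming an interface named eth0):
 *   ethtool -p eth0 5
 * blinks the port LED for 5 seconds; the ethtool core calls the handler
 * above with ETHTOOL_ID_ACTIVE first and ETHTOOL_ID_INACTIVE when done.
 */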
#define NFP_EEPROM_LEN ETH_ALEN

static int
nfp_net_get_eeprom_len(struct net_device *netdev)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return 0;

	return NFP_EEPROM_LEN;
}
static int
nfp_net_get_nsp_hwindex(struct net_device *netdev,
			struct nfp_nsp **nspptr,
			u32 *index)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	nsp = nfp_nsp_open(port->app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
		netdev_err(netdev, "NSP doesn't support PF MAC generation\n");
		nfp_nsp_close(nsp);
		return -EOPNOTSUPP;
	}

	*nspptr = nsp;
	*index = eth_port->eth_index;

	return 0;
}
static int
nfp_net_get_port_mac_by_hwinfo(struct net_device *netdev,
			       u8 *mac_addr)
{
	char hwinfo[32] = {};
	struct nfp_nsp *nsp;
	u32 index;
	int err;

	err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
	if (err)
		return err;

	snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac", index);
	err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
	nfp_nsp_close(nsp);
	if (err) {
		netdev_err(netdev, "Reading persistent MAC address failed: %d\n",
			   err);
		return -EOPNOTSUPP;
	}

	if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
		netdev_err(netdev, "Can't parse persistent MAC address (%s)\n",
			   hwinfo);
		return -EOPNOTSUPP;
	}

	return 0;
}
static int
nfp_net_set_port_mac_by_hwinfo(struct net_device *netdev,
			       u8 *mac_addr)
{
	char hwinfo[32] = {};
	struct nfp_nsp *nsp;
	u32 index;
	int err;

	err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
	if (err)
		return err;

	snprintf(hwinfo, sizeof(hwinfo),
		 "eth%u.mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		 index, mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		 mac_addr[4], mac_addr[5]);

	err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
	nfp_nsp_close(nsp);
	if (err) {
		netdev_err(netdev, "HWinfo set failed: %d, hwinfo: %s\n",
			   err, hwinfo);
		return -EOPNOTSUPP;
	}

	return 0;
}
static int
nfp_net_get_eeprom(struct net_device *netdev,
		   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	u8 buf[NFP_EEPROM_LEN] = {};

	if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
		return -EOPNOTSUPP;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = app->pdev->vendor | (app->pdev->device << 16);
	memcpy(bytes, buf + eeprom->offset, eeprom->len);

	return 0;
}
static int
nfp_net_set_eeprom(struct net_device *netdev,
		   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	u8 buf[NFP_EEPROM_LEN] = {};

	if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
		return -EOPNOTSUPP;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (app->pdev->vendor | app->pdev->device << 16))
		return -EINVAL;

	memcpy(buf + eeprom->offset, bytes, eeprom->len);
	if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
		return -EOPNOTSUPP;

	return 0;
}
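
/* Usage note (illustrative values): this "EEPROM" is simply the 6-byte
 * persistent port MAC kept in NSP hwinfo.  Reading it with "ethtool -e eth0"
 * also reports the magic built above; a write via "ethtool -E" must supply
 * that same magic, e.g. with PCI vendor 0x19ee and device 0x4000 the magic
 * would be (0x4000 << 16) | 0x19ee = 0x400019ee.
 */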
static const struct ethtool_ops nfp_net_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo		= nfp_net_get_drvinfo,
	.nway_reset		= nfp_net_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= nfp_net_get_ringparam,
	.set_ringparam		= nfp_net_set_ringparam,
	.self_test		= nfp_net_self_test,
	.get_strings		= nfp_net_get_strings,
	.get_ethtool_stats	= nfp_net_get_stats,
	.get_sset_count		= nfp_net_get_sset_count,
	.get_rxnfc		= nfp_net_get_rxnfc,
	.set_rxnfc		= nfp_net_set_rxnfc,
	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
	.get_rxfh		= nfp_net_get_rxfh,
	.set_rxfh		= nfp_net_set_rxfh,
	.get_regs_len		= nfp_net_get_regs_len,
	.get_regs		= nfp_net_get_regs,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_eeprom_len		= nfp_net_get_eeprom_len,
	.get_eeprom		= nfp_net_get_eeprom,
	.set_eeprom		= nfp_net_set_eeprom,
	.get_module_info	= nfp_port_get_module_info,
	.get_module_eeprom	= nfp_port_get_module_eeprom,
	.get_coalesce		= nfp_net_get_coalesce,
	.set_coalesce		= nfp_net_set_coalesce,
	.get_channels		= nfp_net_get_channels,
	.set_channels		= nfp_net_set_channels,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
	.get_fecparam		= nfp_port_get_fecparam,
	.set_fecparam		= nfp_port_set_fecparam,
	.set_pauseparam		= nfp_port_set_pauseparam,
	.get_pauseparam		= nfp_port_get_pauseparam,
	.set_phys_id		= nfp_net_set_phys_id,
	.get_ts_info		= ethtool_op_get_ts_info,
};
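
/* The nfp_port_ethtool_ops table below is believed to serve port netdevs
 * that do not carry a vNIC (for example representors), so it omits the
 * ring, coalesce and RSS callbacks that require struct nfp_net state.
 */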
const struct ethtool_ops nfp_port_ethtool_ops = {
	.get_drvinfo		= nfp_app_get_drvinfo,
	.nway_reset		= nfp_net_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= nfp_port_get_strings,
	.get_ethtool_stats	= nfp_port_get_stats,
	.self_test		= nfp_net_self_test,
	.get_sset_count		= nfp_port_get_sset_count,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_eeprom_len		= nfp_net_get_eeprom_len,
	.get_eeprom		= nfp_net_get_eeprom,
	.set_eeprom		= nfp_net_set_eeprom,
	.get_module_info	= nfp_port_get_module_info,
	.get_module_eeprom	= nfp_port_get_module_eeprom,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
	.get_fecparam		= nfp_port_get_fecparam,
	.set_fecparam		= nfp_port_set_fecparam,
	.set_pauseparam		= nfp_port_set_pauseparam,
	.get_pauseparam		= nfp_port_get_pauseparam,
	.set_phys_id		= nfp_net_set_phys_id,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nfp_net_ethtool_ops;
}