1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2005-2006 Fen Systems Ltd.
5 * Copyright 2006-2013 Solarflare Communications Inc.
8 #include <linux/netdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/rtnetlink.h>
12 #include "net_driver.h"
13 #include "workarounds.h"
16 #include "efx_channels.h"
17 #include "rx_common.h"
18 #include "tx_common.h"
19 #include "ethtool_common.h"
23 #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
25 /**************************************************************************
29 **************************************************************************
32 /* Identify device by flashing LEDs */
33 static int efx_ethtool_phys_id(struct net_device
*net_dev
,
34 enum ethtool_phys_id_state state
)
36 struct efx_nic
*efx
= netdev_priv(net_dev
);
37 enum efx_led_mode mode
= EFX_LED_DEFAULT
;
46 case ETHTOOL_ID_INACTIVE
:
47 mode
= EFX_LED_DEFAULT
;
49 case ETHTOOL_ID_ACTIVE
:
50 return 1; /* cycle on/off once per second */
53 efx
->type
->set_id_led(efx
, mode
);
57 /* This must be called with rtnl_lock held. */
59 efx_ethtool_get_link_ksettings(struct net_device
*net_dev
,
60 struct ethtool_link_ksettings
*cmd
)
62 struct efx_nic
*efx
= netdev_priv(net_dev
);
63 struct efx_link_state
*link_state
= &efx
->link_state
;
66 mutex_lock(&efx
->mac_lock
);
67 efx
->phy_op
->get_link_ksettings(efx
, cmd
);
68 mutex_unlock(&efx
->mac_lock
);
70 /* Both MACs support pause frames (bidirectional and respond-only) */
71 ethtool_convert_link_mode_to_legacy_u32(&supported
,
72 cmd
->link_modes
.supported
);
74 supported
|= SUPPORTED_Pause
| SUPPORTED_Asym_Pause
;
76 ethtool_convert_legacy_u32_to_link_mode(cmd
->link_modes
.supported
,
79 if (LOOPBACK_INTERNAL(efx
)) {
80 cmd
->base
.speed
= link_state
->speed
;
81 cmd
->base
.duplex
= link_state
->fd
? DUPLEX_FULL
: DUPLEX_HALF
;
87 /* This must be called with rtnl_lock held. */
89 efx_ethtool_set_link_ksettings(struct net_device
*net_dev
,
90 const struct ethtool_link_ksettings
*cmd
)
92 struct efx_nic
*efx
= netdev_priv(net_dev
);
95 /* GMAC does not support 1000Mbps HD */
96 if ((cmd
->base
.speed
== SPEED_1000
) &&
97 (cmd
->base
.duplex
!= DUPLEX_FULL
)) {
98 netif_dbg(efx
, drv
, efx
->net_dev
,
99 "rejecting unsupported 1000Mbps HD setting\n");
103 mutex_lock(&efx
->mac_lock
);
104 rc
= efx
->phy_op
->set_link_ksettings(efx
, cmd
);
105 mutex_unlock(&efx
->mac_lock
);
/* Report the size of the register dump returned by efx_ethtool_get_regs() */
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
	return efx_nic_get_regs_len(netdev_priv(net_dev));
}
114 static void efx_ethtool_get_regs(struct net_device
*net_dev
,
115 struct ethtool_regs
*regs
, void *buf
)
117 struct efx_nic
*efx
= netdev_priv(net_dev
);
119 regs
->version
= efx
->type
->revision
;
120 efx_nic_get_regs(efx
, buf
);
123 static void efx_ethtool_self_test(struct net_device
*net_dev
,
124 struct ethtool_test
*test
, u64
*data
)
126 struct efx_nic
*efx
= netdev_priv(net_dev
);
127 struct efx_self_tests
*efx_tests
;
131 efx_tests
= kzalloc(sizeof(*efx_tests
), GFP_KERNEL
);
135 if (efx
->state
!= STATE_READY
) {
140 netif_info(efx
, drv
, efx
->net_dev
, "starting %sline testing\n",
141 (test
->flags
& ETH_TEST_FL_OFFLINE
) ? "off" : "on");
143 /* We need rx buffers and interrupts. */
144 already_up
= (efx
->net_dev
->flags
& IFF_UP
);
146 rc
= dev_open(efx
->net_dev
, NULL
);
148 netif_err(efx
, drv
, efx
->net_dev
,
149 "failed opening device.\n");
154 rc
= efx_selftest(efx
, efx_tests
, test
->flags
);
157 dev_close(efx
->net_dev
);
159 netif_info(efx
, drv
, efx
->net_dev
, "%s %sline self-tests\n",
160 rc
== 0 ? "passed" : "failed",
161 (test
->flags
& ETH_TEST_FL_OFFLINE
) ? "off" : "on");
164 efx_ethtool_fill_self_tests(efx
, efx_tests
, NULL
, data
);
168 test
->flags
|= ETH_TEST_FL_FAILED
;
171 /* Restart autonegotiation */
172 static int efx_ethtool_nway_reset(struct net_device
*net_dev
)
174 struct efx_nic
*efx
= netdev_priv(net_dev
);
176 return mdio45_nway_restart(&efx
->mdio
);
/* Interrupt moderation
 *
 * Each channel has a single IRQ and moderation timer, started by any
 * completion (or other event).  Unless the module parameter
 * separate_tx_channels is set, IRQs and moderation are therefore
 * shared between RX and TX completions.  In this case, when RX IRQ
 * moderation is explicitly changed then TX IRQ moderation is
 * automatically changed too, but otherwise we fail if the two values
 * are requested to be different.
 *
 * The hardware does not support a limit on the number of completions
 * before an IRQ, so we do not use the max_frames fields.  We should
 * report and require that max_frames == (usecs != 0), but this would
 * invalidate existing user documentation.
 *
 * The hardware does not have distinct settings for interrupt
 * moderation while the previous IRQ is being handled, so we should
 * not use the 'irq' fields.  However, an earlier developer
 * misunderstood the meaning of the 'irq' fields and the driver did
 * not support the standard fields.  To avoid invalidating existing
 * user documentation, we report and accept changes through either the
 * standard or 'irq' fields.  If both are changed at the same time, we
 * prefer the standard field.
 *
 * We implement adaptive IRQ moderation, but use a different algorithm
 * from that assumed in the definition of struct ethtool_coalesce.
 * Therefore we do not use any of the adaptive moderation parameters
 * in it.
 */
208 static int efx_ethtool_get_coalesce(struct net_device
*net_dev
,
209 struct ethtool_coalesce
*coalesce
)
211 struct efx_nic
*efx
= netdev_priv(net_dev
);
212 unsigned int tx_usecs
, rx_usecs
;
215 efx_get_irq_moderation(efx
, &tx_usecs
, &rx_usecs
, &rx_adaptive
);
217 coalesce
->tx_coalesce_usecs
= tx_usecs
;
218 coalesce
->tx_coalesce_usecs_irq
= tx_usecs
;
219 coalesce
->rx_coalesce_usecs
= rx_usecs
;
220 coalesce
->rx_coalesce_usecs_irq
= rx_usecs
;
221 coalesce
->use_adaptive_rx_coalesce
= rx_adaptive
;
226 static int efx_ethtool_set_coalesce(struct net_device
*net_dev
,
227 struct ethtool_coalesce
*coalesce
)
229 struct efx_nic
*efx
= netdev_priv(net_dev
);
230 struct efx_channel
*channel
;
231 unsigned int tx_usecs
, rx_usecs
;
232 bool adaptive
, rx_may_override_tx
;
235 if (coalesce
->use_adaptive_tx_coalesce
)
238 efx_get_irq_moderation(efx
, &tx_usecs
, &rx_usecs
, &adaptive
);
240 if (coalesce
->rx_coalesce_usecs
!= rx_usecs
)
241 rx_usecs
= coalesce
->rx_coalesce_usecs
;
243 rx_usecs
= coalesce
->rx_coalesce_usecs_irq
;
245 adaptive
= coalesce
->use_adaptive_rx_coalesce
;
247 /* If channels are shared, TX IRQ moderation can be quietly
248 * overridden unless it is changed from its old value.
250 rx_may_override_tx
= (coalesce
->tx_coalesce_usecs
== tx_usecs
&&
251 coalesce
->tx_coalesce_usecs_irq
== tx_usecs
);
252 if (coalesce
->tx_coalesce_usecs
!= tx_usecs
)
253 tx_usecs
= coalesce
->tx_coalesce_usecs
;
255 tx_usecs
= coalesce
->tx_coalesce_usecs_irq
;
257 rc
= efx_init_irq_moderation(efx
, tx_usecs
, rx_usecs
, adaptive
,
262 efx_for_each_channel(channel
, efx
)
263 efx
->type
->push_irq_moderation(channel
);
268 static void efx_ethtool_get_ringparam(struct net_device
*net_dev
,
269 struct ethtool_ringparam
*ring
)
271 struct efx_nic
*efx
= netdev_priv(net_dev
);
273 ring
->rx_max_pending
= EFX_MAX_DMAQ_SIZE
;
274 ring
->tx_max_pending
= EFX_TXQ_MAX_ENT(efx
);
275 ring
->rx_pending
= efx
->rxq_entries
;
276 ring
->tx_pending
= efx
->txq_entries
;
279 static int efx_ethtool_set_ringparam(struct net_device
*net_dev
,
280 struct ethtool_ringparam
*ring
)
282 struct efx_nic
*efx
= netdev_priv(net_dev
);
285 if (ring
->rx_mini_pending
|| ring
->rx_jumbo_pending
||
286 ring
->rx_pending
> EFX_MAX_DMAQ_SIZE
||
287 ring
->tx_pending
> EFX_TXQ_MAX_ENT(efx
))
290 if (ring
->rx_pending
< EFX_RXQ_MIN_ENT
) {
291 netif_err(efx
, drv
, efx
->net_dev
,
292 "RX queues cannot be smaller than %u\n",
297 txq_entries
= max(ring
->tx_pending
, EFX_TXQ_MIN_ENT(efx
));
298 if (txq_entries
!= ring
->tx_pending
)
299 netif_warn(efx
, drv
, efx
->net_dev
,
300 "increasing TX queue size to minimum of %u\n",
303 return efx_realloc_channels(efx
, ring
->rx_pending
, txq_entries
);
306 static int efx_ethtool_set_pauseparam(struct net_device
*net_dev
,
307 struct ethtool_pauseparam
*pause
)
309 struct efx_nic
*efx
= netdev_priv(net_dev
);
310 u8 wanted_fc
, old_fc
;
314 mutex_lock(&efx
->mac_lock
);
316 wanted_fc
= ((pause
->rx_pause
? EFX_FC_RX
: 0) |
317 (pause
->tx_pause
? EFX_FC_TX
: 0) |
318 (pause
->autoneg
? EFX_FC_AUTO
: 0));
320 if ((wanted_fc
& EFX_FC_TX
) && !(wanted_fc
& EFX_FC_RX
)) {
321 netif_dbg(efx
, drv
, efx
->net_dev
,
322 "Flow control unsupported: tx ON rx OFF\n");
327 if ((wanted_fc
& EFX_FC_AUTO
) && !efx
->link_advertising
[0]) {
328 netif_dbg(efx
, drv
, efx
->net_dev
,
329 "Autonegotiation is disabled\n");
334 /* Hook for Falcon bug 11482 workaround */
335 if (efx
->type
->prepare_enable_fc_tx
&&
336 (wanted_fc
& EFX_FC_TX
) && !(efx
->wanted_fc
& EFX_FC_TX
))
337 efx
->type
->prepare_enable_fc_tx(efx
);
339 old_adv
= efx
->link_advertising
[0];
340 old_fc
= efx
->wanted_fc
;
341 efx_link_set_wanted_fc(efx
, wanted_fc
);
342 if (efx
->link_advertising
[0] != old_adv
||
343 (efx
->wanted_fc
^ old_fc
) & EFX_FC_AUTO
) {
344 rc
= efx
->phy_op
->reconfigure(efx
);
346 netif_err(efx
, drv
, efx
->net_dev
,
347 "Unable to advertise requested flow "
348 "control setting\n");
353 /* Reconfigure the MAC. The PHY *may* generate a link state change event
354 * if the user just changed the advertised capabilities, but there's no
355 * harm doing this twice */
356 efx_mac_reconfigure(efx
);
359 mutex_unlock(&efx
->mac_lock
);
364 static void efx_ethtool_get_wol(struct net_device
*net_dev
,
365 struct ethtool_wolinfo
*wol
)
367 struct efx_nic
*efx
= netdev_priv(net_dev
);
368 return efx
->type
->get_wol(efx
, wol
);
372 static int efx_ethtool_set_wol(struct net_device
*net_dev
,
373 struct ethtool_wolinfo
*wol
)
375 struct efx_nic
*efx
= netdev_priv(net_dev
);
376 return efx
->type
->set_wol(efx
, wol
->wolopts
);
379 static int efx_ethtool_reset(struct net_device
*net_dev
, u32
*flags
)
381 struct efx_nic
*efx
= netdev_priv(net_dev
);
384 rc
= efx
->type
->map_reset_flags(flags
);
388 return efx_reset(efx
, rc
);
391 /* MAC address mask including only I/G bit */
392 static const u8 mac_addr_ig_mask
[ETH_ALEN
] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
394 #define IP4_ADDR_FULL_MASK ((__force __be32)~0)
395 #define IP_PROTO_FULL_MASK 0xFF
396 #define PORT_FULL_MASK ((__force __be16)~0)
397 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
399 static inline void ip6_fill_mask(__be32
*mask
)
401 mask
[0] = mask
[1] = mask
[2] = mask
[3] = ~(__be32
)0;
404 static int efx_ethtool_get_class_rule(struct efx_nic
*efx
,
405 struct ethtool_rx_flow_spec
*rule
,
408 struct ethtool_tcpip4_spec
*ip_entry
= &rule
->h_u
.tcp_ip4_spec
;
409 struct ethtool_tcpip4_spec
*ip_mask
= &rule
->m_u
.tcp_ip4_spec
;
410 struct ethtool_usrip4_spec
*uip_entry
= &rule
->h_u
.usr_ip4_spec
;
411 struct ethtool_usrip4_spec
*uip_mask
= &rule
->m_u
.usr_ip4_spec
;
412 struct ethtool_tcpip6_spec
*ip6_entry
= &rule
->h_u
.tcp_ip6_spec
;
413 struct ethtool_tcpip6_spec
*ip6_mask
= &rule
->m_u
.tcp_ip6_spec
;
414 struct ethtool_usrip6_spec
*uip6_entry
= &rule
->h_u
.usr_ip6_spec
;
415 struct ethtool_usrip6_spec
*uip6_mask
= &rule
->m_u
.usr_ip6_spec
;
416 struct ethhdr
*mac_entry
= &rule
->h_u
.ether_spec
;
417 struct ethhdr
*mac_mask
= &rule
->m_u
.ether_spec
;
418 struct efx_filter_spec spec
;
421 rc
= efx_filter_get_filter_safe(efx
, EFX_FILTER_PRI_MANUAL
,
422 rule
->location
, &spec
);
426 if (spec
.dmaq_id
== EFX_FILTER_RX_DMAQ_ID_DROP
)
427 rule
->ring_cookie
= RX_CLS_FLOW_DISC
;
429 rule
->ring_cookie
= spec
.dmaq_id
;
431 if ((spec
.match_flags
& EFX_FILTER_MATCH_ETHER_TYPE
) &&
432 spec
.ether_type
== htons(ETH_P_IP
) &&
433 (spec
.match_flags
& EFX_FILTER_MATCH_IP_PROTO
) &&
434 (spec
.ip_proto
== IPPROTO_TCP
|| spec
.ip_proto
== IPPROTO_UDP
) &&
436 ~(EFX_FILTER_MATCH_ETHER_TYPE
| EFX_FILTER_MATCH_OUTER_VID
|
437 EFX_FILTER_MATCH_LOC_HOST
| EFX_FILTER_MATCH_REM_HOST
|
438 EFX_FILTER_MATCH_IP_PROTO
|
439 EFX_FILTER_MATCH_LOC_PORT
| EFX_FILTER_MATCH_REM_PORT
))) {
440 rule
->flow_type
= ((spec
.ip_proto
== IPPROTO_TCP
) ?
441 TCP_V4_FLOW
: UDP_V4_FLOW
);
442 if (spec
.match_flags
& EFX_FILTER_MATCH_LOC_HOST
) {
443 ip_entry
->ip4dst
= spec
.loc_host
[0];
444 ip_mask
->ip4dst
= IP4_ADDR_FULL_MASK
;
446 if (spec
.match_flags
& EFX_FILTER_MATCH_REM_HOST
) {
447 ip_entry
->ip4src
= spec
.rem_host
[0];
448 ip_mask
->ip4src
= IP4_ADDR_FULL_MASK
;
450 if (spec
.match_flags
& EFX_FILTER_MATCH_LOC_PORT
) {
451 ip_entry
->pdst
= spec
.loc_port
;
452 ip_mask
->pdst
= PORT_FULL_MASK
;
454 if (spec
.match_flags
& EFX_FILTER_MATCH_REM_PORT
) {
455 ip_entry
->psrc
= spec
.rem_port
;
456 ip_mask
->psrc
= PORT_FULL_MASK
;
458 } else if ((spec
.match_flags
& EFX_FILTER_MATCH_ETHER_TYPE
) &&
459 spec
.ether_type
== htons(ETH_P_IPV6
) &&
460 (spec
.match_flags
& EFX_FILTER_MATCH_IP_PROTO
) &&
461 (spec
.ip_proto
== IPPROTO_TCP
|| spec
.ip_proto
== IPPROTO_UDP
) &&
463 ~(EFX_FILTER_MATCH_ETHER_TYPE
| EFX_FILTER_MATCH_OUTER_VID
|
464 EFX_FILTER_MATCH_LOC_HOST
| EFX_FILTER_MATCH_REM_HOST
|
465 EFX_FILTER_MATCH_IP_PROTO
|
466 EFX_FILTER_MATCH_LOC_PORT
| EFX_FILTER_MATCH_REM_PORT
))) {
467 rule
->flow_type
= ((spec
.ip_proto
== IPPROTO_TCP
) ?
468 TCP_V6_FLOW
: UDP_V6_FLOW
);
469 if (spec
.match_flags
& EFX_FILTER_MATCH_LOC_HOST
) {
470 memcpy(ip6_entry
->ip6dst
, spec
.loc_host
,
471 sizeof(ip6_entry
->ip6dst
));
472 ip6_fill_mask(ip6_mask
->ip6dst
);
474 if (spec
.match_flags
& EFX_FILTER_MATCH_REM_HOST
) {
475 memcpy(ip6_entry
->ip6src
, spec
.rem_host
,
476 sizeof(ip6_entry
->ip6src
));
477 ip6_fill_mask(ip6_mask
->ip6src
);
479 if (spec
.match_flags
& EFX_FILTER_MATCH_LOC_PORT
) {
480 ip6_entry
->pdst
= spec
.loc_port
;
481 ip6_mask
->pdst
= PORT_FULL_MASK
;
483 if (spec
.match_flags
& EFX_FILTER_MATCH_REM_PORT
) {
484 ip6_entry
->psrc
= spec
.rem_port
;
485 ip6_mask
->psrc
= PORT_FULL_MASK
;
487 } else if (!(spec
.match_flags
&
488 ~(EFX_FILTER_MATCH_LOC_MAC
| EFX_FILTER_MATCH_LOC_MAC_IG
|
489 EFX_FILTER_MATCH_REM_MAC
| EFX_FILTER_MATCH_ETHER_TYPE
|
490 EFX_FILTER_MATCH_OUTER_VID
))) {
491 rule
->flow_type
= ETHER_FLOW
;
492 if (spec
.match_flags
&
493 (EFX_FILTER_MATCH_LOC_MAC
| EFX_FILTER_MATCH_LOC_MAC_IG
)) {
494 ether_addr_copy(mac_entry
->h_dest
, spec
.loc_mac
);
495 if (spec
.match_flags
& EFX_FILTER_MATCH_LOC_MAC
)
496 eth_broadcast_addr(mac_mask
->h_dest
);
498 ether_addr_copy(mac_mask
->h_dest
,
501 if (spec
.match_flags
& EFX_FILTER_MATCH_REM_MAC
) {
502 ether_addr_copy(mac_entry
->h_source
, spec
.rem_mac
);
503 eth_broadcast_addr(mac_mask
->h_source
);
505 if (spec
.match_flags
& EFX_FILTER_MATCH_ETHER_TYPE
) {
506 mac_entry
->h_proto
= spec
.ether_type
;
507 mac_mask
->h_proto
= ETHER_TYPE_FULL_MASK
;
509 } else if (spec
.match_flags
& EFX_FILTER_MATCH_ETHER_TYPE
&&
510 spec
.ether_type
== htons(ETH_P_IP
) &&
512 ~(EFX_FILTER_MATCH_ETHER_TYPE
| EFX_FILTER_MATCH_OUTER_VID
|
513 EFX_FILTER_MATCH_LOC_HOST
| EFX_FILTER_MATCH_REM_HOST
|
514 EFX_FILTER_MATCH_IP_PROTO
))) {
515 rule
->flow_type
= IPV4_USER_FLOW
;
516 uip_entry
->ip_ver
= ETH_RX_NFC_IP4
;
517 if (spec
.match_flags
& EFX_FILTER_MATCH_IP_PROTO
) {
518 uip_mask
->proto
= IP_PROTO_FULL_MASK
;
519 uip_entry
->proto
= spec
.ip_proto
;
521 if (spec
.match_flags
& EFX_FILTER_MATCH_LOC_HOST
) {
522 uip_entry
->ip4dst
= spec
.loc_host
[0];
523 uip_mask
->ip4dst
= IP4_ADDR_FULL_MASK
;
525 if (spec
.match_flags
& EFX_FILTER_MATCH_REM_HOST
) {
526 uip_entry
->ip4src
= spec
.rem_host
[0];
527 uip_mask
->ip4src
= IP4_ADDR_FULL_MASK
;
529 } else if (spec
.match_flags
& EFX_FILTER_MATCH_ETHER_TYPE
&&
530 spec
.ether_type
== htons(ETH_P_IPV6
) &&
532 ~(EFX_FILTER_MATCH_ETHER_TYPE
| EFX_FILTER_MATCH_OUTER_VID
|
533 EFX_FILTER_MATCH_LOC_HOST
| EFX_FILTER_MATCH_REM_HOST
|
534 EFX_FILTER_MATCH_IP_PROTO
))) {
535 rule
->flow_type
= IPV6_USER_FLOW
;
536 if (spec
.match_flags
& EFX_FILTER_MATCH_IP_PROTO
) {
537 uip6_mask
->l4_proto
= IP_PROTO_FULL_MASK
;
538 uip6_entry
->l4_proto
= spec
.ip_proto
;
540 if (spec
.match_flags
& EFX_FILTER_MATCH_LOC_HOST
) {
541 memcpy(uip6_entry
->ip6dst
, spec
.loc_host
,
542 sizeof(uip6_entry
->ip6dst
));
543 ip6_fill_mask(uip6_mask
->ip6dst
);
545 if (spec
.match_flags
& EFX_FILTER_MATCH_REM_HOST
) {
546 memcpy(uip6_entry
->ip6src
, spec
.rem_host
,
547 sizeof(uip6_entry
->ip6src
));
548 ip6_fill_mask(uip6_mask
->ip6src
);
551 /* The above should handle all filters that we insert */
556 if (spec
.match_flags
& EFX_FILTER_MATCH_OUTER_VID
) {
557 rule
->flow_type
|= FLOW_EXT
;
558 rule
->h_ext
.vlan_tci
= spec
.outer_vid
;
559 rule
->m_ext
.vlan_tci
= htons(0xfff);
562 if (spec
.flags
& EFX_FILTER_FLAG_RX_RSS
) {
563 rule
->flow_type
|= FLOW_RSS
;
564 *rss_context
= spec
.rss_context
;
571 efx_ethtool_get_rxnfc(struct net_device
*net_dev
,
572 struct ethtool_rxnfc
*info
, u32
*rule_locs
)
574 struct efx_nic
*efx
= netdev_priv(net_dev
);
579 case ETHTOOL_GRXRINGS
:
580 info
->data
= efx
->n_rx_channels
;
583 case ETHTOOL_GRXFH
: {
584 struct efx_rss_context
*ctx
= &efx
->rss_context
;
586 mutex_lock(&efx
->rss_lock
);
587 if (info
->flow_type
& FLOW_RSS
&& info
->rss_context
) {
588 ctx
= efx_find_rss_context_entry(efx
, info
->rss_context
);
595 if (!efx_rss_active(ctx
)) /* No RSS */
597 switch (info
->flow_type
& ~FLOW_RSS
) {
599 if (ctx
->rx_hash_udp_4tuple
)
602 info
->data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
607 info
->data
|= RXH_IP_SRC
| RXH_IP_DST
;
610 if (ctx
->rx_hash_udp_4tuple
)
613 info
->data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
618 info
->data
|= RXH_IP_SRC
| RXH_IP_DST
;
624 mutex_unlock(&efx
->rss_lock
);
628 case ETHTOOL_GRXCLSRLCNT
:
629 info
->data
= efx_filter_get_rx_id_limit(efx
);
632 info
->data
|= RX_CLS_LOC_SPECIAL
;
634 efx_filter_count_rx_used(efx
, EFX_FILTER_PRI_MANUAL
);
637 case ETHTOOL_GRXCLSRULE
:
638 if (efx_filter_get_rx_id_limit(efx
) == 0)
640 rc
= efx_ethtool_get_class_rule(efx
, &info
->fs
, &rss_context
);
643 if (info
->fs
.flow_type
& FLOW_RSS
)
644 info
->rss_context
= rss_context
;
647 case ETHTOOL_GRXCLSRLALL
:
648 info
->data
= efx_filter_get_rx_id_limit(efx
);
651 rc
= efx_filter_get_rx_ids(efx
, EFX_FILTER_PRI_MANUAL
,
652 rule_locs
, info
->rule_cnt
);
663 static inline bool ip6_mask_is_full(__be32 mask
[4])
665 return !~(mask
[0] & mask
[1] & mask
[2] & mask
[3]);
668 static inline bool ip6_mask_is_empty(__be32 mask
[4])
670 return !(mask
[0] | mask
[1] | mask
[2] | mask
[3]);
673 static int efx_ethtool_set_class_rule(struct efx_nic
*efx
,
674 struct ethtool_rx_flow_spec
*rule
,
677 struct ethtool_tcpip4_spec
*ip_entry
= &rule
->h_u
.tcp_ip4_spec
;
678 struct ethtool_tcpip4_spec
*ip_mask
= &rule
->m_u
.tcp_ip4_spec
;
679 struct ethtool_usrip4_spec
*uip_entry
= &rule
->h_u
.usr_ip4_spec
;
680 struct ethtool_usrip4_spec
*uip_mask
= &rule
->m_u
.usr_ip4_spec
;
681 struct ethtool_tcpip6_spec
*ip6_entry
= &rule
->h_u
.tcp_ip6_spec
;
682 struct ethtool_tcpip6_spec
*ip6_mask
= &rule
->m_u
.tcp_ip6_spec
;
683 struct ethtool_usrip6_spec
*uip6_entry
= &rule
->h_u
.usr_ip6_spec
;
684 struct ethtool_usrip6_spec
*uip6_mask
= &rule
->m_u
.usr_ip6_spec
;
685 u32 flow_type
= rule
->flow_type
& ~(FLOW_EXT
| FLOW_RSS
);
686 struct ethhdr
*mac_entry
= &rule
->h_u
.ether_spec
;
687 struct ethhdr
*mac_mask
= &rule
->m_u
.ether_spec
;
688 enum efx_filter_flags flags
= 0;
689 struct efx_filter_spec spec
;
692 /* Check that user wants us to choose the location */
693 if (rule
->location
!= RX_CLS_LOC_ANY
)
696 /* Range-check ring_cookie */
697 if (rule
->ring_cookie
>= efx
->n_rx_channels
&&
698 rule
->ring_cookie
!= RX_CLS_FLOW_DISC
)
701 /* Check for unsupported extensions */
702 if ((rule
->flow_type
& FLOW_EXT
) &&
703 (rule
->m_ext
.vlan_etype
|| rule
->m_ext
.data
[0] ||
704 rule
->m_ext
.data
[1]))
708 flags
|= EFX_FILTER_FLAG_RX_SCATTER
;
709 if (rule
->flow_type
& FLOW_RSS
)
710 flags
|= EFX_FILTER_FLAG_RX_RSS
;
712 efx_filter_init_rx(&spec
, EFX_FILTER_PRI_MANUAL
, flags
,
713 (rule
->ring_cookie
== RX_CLS_FLOW_DISC
) ?
714 EFX_FILTER_RX_DMAQ_ID_DROP
: rule
->ring_cookie
);
716 if (rule
->flow_type
& FLOW_RSS
)
717 spec
.rss_context
= rss_context
;
722 spec
.match_flags
= (EFX_FILTER_MATCH_ETHER_TYPE
|
723 EFX_FILTER_MATCH_IP_PROTO
);
724 spec
.ether_type
= htons(ETH_P_IP
);
725 spec
.ip_proto
= flow_type
== TCP_V4_FLOW
? IPPROTO_TCP
727 if (ip_mask
->ip4dst
) {
728 if (ip_mask
->ip4dst
!= IP4_ADDR_FULL_MASK
)
730 spec
.match_flags
|= EFX_FILTER_MATCH_LOC_HOST
;
731 spec
.loc_host
[0] = ip_entry
->ip4dst
;
733 if (ip_mask
->ip4src
) {
734 if (ip_mask
->ip4src
!= IP4_ADDR_FULL_MASK
)
736 spec
.match_flags
|= EFX_FILTER_MATCH_REM_HOST
;
737 spec
.rem_host
[0] = ip_entry
->ip4src
;
740 if (ip_mask
->pdst
!= PORT_FULL_MASK
)
742 spec
.match_flags
|= EFX_FILTER_MATCH_LOC_PORT
;
743 spec
.loc_port
= ip_entry
->pdst
;
746 if (ip_mask
->psrc
!= PORT_FULL_MASK
)
748 spec
.match_flags
|= EFX_FILTER_MATCH_REM_PORT
;
749 spec
.rem_port
= ip_entry
->psrc
;
757 spec
.match_flags
= (EFX_FILTER_MATCH_ETHER_TYPE
|
758 EFX_FILTER_MATCH_IP_PROTO
);
759 spec
.ether_type
= htons(ETH_P_IPV6
);
760 spec
.ip_proto
= flow_type
== TCP_V6_FLOW
? IPPROTO_TCP
762 if (!ip6_mask_is_empty(ip6_mask
->ip6dst
)) {
763 if (!ip6_mask_is_full(ip6_mask
->ip6dst
))
765 spec
.match_flags
|= EFX_FILTER_MATCH_LOC_HOST
;
766 memcpy(spec
.loc_host
, ip6_entry
->ip6dst
, sizeof(spec
.loc_host
));
768 if (!ip6_mask_is_empty(ip6_mask
->ip6src
)) {
769 if (!ip6_mask_is_full(ip6_mask
->ip6src
))
771 spec
.match_flags
|= EFX_FILTER_MATCH_REM_HOST
;
772 memcpy(spec
.rem_host
, ip6_entry
->ip6src
, sizeof(spec
.rem_host
));
774 if (ip6_mask
->pdst
) {
775 if (ip6_mask
->pdst
!= PORT_FULL_MASK
)
777 spec
.match_flags
|= EFX_FILTER_MATCH_LOC_PORT
;
778 spec
.loc_port
= ip6_entry
->pdst
;
780 if (ip6_mask
->psrc
) {
781 if (ip6_mask
->psrc
!= PORT_FULL_MASK
)
783 spec
.match_flags
|= EFX_FILTER_MATCH_REM_PORT
;
784 spec
.rem_port
= ip6_entry
->psrc
;
786 if (ip6_mask
->tclass
)
791 if (uip_mask
->l4_4_bytes
|| uip_mask
->tos
|| uip_mask
->ip_ver
||
792 uip_entry
->ip_ver
!= ETH_RX_NFC_IP4
)
794 spec
.match_flags
= EFX_FILTER_MATCH_ETHER_TYPE
;
795 spec
.ether_type
= htons(ETH_P_IP
);
796 if (uip_mask
->ip4dst
) {
797 if (uip_mask
->ip4dst
!= IP4_ADDR_FULL_MASK
)
799 spec
.match_flags
|= EFX_FILTER_MATCH_LOC_HOST
;
800 spec
.loc_host
[0] = uip_entry
->ip4dst
;
802 if (uip_mask
->ip4src
) {
803 if (uip_mask
->ip4src
!= IP4_ADDR_FULL_MASK
)
805 spec
.match_flags
|= EFX_FILTER_MATCH_REM_HOST
;
806 spec
.rem_host
[0] = uip_entry
->ip4src
;
808 if (uip_mask
->proto
) {
809 if (uip_mask
->proto
!= IP_PROTO_FULL_MASK
)
811 spec
.match_flags
|= EFX_FILTER_MATCH_IP_PROTO
;
812 spec
.ip_proto
= uip_entry
->proto
;
817 if (uip6_mask
->l4_4_bytes
|| uip6_mask
->tclass
)
819 spec
.match_flags
= EFX_FILTER_MATCH_ETHER_TYPE
;
820 spec
.ether_type
= htons(ETH_P_IPV6
);
821 if (!ip6_mask_is_empty(uip6_mask
->ip6dst
)) {
822 if (!ip6_mask_is_full(uip6_mask
->ip6dst
))
824 spec
.match_flags
|= EFX_FILTER_MATCH_LOC_HOST
;
825 memcpy(spec
.loc_host
, uip6_entry
->ip6dst
, sizeof(spec
.loc_host
));
827 if (!ip6_mask_is_empty(uip6_mask
->ip6src
)) {
828 if (!ip6_mask_is_full(uip6_mask
->ip6src
))
830 spec
.match_flags
|= EFX_FILTER_MATCH_REM_HOST
;
831 memcpy(spec
.rem_host
, uip6_entry
->ip6src
, sizeof(spec
.rem_host
));
833 if (uip6_mask
->l4_proto
) {
834 if (uip6_mask
->l4_proto
!= IP_PROTO_FULL_MASK
)
836 spec
.match_flags
|= EFX_FILTER_MATCH_IP_PROTO
;
837 spec
.ip_proto
= uip6_entry
->l4_proto
;
842 if (!is_zero_ether_addr(mac_mask
->h_dest
)) {
843 if (ether_addr_equal(mac_mask
->h_dest
,
845 spec
.match_flags
|= EFX_FILTER_MATCH_LOC_MAC_IG
;
846 else if (is_broadcast_ether_addr(mac_mask
->h_dest
))
847 spec
.match_flags
|= EFX_FILTER_MATCH_LOC_MAC
;
850 ether_addr_copy(spec
.loc_mac
, mac_entry
->h_dest
);
852 if (!is_zero_ether_addr(mac_mask
->h_source
)) {
853 if (!is_broadcast_ether_addr(mac_mask
->h_source
))
855 spec
.match_flags
|= EFX_FILTER_MATCH_REM_MAC
;
856 ether_addr_copy(spec
.rem_mac
, mac_entry
->h_source
);
858 if (mac_mask
->h_proto
) {
859 if (mac_mask
->h_proto
!= ETHER_TYPE_FULL_MASK
)
861 spec
.match_flags
|= EFX_FILTER_MATCH_ETHER_TYPE
;
862 spec
.ether_type
= mac_entry
->h_proto
;
870 if ((rule
->flow_type
& FLOW_EXT
) && rule
->m_ext
.vlan_tci
) {
871 if (rule
->m_ext
.vlan_tci
!= htons(0xfff))
873 spec
.match_flags
|= EFX_FILTER_MATCH_OUTER_VID
;
874 spec
.outer_vid
= rule
->h_ext
.vlan_tci
;
877 rc
= efx_filter_insert_filter(efx
, &spec
, true);
885 static int efx_ethtool_set_rxnfc(struct net_device
*net_dev
,
886 struct ethtool_rxnfc
*info
)
888 struct efx_nic
*efx
= netdev_priv(net_dev
);
890 if (efx_filter_get_rx_id_limit(efx
) == 0)
894 case ETHTOOL_SRXCLSRLINS
:
895 return efx_ethtool_set_class_rule(efx
, &info
->fs
,
898 case ETHTOOL_SRXCLSRLDEL
:
899 return efx_filter_remove_id_safe(efx
, EFX_FILTER_PRI_MANUAL
,
907 static u32
efx_ethtool_get_rxfh_indir_size(struct net_device
*net_dev
)
909 struct efx_nic
*efx
= netdev_priv(net_dev
);
911 if (efx
->n_rx_channels
== 1)
913 return ARRAY_SIZE(efx
->rss_context
.rx_indir_table
);
916 static u32
efx_ethtool_get_rxfh_key_size(struct net_device
*net_dev
)
918 struct efx_nic
*efx
= netdev_priv(net_dev
);
920 return efx
->type
->rx_hash_key_size
;
923 static int efx_ethtool_get_rxfh(struct net_device
*net_dev
, u32
*indir
, u8
*key
,
926 struct efx_nic
*efx
= netdev_priv(net_dev
);
929 rc
= efx
->type
->rx_pull_rss_config(efx
);
934 *hfunc
= ETH_RSS_HASH_TOP
;
936 memcpy(indir
, efx
->rss_context
.rx_indir_table
,
937 sizeof(efx
->rss_context
.rx_indir_table
));
939 memcpy(key
, efx
->rss_context
.rx_hash_key
,
940 efx
->type
->rx_hash_key_size
);
944 static int efx_ethtool_set_rxfh(struct net_device
*net_dev
, const u32
*indir
,
945 const u8
*key
, const u8 hfunc
)
947 struct efx_nic
*efx
= netdev_priv(net_dev
);
949 /* Hash function is Toeplitz, cannot be changed */
950 if (hfunc
!= ETH_RSS_HASH_NO_CHANGE
&& hfunc
!= ETH_RSS_HASH_TOP
)
956 key
= efx
->rss_context
.rx_hash_key
;
958 indir
= efx
->rss_context
.rx_indir_table
;
960 return efx
->type
->rx_push_rss_config(efx
, true, indir
, key
);
963 static int efx_ethtool_get_rxfh_context(struct net_device
*net_dev
, u32
*indir
,
964 u8
*key
, u8
*hfunc
, u32 rss_context
)
966 struct efx_nic
*efx
= netdev_priv(net_dev
);
967 struct efx_rss_context
*ctx
;
970 if (!efx
->type
->rx_pull_rss_context_config
)
973 mutex_lock(&efx
->rss_lock
);
974 ctx
= efx_find_rss_context_entry(efx
, rss_context
);
979 rc
= efx
->type
->rx_pull_rss_context_config(efx
, ctx
);
984 *hfunc
= ETH_RSS_HASH_TOP
;
986 memcpy(indir
, ctx
->rx_indir_table
, sizeof(ctx
->rx_indir_table
));
988 memcpy(key
, ctx
->rx_hash_key
, efx
->type
->rx_hash_key_size
);
990 mutex_unlock(&efx
->rss_lock
);
994 static int efx_ethtool_set_rxfh_context(struct net_device
*net_dev
,
995 const u32
*indir
, const u8
*key
,
996 const u8 hfunc
, u32
*rss_context
,
999 struct efx_nic
*efx
= netdev_priv(net_dev
);
1000 struct efx_rss_context
*ctx
;
1001 bool allocated
= false;
1004 if (!efx
->type
->rx_push_rss_context_config
)
1006 /* Hash function is Toeplitz, cannot be changed */
1007 if (hfunc
!= ETH_RSS_HASH_NO_CHANGE
&& hfunc
!= ETH_RSS_HASH_TOP
)
1010 mutex_lock(&efx
->rss_lock
);
1012 if (*rss_context
== ETH_RXFH_CONTEXT_ALLOC
) {
1014 /* alloc + delete == Nothing to do */
1018 ctx
= efx_alloc_rss_context_entry(efx
);
1023 ctx
->context_id
= EFX_MCDI_RSS_CONTEXT_INVALID
;
1024 /* Initialise indir table and key to defaults */
1025 efx_set_default_rx_indir_table(efx
, ctx
);
1026 netdev_rss_key_fill(ctx
->rx_hash_key
, sizeof(ctx
->rx_hash_key
));
1029 ctx
= efx_find_rss_context_entry(efx
, *rss_context
);
1037 /* delete this context */
1038 rc
= efx
->type
->rx_push_rss_context_config(efx
, ctx
, NULL
, NULL
);
1040 efx_free_rss_context_entry(ctx
);
1045 key
= ctx
->rx_hash_key
;
1047 indir
= ctx
->rx_indir_table
;
1049 rc
= efx
->type
->rx_push_rss_context_config(efx
, ctx
, indir
, key
);
1050 if (rc
&& allocated
)
1051 efx_free_rss_context_entry(ctx
);
1053 *rss_context
= ctx
->user_id
;
1055 mutex_unlock(&efx
->rss_lock
);
1059 static int efx_ethtool_get_ts_info(struct net_device
*net_dev
,
1060 struct ethtool_ts_info
*ts_info
)
1062 struct efx_nic
*efx
= netdev_priv(net_dev
);
1064 /* Software capabilities */
1065 ts_info
->so_timestamping
= (SOF_TIMESTAMPING_RX_SOFTWARE
|
1066 SOF_TIMESTAMPING_SOFTWARE
);
1067 ts_info
->phc_index
= -1;
1069 efx_ptp_get_ts_info(efx
, ts_info
);
1073 static int efx_ethtool_get_module_eeprom(struct net_device
*net_dev
,
1074 struct ethtool_eeprom
*ee
,
1077 struct efx_nic
*efx
= netdev_priv(net_dev
);
1080 if (!efx
->phy_op
|| !efx
->phy_op
->get_module_eeprom
)
1083 mutex_lock(&efx
->mac_lock
);
1084 ret
= efx
->phy_op
->get_module_eeprom(efx
, ee
, data
);
1085 mutex_unlock(&efx
->mac_lock
);
1090 static int efx_ethtool_get_module_info(struct net_device
*net_dev
,
1091 struct ethtool_modinfo
*modinfo
)
1093 struct efx_nic
*efx
= netdev_priv(net_dev
);
1096 if (!efx
->phy_op
|| !efx
->phy_op
->get_module_info
)
1099 mutex_lock(&efx
->mac_lock
);
1100 ret
= efx
->phy_op
->get_module_info(efx
, modinfo
);
1101 mutex_unlock(&efx
->mac_lock
);
1106 static int efx_ethtool_get_fecparam(struct net_device
*net_dev
,
1107 struct ethtool_fecparam
*fecparam
)
1109 struct efx_nic
*efx
= netdev_priv(net_dev
);
1112 if (!efx
->phy_op
|| !efx
->phy_op
->get_fecparam
)
1114 mutex_lock(&efx
->mac_lock
);
1115 rc
= efx
->phy_op
->get_fecparam(efx
, fecparam
);
1116 mutex_unlock(&efx
->mac_lock
);
1121 static int efx_ethtool_set_fecparam(struct net_device
*net_dev
,
1122 struct ethtool_fecparam
*fecparam
)
1124 struct efx_nic
*efx
= netdev_priv(net_dev
);
1127 if (!efx
->phy_op
|| !efx
->phy_op
->get_fecparam
)
1129 mutex_lock(&efx
->mac_lock
);
1130 rc
= efx
->phy_op
->set_fecparam(efx
, fecparam
);
1131 mutex_unlock(&efx
->mac_lock
);
1136 const struct ethtool_ops efx_ethtool_ops
= {
1137 .get_drvinfo
= efx_ethtool_get_drvinfo
,
1138 .get_regs_len
= efx_ethtool_get_regs_len
,
1139 .get_regs
= efx_ethtool_get_regs
,
1140 .get_msglevel
= efx_ethtool_get_msglevel
,
1141 .set_msglevel
= efx_ethtool_set_msglevel
,
1142 .nway_reset
= efx_ethtool_nway_reset
,
1143 .get_link
= ethtool_op_get_link
,
1144 .get_coalesce
= efx_ethtool_get_coalesce
,
1145 .set_coalesce
= efx_ethtool_set_coalesce
,
1146 .get_ringparam
= efx_ethtool_get_ringparam
,
1147 .set_ringparam
= efx_ethtool_set_ringparam
,
1148 .get_pauseparam
= efx_ethtool_get_pauseparam
,
1149 .set_pauseparam
= efx_ethtool_set_pauseparam
,
1150 .get_sset_count
= efx_ethtool_get_sset_count
,
1151 .self_test
= efx_ethtool_self_test
,
1152 .get_strings
= efx_ethtool_get_strings
,
1153 .set_phys_id
= efx_ethtool_phys_id
,
1154 .get_ethtool_stats
= efx_ethtool_get_stats
,
1155 .get_wol
= efx_ethtool_get_wol
,
1156 .set_wol
= efx_ethtool_set_wol
,
1157 .reset
= efx_ethtool_reset
,
1158 .get_rxnfc
= efx_ethtool_get_rxnfc
,
1159 .set_rxnfc
= efx_ethtool_set_rxnfc
,
1160 .get_rxfh_indir_size
= efx_ethtool_get_rxfh_indir_size
,
1161 .get_rxfh_key_size
= efx_ethtool_get_rxfh_key_size
,
1162 .get_rxfh
= efx_ethtool_get_rxfh
,
1163 .set_rxfh
= efx_ethtool_set_rxfh
,
1164 .get_rxfh_context
= efx_ethtool_get_rxfh_context
,
1165 .set_rxfh_context
= efx_ethtool_set_rxfh_context
,
1166 .get_ts_info
= efx_ethtool_get_ts_info
,
1167 .get_module_info
= efx_ethtool_get_module_info
,
1168 .get_module_eeprom
= efx_ethtool_get_module_eeprom
,
1169 .get_link_ksettings
= efx_ethtool_get_link_ksettings
,
1170 .set_link_ksettings
= efx_ethtool_set_link_ksettings
,
1171 .get_fecparam
= efx_ethtool_get_fecparam
,
1172 .set_fecparam
= efx_ethtool_set_fecparam
,