// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
11 #include "net_driver.h"
12 #include <linux/filter.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
16 #include "efx_common.h"
17 #include "efx_channels.h"
21 #include "rx_common.h"
22 #include "tx_common.h"
24 #include "mcdi_port_common.h"
26 #include "mcdi_pcol.h"
27 #include "ef100_rep.h"

static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			     NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			     NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			     NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
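/* i.e. poll for up to BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 10 s */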

/* Default stats update time */
#define STATS_PERIOD_MS_DEFAULT 1000

static const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
static const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
	[RESET_TYPE_ALL]                = "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]              = "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]           = "DATAPATH",
	[RESET_TYPE_MC_BIST]            = "MC_BIST",
	[RESET_TYPE_DISABLE]            = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT]       = "MCDI_TIMEOUT (FLR)",
};

#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)
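/* STRING_TABLE_LOOKUP() (defined in net_driver.h) range-checks the value
 * against efx_reset_type_max before indexing efx_reset_type_names[].
 */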

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]         = "NONE",
	[LOOPBACK_DATA]         = "DATAPATH",
	[LOOPBACK_GMAC]         = "GMAC",
	[LOOPBACK_XGMII]        = "XGMII",
	[LOOPBACK_XGXS]         = "XGXS",
	[LOOPBACK_XAUI]         = "XAUI",
	[LOOPBACK_GMII]         = "GMII",
	[LOOPBACK_SGMII]        = "SGMII",
	[LOOPBACK_XGBR]         = "XGBR",
	[LOOPBACK_XFI]          = "XFI",
	[LOOPBACK_XAUI_FAR]     = "XAUI_FAR",
	[LOOPBACK_GMII_FAR]     = "GMII_FAR",
	[LOOPBACK_SGMII_FAR]    = "SGMII_FAR",
	[LOOPBACK_XFI_FAR]      = "XFI_FAR",
	[LOOPBACK_GPHY]         = "GPHY",
	[LOOPBACK_PHYXS]        = "PHYXS",
	[LOOPBACK_PCS]          = "PCS",
	[LOOPBACK_PMAPMD]       = "PMA/PMD",
	[LOOPBACK_XPORT]        = "XPORT",
	[LOOPBACK_XGMII_WS]     = "XGMII_WS",
	[LOOPBACK_XAUI_WS]      = "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]      = "GMII_WS",
	[LOOPBACK_XFI_WS]       = "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]   = "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]     = "PHYXS_WS",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

int efx_create_reset_workqueue(void)
{
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		printk(KERN_ERR "Failed to create reset workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

void efx_queue_reset_work(struct efx_nic *efx)
{
	queue_work(reset_workqueue, &efx->reset_work);
}

void efx_flush_reset_workqueue(struct efx_nic *efx)
{
	cancel_work_sync(&efx->reset_work);
}

void efx_destroy_reset_workqueue(void)
{
	if (reset_workqueue) {
		destroy_workqueue(reset_workqueue);
		reset_workqueue = NULL;
	}
}

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
{
	if (efx->type->reconfigure_mac) {
		down_read(&efx->filter_sem);
		efx->type->reconfigure_mac(efx, mtu_only);
		up_read(&efx->filter_sem);
	}
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly.
 */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx_mac_reconfigure(efx, false);
	mutex_unlock(&efx->mac_lock);
}

int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct sockaddr *addr = data;
	u8 *new_addr = addr->sa_data;
	u8 old_addr[6];
	int rc;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	/* save old address */
	ether_addr_copy(old_addr, net_dev->dev_addr);
	eth_hw_addr_set(net_dev, new_addr);
	if (efx->type->set_mac_address) {
		rc = efx->type->set_mac_address(efx);
		if (rc) {
			eth_hw_addr_set(net_dev, old_addr);
			return rc;
		}
	}

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx_mac_reconfigure(efx, false);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	int rc;

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
		rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
		if (rc)
			return rc;
	}

	/* If Rx VLAN filter is changed, update filters via mac_reconfigure.
	 * If rx-fcs is changed, mac_reconfigure updates that too.
	 */
	if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
					  NETIF_F_RXFCS)) {
		/* efx_set_rx_mode() will schedule MAC work to update filters
		 * when the new features are finally set in net_dev.
		 */
		efx_set_rx_mode(net_dev);
	}

	return 0;
}

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN.
	 */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
{
	/* The maximum MTU that we can fit in a single page, allowing for
	 * framing, overhead and XDP headroom + tailroom.
	 */
	int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
		       efx->rx_prefix_size + efx->type->rx_buffer_padding +
		       efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM;
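
	/* EFX_MAX_FRAME_LEN(0) is the frame size for a zero-byte payload,
	 * i.e. just the fixed framing overhead, so whatever is left of the
	 * page after all overheads is the largest usable MTU.
	 */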
	return PAGE_SIZE - overhead;
}

/* Context: process, rtnl_lock() held. */
int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	if (rtnl_dereference(efx->xdp_prog) &&
	    new_mtu > efx_xdp_max_mtu(efx)) {
		netif_err(efx, drv, efx->net_dev,
			  "Requested MTU of %d too big for XDP (max: %d)\n",
			  new_mtu, efx_xdp_max_mtu(efx));
		return -EINVAL;
	}

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_device_detach_sync(efx);
	efx_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	WRITE_ONCE(net_dev->mtu, new_mtu);
	efx_mac_reconfigure(efx, true);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	efx_device_attach_if_not_resetting(efx);
	return 0;
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway.
	 */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled && efx->type->monitor)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx_start_monitor(efx);
}

void efx_start_monitor(struct efx_nic *efx)
{
	if (efx->type->monitor)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) + EFX_XDP_HEADROOM +
		      efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM);

	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	efx_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if ((efx->rx_scatter != old_rx_scatter) &&
	    efx->type->filter_update_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	efx_start_channels(efx);

	efx_ptp_start_datapath(efx);

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_ptp_stop_datapath(efx);

	efx_stop_channels(efx);
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
 * force the Autoneg bit on.
 */
void efx_link_clear_advertising(struct efx_nic *efx)
{
	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising[0]) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising[0] |= (ADVERTISED_Pause |
						     ADVERTISED_Asym_Pause);
		else
			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
						      ADVERTISED_Asym_Pause);
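		/* Pause|Asym_Pause now reflect the RX setting; XORing
		 * Asym_Pause in for TX gives the standard 802.3 encodings
		 * (as in mii_advertise_flowctrl()): RX only = Pause|Asym,
		 * TX only = Asym, RX+TX = Pause.
		 */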
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
	}
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	efx_mac_reconfigure(efx, false);

	mutex_unlock(&efx->mac_lock);
}

/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again. This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock
	 */
	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	efx_start_monitor(efx);

	efx_selftest_async_start(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	if (efx->type->start_stats) {
		efx->type->start_stats(efx);
		efx->type->pull_stats(efx);
		spin_lock_bh(&efx->stats_lock);
		efx->type->update_stats(efx, NULL, NULL);
		spin_unlock_bh(&efx->stats_lock);
	}
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	if (efx->type->update_stats) {
		/* update stats before we go down so we can accurately count
		 * rx_nodesc_drops
		 */
		efx->type->pull_stats(efx);
		spin_lock_bh(&efx->stats_lock);
		efx->type->update_stats(efx, NULL, NULL);
		spin_unlock_bh(&efx->stats_lock);
		efx->type->stop_stats(efx);
	}

	efx_stop_port(efx);

	/* Stop the kernel transmit interface. This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	spin_lock_bh(&efx->stats_lock);
	efx_nic_update_stats_atomic(efx, NULL, stats);
	spin_unlock_bh(&efx->stats_lock);
}

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc = 0;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	if (efx->type->reconfigure_port)
		rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

static void efx_wait_for_bist_end(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
		if (efx_mcdi_poll_reboot(efx))
			goto out;
		msleep(BIST_WAIT_DELAY_MS);
	}

	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
out:
	/* Either way unset the BIST flag. If we found no reboot we probably
	 * won't recover, but we should try.
	 */
	efx->mc_bist_for_other_fn = false;
}

/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus. In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);

	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}

/* Tears down the entire software state and most of the hardware state
 * before reset.
 */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->prepare_flr(efx);

	efx_stop_all(efx);
	efx_disable_interrupts(efx);

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	mutex_lock(&efx->net_dev->ethtool->rss_lock);
	efx->type->fini(efx);
}

/* Context: netif_tx_lock held, BHs disabled. */
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE.
 */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->finish_flr(efx);

	/* Ensure that SRAM is initialised even if we're disabling the device */
	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH) {
		rc = efx_mcdi_port_reconfigure(efx);
		if (rc && rc != -EPERM)
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_restore(efx);
	if (rc) /* not fatal; the PF will still work fine */
		netif_warn(efx, probe, efx->net_dev,
			   "failed to restore vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	if (efx->type->rx_restore_rss_contexts)
		efx->type->rx_restore_rss_contexts(efx);
	mutex_unlock(&efx->net_dev->ethtool->rss_lock);
	efx->type->filter_table_restore(efx);
	up_write(&efx->filter_sem);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	if (efx->type->udp_tnl_push_ports)
		efx->type->udp_tnl_push_ports(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->net_dev->ethtool->rss_lock);
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method. Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2 = 0;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	efx_device_detach_sync(efx);
	/* efx_reset_down() grabs locks that prevent recovery on EF100.
	 * EF100 reset is handled in the efx_nic_type callback below.
	 */
	if (efx_nic_rev(efx) != EFX_REV_EF100)
		efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered. We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
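	/* Note: -(1 << (method + 1)) == ~((1 << (method + 1)) - 1), a mask
	 * that clears bit 'method' and all lower bits while leaving any
	 * higher-numbered (uncovered) reset requests pending.
	 */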
	if (method < RESET_TYPE_MAX_METHOD)
		efx->reset_pending &= -(1 << (method + 1));
	else /* it doesn't fit into the well-ordered scope hierarchy */
		__clear_bit(method, &efx->reset_pending);

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests.
	 */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	if (efx_nic_rev(efx) != EFX_REV_EF100)
		rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending;
	enum reset_type method;
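
	/* fls() returns one plus the index of the most significant set bit,
	 * so 'method' becomes the highest-numbered pending reset type, which
	 * by the scope ordering subsumes the lower-numbered ones.
	 */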
	pending = READ_ONCE(efx->reset_pending);
	method = fls(pending) - 1;

	if (method == RESET_TYPE_MC_BIST)
		efx_wait_for_bist_end(efx);

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    efx_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in efx_schedule_reset() but it may
	 * have changed by now. Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx_net_active(efx->state))
		(void)efx_reset(efx, method);

	rtnl_unlock();
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx_recovering(efx->state)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
	case RESET_TYPE_DATAPATH:
	case RESET_TYPE_MC_BIST:
	case RESET_TYPE_MCDI_TIMEOUT:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (!efx_net_active(READ_ONCE(efx->state)))
		return;

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to poll'd MCDI completions.
	 */
	efx_mcdi_mode_poll(efx);

	efx_queue_reset_work(efx);
}

/**************************************************************************
 *
 * Dummy NIC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
{
	int rc = -ENOMEM;

	/* Initialise common structures */
	INIT_LIST_HEAD(&efx->node);
	INIT_LIST_HEAD(&efx->secondary_list);
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx_selftest_async_init(efx);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->rx_prefix_size = efx->type->rx_prefix_size;
	efx->rx_ip_align =
		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
	efx->rx_packet_hash_offset =
		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
	efx->rx_packet_ts_offset =
		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
	efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	efx->vport_id = EVB_PORT_ID_ASSIGNED;
	spin_lock_init(&efx->stats_lock);
	efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
	mutex_init(&efx->mac_lock);
	init_rwsem(&efx->filter_sem);
#ifdef CONFIG_RFS_ACCEL
	mutex_init(&efx->rps_mutex);
	spin_lock_init(&efx->rps_hash_lock);
	/* Failure to allocate is not fatal, but may degrade ARFS performance */
	efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
				      sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
	spin_lock_init(&efx->vf_reps_lock);
	INIT_LIST_HEAD(&efx->vf_reps);
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	efx->tx_queues_per_channel = 1;
	efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
	efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

	efx->mem_bar = UINT_MAX;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue) {
		rc = -ENOMEM;
		goto fail;
	}

	return 0;

fail:
	efx_fini_struct(efx);
	return rc;
}

void efx_fini_struct(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	kfree(efx->rps_hash_table);
#endif

	efx_fini_channels(efx);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/* This configures the PCI device to enable I/O and DMA. */
int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
		unsigned int mem_map_size)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	int rc;

	efx->mem_bar = UINT_MAX;
	pci_dbg(pci_dev, "initialising I/O bar=%d\n", bar);

	rc = pci_enable_device(pci_dev);
	if (rc) {
		pci_err(pci_dev, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
	if (rc) {
		pci_err(efx->pci_dev, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	pci_dbg(efx->pci_dev, "using DMA mask %llx\n", (unsigned long long)dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	if (!efx->membase_phys) {
		pci_err(efx->pci_dev,
			"ERROR: No BAR%d mapping from the BIOS. Try pci=realloc on the kernel command line\n",
			bar);
		rc = -ENODEV;
		goto fail3;
	}

	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		pci_err(efx->pci_dev,
			"request for memory BAR[%d] failed\n", bar);
		rc = -EIO;
		goto fail3;
	}

	efx->mem_bar = bar;
	efx->membase = ioremap(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		pci_err(efx->pci_dev,
			"could not map memory BAR[%d] at %llx+%x\n", bar,
			(unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	pci_dbg(efx->pci_dev,
		"memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
		(unsigned long long)efx->membase_phys, mem_map_size,
		efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, bar);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}

void efx_fini_io(struct efx_nic *efx)
{
	pci_dbg(efx->pci_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->mem_bar);
		efx->membase_phys = 0;
		efx->mem_bar = UINT_MAX;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

#ifdef CONFIG_SFC_MCDI_LOGGING
static ssize_t mcdi_logging_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct efx_nic *efx = dev_get_drvdata(dev);
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	return sysfs_emit(buf, "%d\n", mcdi->logging_enabled);
}

static ssize_t mcdi_logging_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct efx_nic *efx = dev_get_drvdata(dev);
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool enable = count > 0 && *buf != '0';

	mcdi->logging_enabled = enable;
	return count;
}

static DEVICE_ATTR_RW(mcdi_logging);
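/* The attribute is created on the PCI device, so it appears as
 * /sys/bus/pci/devices/<bdf>/mcdi_logging; writing '1' or '0' toggles
 * MCDI command tracing at runtime.
 */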

void efx_init_mcdi_logging(struct efx_nic *efx)
{
	int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);

	if (rc) {
		netif_warn(efx, drv, efx->net_dev,
			   "failed to init net dev attributes\n");
	}
}

void efx_fini_mcdi_logging(struct efx_nic *efx)
{
	device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
}
#endif

/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct efx_nic *efx = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = efx_recover(efx->state);
		efx->reset_pending = 0;

		efx_device_detach_sync(efx);

		if (efx_net_active(efx->state)) {
			efx_stop_all(efx);
			efx_disable_interrupts(efx);
		}

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}

/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	return status;
}

/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = efx_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "efx_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = efx_recovered(efx->state);
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}

/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callback unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
const struct pci_error_handlers efx_err_handlers = {
	.error_detected	= efx_io_error_detected,
	.slot_reset	= efx_io_slot_reset,
	.resume		= efx_io_resume,
};

/* Determine whether the NIC will be able to handle TX offloads for a given
 * encapsulated packet.
 */
static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
{
	struct gre_base_hdr *greh;
	__be16 dst_port;
	u8 ipproto;

	/* Does the NIC support encap offloads?
	 * If not, we should never get here, because we shouldn't have
	 * advertised encap offload feature flags in the first place.
	 */
	if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
		return false;

	/* Determine encapsulation protocol in use */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		/* If there are extension headers, this will cause us to
		 * think we can't offload something that we maybe could have.
		 */
		ipproto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Not IP, so can't offload it */
		return false;
	}
	switch (ipproto) {
	case IPPROTO_GRE:
		/* We support NVGRE but not IP over GRE or random gretaps.
		 * Specifically, the NIC will accept GRE as encapsulated if
		 * the inner protocol is Ethernet, but only handle it
		 * correctly if the GRE header is 8 bytes long. Moreover,
		 * it will not update the Checksum or Sequence Number fields
		 * if they are present. (The Routing Present flag,
		 * GRE_ROUTING, cannot be set else the header would be more
		 * than 8 bytes long; so we don't have to worry about it.)
		 */
		if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
			return false;
		if (ntohs(skb->inner_protocol) != ETH_P_TEB)
			return false;
		if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
			return false;
		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		return !(greh->flags & (GRE_CSUM | GRE_SEQ));
	case IPPROTO_UDP:
		/* If the port is registered for a UDP tunnel, we assume the
		 * packet is for that tunnel, and the NIC will handle it as
		 * such. If not, the NIC won't know what to do with it.
		 */
		dst_port = udp_hdr(skb)->dest;
		return efx->type->udp_tnl_has_port(efx, dst_port);
	default:
		return false;
	}
}

netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
				     netdev_features_t features)
{
	struct efx_nic *efx = efx_netdev_priv(dev);

	if (skb->encapsulation) {
		if (features & NETIF_F_GSO_MASK)
			/* Hardware can only do TSO with at most 208 bytes
			 * of headers.
			 */
			if (skb_inner_transport_offset(skb) >
			    EFX_TSO2_MAX_HDRLEN)
				features &= ~(NETIF_F_GSO_MASK);
		if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK))
			if (!efx_can_encap_offloads(efx, skb))
				features &= ~(NETIF_F_GSO_MASK |
					      NETIF_F_CSUM_MASK);
	}
	return features;
}

int efx_get_phys_port_id(struct net_device *net_dev,
			 struct netdev_phys_item_id *ppid)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	if (efx->type->get_phys_port_id)
		return efx->type->get_phys_port_id(efx, ppid);
	else
		return -EOPNOTSUPP;
}

int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	if (snprintf(name, len, "p%u", efx->port_num) >= len)
		return -EINVAL;
	return 0;
}

void efx_detach_reps(struct efx_nic *efx)
{
	struct net_device *rep_dev;
	struct efx_rep *efv;

	ASSERT_RTNL();
	netif_dbg(efx, drv, efx->net_dev, "Detaching VF representors\n");
	list_for_each_entry(efv, &efx->vf_reps, list) {
		rep_dev = efv->net_dev;
		if (!rep_dev)
			continue;
		netif_carrier_off(rep_dev);
		/* See efx_device_detach_sync() */
		netif_tx_lock_bh(rep_dev);
		netif_tx_stop_all_queues(rep_dev);
		netif_tx_unlock_bh(rep_dev);
	}
}

void efx_attach_reps(struct efx_nic *efx)
{
	struct net_device *rep_dev;
	struct efx_rep *efv;

	ASSERT_RTNL();
	netif_dbg(efx, drv, efx->net_dev, "Attaching VF representors\n");
	list_for_each_entry(efv, &efx->vf_reps, list) {
		rep_dev = efv->net_dev;
		if (!rep_dev)
			continue;
		netif_tx_wake_all_queues(rep_dev);
		netif_carrier_on(rep_dev);
	}
}