// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/gre.h>
#include "efx_common.h"
#include "efx_channels.h"
#include "efx.h"
#include "mcdi.h"
#include "selftest.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "mcdi_port_common.h"
#include "io.h"
#include "mcdi_pcol.h"

static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			     NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			     NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			     NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100

/* Default stats update time */
#define STATS_PERIOD_MS_DEFAULT 1000

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
	[RESET_TYPE_ALL]                = "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]              = "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]           = "DATAPATH",
	[RESET_TYPE_MC_BIST]            = "MC_BIST",
	[RESET_TYPE_DISABLE]            = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT]       = "MCDI_TIMEOUT (FLR)",
};

#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

int efx_create_reset_workqueue(void)
{
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		printk(KERN_ERR "Failed to create reset workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

void efx_queue_reset_work(struct efx_nic *efx)
{
	queue_work(reset_workqueue, &efx->reset_work);
}

void efx_flush_reset_workqueue(struct efx_nic *efx)
{
	cancel_work_sync(&efx->reset_work);
}

void efx_destroy_reset_workqueue(void)
{
	if (reset_workqueue) {
		destroy_workqueue(reset_workqueue);
		reset_workqueue = NULL;
	}
}
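
/* Lifecycle sketch (hypothetical caller, not part of this file): the
 * module init path would pair these as
 *
 *	rc = efx_create_reset_workqueue();
 *	if (rc)
 *		return rc;
 *	...
 *	efx_destroy_reset_workqueue();
 *
 * so that at most one reset runs at a time across all NICs.
 */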

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
{
	if (efx->type->reconfigure_mac) {
		down_read(&efx->filter_sem);
		efx->type->reconfigure_mac(efx, mtu_only);
		up_read(&efx->filter_sem);
	}
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly.
 */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx_mac_reconfigure(efx, false);
	mutex_unlock(&efx->mac_lock);
}

int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	u8 *new_addr = addr->sa_data;
	u8 old_addr[6];
	int rc;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	/* save old address */
	ether_addr_copy(old_addr, net_dev->dev_addr);
	ether_addr_copy(net_dev->dev_addr, new_addr);
	if (efx->type->set_mac_address) {
		rc = efx->type->set_mac_address(efx);
		if (rc) {
			ether_addr_copy(net_dev->dev_addr, old_addr);
			return rc;
		}
	}

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx_mac_reconfigure(efx, false);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
		rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
		if (rc)
			return rc;
	}

	/* If Rx VLAN filter is changed, update filters via mac_reconfigure.
	 * If rx-fcs is changed, mac_reconfigure updates that too.
	 */
	if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
					  NETIF_F_RXFCS)) {
		/* efx_set_rx_mode() will schedule MAC work to update filters
		 * when the new features are finally set in net_dev.
		 */
		efx_set_rx_mode(net_dev);
	}

	return 0;
}

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN
	 */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
{
	/* The maximum MTU that we can fit in a single page, allowing for
	 * framing, overhead and XDP headroom + tailroom.
	 */
	int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
		       efx->rx_prefix_size + efx->type->rx_buffer_padding +
		       efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM;

	return PAGE_SIZE - overhead;
}

/* Context: process, rtnl_lock() held. */
int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	if (rtnl_dereference(efx->xdp_prog) &&
	    new_mtu > efx_xdp_max_mtu(efx)) {
		netif_err(efx, drv, efx->net_dev,
			  "Requested MTU of %d too big for XDP (max: %d)\n",
			  new_mtu, efx_xdp_max_mtu(efx));
		return -EINVAL;
	}

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_device_detach_sync(efx);
	efx_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	net_dev->mtu = new_mtu;
	efx_mac_reconfigure(efx, true);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	efx_device_attach_if_not_resetting(efx);
	return 0;
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway.
	 */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled && efx->type->monitor)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx_start_monitor(efx);
}

void efx_start_monitor(struct efx_nic *efx)
{
	if (efx->type->monitor)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}
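
/* Note: efx_monitor() re-queues itself via efx_start_monitor(), so once
 * started the monitor fires roughly every efx_monitor_interval jiffies
 * (1 * HZ above, i.e. about once a second) until the delayed work is
 * cancelled in efx_stop_port().
 */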

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) + EFX_XDP_HEADROOM +
		      efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM);

	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}
);
398 if (efx
->rx_buffer_order
)
399 netif_dbg(efx
, drv
, efx
->net_dev
,
400 "RX buf len=%u; page order=%u batch=%u\n",
401 efx
->rx_dma_len
, efx
->rx_buffer_order
,
402 efx
->rx_pages_per_batch
);
404 netif_dbg(efx
, drv
, efx
->net_dev
,
405 "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
406 efx
->rx_dma_len
, efx
->rx_page_buf_step
,
407 efx
->rx_bufs_per_page
, efx
->rx_pages_per_batch
);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if ((efx->rx_scatter != old_rx_scatter) &&
	    efx->type->filter_update_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
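
	/* Worked example (illustrative numbers, not from this file): with
	 * txq_entries == 1024 and efx_tx_max_skb_descs() returning 34,
	 * txq_stop_thresh == 990 and txq_wake_thresh == 495, so the queue
	 * stops once 990 descriptors are in use and wakes again when usage
	 * drops back below 495.
	 */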

	/* Initialise the channels */
	efx_start_channels(efx);

	efx_ptp_start_datapath(efx);

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_ptp_stop_datapath(efx);

	efx_stop_channels(efx);
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
 * force the Autoneg bit on.
 */
void efx_link_clear_advertising(struct efx_nic *efx)
{
	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising[0]) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising[0] |= (ADVERTISED_Pause |
						     ADVERTISED_Asym_Pause);
		else
			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
						      ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
	}
}
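
/* The XOR above implements the pause advertisement encoding:
 *
 *	RX TX -> Pause Asym_Pause
 *	 0  0 ->   0       0
 *	 0  1 ->   0       1
 *	 1  0 ->   1       1
 *	 1  1 ->   1       0
 *
 * i.e. Asym_Pause is advertised when exactly one direction of flow
 * control is wanted. (Derived from the code above; explanatory note
 * only.)
 */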

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	efx_mac_reconfigure(efx, false);

	mutex_unlock(&efx->mac_lock);
}

/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again. This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock
	 */
	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	efx_start_monitor(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	if (efx->type->start_stats) {
		efx->type->start_stats(efx);
		efx->type->pull_stats(efx);
		spin_lock_bh(&efx->stats_lock);
		efx->type->update_stats(efx, NULL, NULL);
		spin_unlock_bh(&efx->stats_lock);
	}
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	if (efx->type->update_stats) {
		/* update stats before we go down so we can accurately count
		 * rx_nodesc_drops
		 */
		efx->type->pull_stats(efx);
		spin_lock_bh(&efx->stats_lock);
		efx->type->update_stats(efx, NULL, NULL);
		spin_unlock_bh(&efx->stats_lock);
		efx->type->stop_stats(efx);
	}

	efx_stop_port(efx);

	/* Stop the kernel transmit interface. This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	spin_lock_bh(&efx->stats_lock);
	efx_nic_update_stats_atomic(efx, NULL, stats);
	spin_unlock_bh(&efx->stats_lock);
}

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc = 0;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	if (efx->type->reconfigure_port)
		rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled.
 */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

static void efx_wait_for_bist_end(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
		if (efx_mcdi_poll_reboot(efx))
			goto out;
		msleep(BIST_WAIT_DELAY_MS);
	}

	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
out:
	/* Either way unset the BIST flag. If we found no reboot we probably
	 * won't recover, but we should try.
	 */
	efx->mc_bist_for_other_fn = false;
}

/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus. In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}

/* Tears down the entire software state and most of the hardware state
 * before reset.
 */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->prepare_flr(efx);

	efx_stop_all(efx);
	efx_disable_interrupts(efx);

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	mutex_lock(&efx->rss_lock);
	efx->type->fini(efx);
}

/* Context: netif_tx_lock held, BHs disabled. */
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE.
 */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->finish_flr(efx);

	/* Ensure that SRAM is initialised even if we're disabling the device */
	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH) {
		rc = efx_mcdi_port_reconfigure(efx);
		if (rc && rc != -EPERM)
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_restore(efx);
	if (rc) /* not fatal; the PF will still work fine */
		netif_warn(efx, probe, efx->net_dev,
			   "failed to restore vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	if (efx->type->rx_restore_rss_contexts)
		efx->type->rx_restore_rss_contexts(efx);
	mutex_unlock(&efx->rss_lock);
	efx->type->filter_table_restore(efx);
	up_write(&efx->filter_sem);
	if (efx->type->sriov_reset)
		efx->type->sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	if (efx->type->udp_tnl_push_ports)
		efx->type->udp_tnl_push_ports(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->rss_lock);
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method. Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2 = 0;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	efx_device_detach_sync(efx);
	/* efx_reset_down() grabs locks that prevent recovery on EF100.
	 * EF100 reset is handled in the efx_nic_type callback below.
	 */
	if (efx_nic_rev(efx) != EFX_REV_EF100)
		efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered. We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	if (method < RESET_TYPE_MAX_METHOD)
		efx->reset_pending &= -(1 << (method + 1));
	else /* it doesn't fit into the well-ordered scope hierarchy */
		__clear_bit(method, &efx->reset_pending);
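
	/* The mask arithmetic: -(1 << (method + 1)) is two's complement for
	 * ~((1 << (method + 1)) - 1), i.e. all bits above 'method' stay set
	 * and bits 0..method are cleared. For example, method == 3 gives
	 * mask 0xfffffff0, discarding any pending resets of equal or lesser
	 * scope. (Explanatory note only.)
	 */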

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests.
	 */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	if (efx_nic_rev(efx) != EFX_REV_EF100)
		rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

	pending = READ_ONCE(efx->reset_pending);
	method = fls(pending) - 1;
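
	/* fls() returns the (1-based) index of the highest set bit, so
	 * 'method' is the highest-numbered, i.e. widest-scope, pending
	 * reset. E.g. pending == 0b10010 gives method == 4. (Explanatory
	 * note only.)
	 */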

	if (method == RESET_TYPE_MC_BIST)
		efx_wait_for_bist_end(efx);

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    efx_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in efx_schedule_reset() but it may
	 * have changed by now. Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)efx_reset(efx, method);

	rtnl_unlock();
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
	case RESET_TYPE_DATAPATH:
	case RESET_TYPE_MC_BIST:
	case RESET_TYPE_MCDI_TIMEOUT:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (READ_ONCE(efx->state) != STATE_READY)
		return;

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to poll'd MCDI completions.
	 */
	efx_mcdi_mode_poll(efx);

	efx_queue_reset_work(efx);
}
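
/* Because efx_schedule_reset() only sets a bit and queues work, it is
 * safe to call from contexts that cannot sleep; e.g. an interrupt
 * handler that detects a fatal error might do
 *
 *	efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
 *
 * and let efx_reset_work() perform the actual reset under the RTNL
 * lock. (Usage sketch, not a call site in this file.)
 */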

/**************************************************************************
 *
 * Dummy NIC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
int efx_init_struct(struct efx_nic *efx,
		    struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int rc = -ENOMEM;

	/* Initialise common structures */
	INIT_LIST_HEAD(&efx->node);
	INIT_LIST_HEAD(&efx->secondary_list);
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx_selftest_async_init(efx);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	efx->rx_prefix_size = efx->type->rx_prefix_size;
	efx->rx_ip_align =
		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
	efx->rx_packet_hash_offset =
		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
	efx->rx_packet_ts_offset =
		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
	INIT_LIST_HEAD(&efx->rss_context.list);
	efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	mutex_init(&efx->rss_lock);
	efx->vport_id = EVB_PORT_ID_ASSIGNED;
	spin_lock_init(&efx->stats_lock);
	efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
	mutex_init(&efx->mac_lock);
	init_rwsem(&efx->filter_sem);
#ifdef CONFIG_RFS_ACCEL
	mutex_init(&efx->rps_mutex);
	spin_lock_init(&efx->rps_hash_lock);
	/* Failure to allocate is not fatal, but may degrade ARFS performance */
	efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
				      sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	efx->tx_queues_per_channel = 1;
	efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
	efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

	efx->mem_bar = UINT_MAX;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue) {
		rc = -ENOMEM;
		goto fail;
	}

	return 0;

fail:
	efx_fini_struct(efx);
	return rc;
}

void efx_fini_struct(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	kfree(efx->rps_hash_table);
#endif

	efx_fini_channels(efx);

	kfree(efx->vpd_sn);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/* This configures the PCI device to enable I/O and DMA. */
int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
		unsigned int mem_map_size)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	int rc;

	efx->mem_bar = UINT_MAX;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}
);
1091 rc
= dma_set_mask_and_coherent(&pci_dev
->dev
, dma_mask
);
1093 netif_err(efx
, probe
, efx
->net_dev
,
1094 "could not find a suitable DMA mask\n");
1097 netif_dbg(efx
, probe
, efx
->net_dev
,
1098 "using DMA mask %llx\n", (unsigned long long)dma_mask
);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	if (!efx->membase_phys) {
		netif_err(efx, probe, efx->net_dev,
			  "ERROR: No BAR%d mapping from the BIOS. "
			  "Try pci=realloc on the kernel command line\n", bar);
		rc = -ENODEV;
		goto fail3;
	}

	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR[%d] failed\n", bar);
		rc = -EIO;
		goto fail3;
	}

	efx->mem_bar = bar;
	efx->membase = ioremap(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR[%d] at %llx+%x\n", bar,
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, bar);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}

void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->mem_bar);
		efx->membase_phys = 0;
		efx->mem_bar = UINT_MAX;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

#ifdef CONFIG_SFC_MCDI_LOGGING
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct efx_nic *efx = dev_get_drvdata(dev);
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
}

static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct efx_nic *efx = dev_get_drvdata(dev);
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool enable = count > 0 && *buf != '0';

	mcdi->logging_enabled = enable;
	return count;
}

static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);

void efx_init_mcdi_logging(struct efx_nic *efx)
{
	int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);

	if (rc) {
		netif_warn(efx, drv, efx->net_dev,
			   "failed to init net dev attributes\n");
	}
}

void efx_fini_mcdi_logging(struct efx_nic *efx)
{
	device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
}
#endif
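
/* The attribute above appears under the PCI device's sysfs directory;
 * e.g. (path shown is an illustrative assumption):
 *
 *	echo 1 > /sys/bus/pci/devices/0000:01:00.0/mcdi_logging
 *
 * enables MCDI command logging, and reading the file reports the
 * current setting.
 */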

/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct efx_nic *efx = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_RECOVERY;
		efx->reset_pending = 0;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_disable_interrupts(efx);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}

/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	return status;
}

/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = efx_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "efx_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = STATE_READY;
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}

/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callback unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
const struct pci_error_handlers efx_err_handlers = {
	.error_detected = efx_io_error_detected,
	.slot_reset	= efx_io_slot_reset,
	.resume		= efx_io_resume,
};

/* Determine whether the NIC will be able to handle TX offloads for a given
 * encapsulated packet.
 */
static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
{
	struct gre_base_hdr *greh;
	__be16 dst_port;
	u8 ipproto;

	/* Does the NIC support encap offloads?
	 * If not, we should never get here, because we shouldn't have
	 * advertised encap offload feature flags in the first place.
	 */
	if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
		return false;

	/* Determine encapsulation protocol in use */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		/* If there are extension headers, this will cause us to
		 * think we can't offload something that we maybe could have.
		 */
		ipproto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Not IP, so can't offload it */
		return false;
	}

	switch (ipproto) {
	case IPPROTO_GRE:
		/* We support NVGRE but not IP over GRE or random gretaps.
		 * Specifically, the NIC will accept GRE as encapsulated if
		 * the inner protocol is Ethernet, but only handle it
		 * correctly if the GRE header is 8 bytes long. Moreover,
		 * it will not update the Checksum or Sequence Number fields
		 * if they are present. (The Routing Present flag,
		 * GRE_ROUTING, cannot be set else the header would be more
		 * than 8 bytes long; so we don't have to worry about it.)
		 */
		if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
			return false;
		if (ntohs(skb->inner_protocol) != ETH_P_TEB)
			return false;
		if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
			return false;
		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		return !(greh->flags & (GRE_CSUM | GRE_SEQ));
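		/* (For reference: the base GRE header is 4 bytes; NVGRE adds
		 * the 4-byte Key field, giving the 8-byte total checked
		 * above. Explanatory note only.)
		 */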
	case IPPROTO_UDP:
		/* If the port is registered for a UDP tunnel, we assume the
		 * packet is for that tunnel, and the NIC will handle it as
		 * such. If not, the NIC won't know what to do with it.
		 */
		dst_port = udp_hdr(skb)->dest;
		return efx->type->udp_tnl_has_port(efx, dst_port);
	default:
		return false;
	}
}

netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
				     netdev_features_t features)
{
	struct efx_nic *efx = netdev_priv(dev);

	if (skb->encapsulation) {
		if (features & NETIF_F_GSO_MASK)
			/* Hardware can only do TSO with at most 208 bytes
			 * of headers.
			 */
			if (skb_inner_transport_offset(skb) >
			    EFX_TSO2_MAX_HDRLEN)
				features &= ~(NETIF_F_GSO_MASK);
		if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK))
			if (!efx_can_encap_offloads(efx, skb))
				features &= ~(NETIF_F_GSO_MASK |
					      NETIF_F_CSUM_MASK);
	}
	return features;
}

int efx_get_phys_port_id(struct net_device *net_dev,
			 struct netdev_phys_item_id *ppid)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->get_phys_port_id)
		return efx->type->get_phys_port_id(efx, ppid);
	else
		return -EOPNOTSUPP;
}

int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (snprintf(name, len, "p%u", efx->port_num) >= len)
		return -EINVAL;
	return 0;
}