1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2005-2006 Fen Systems Ltd.
5 * Copyright 2005-2013 Solarflare Communications Inc.
8 #include <linux/module.h>
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/delay.h>
13 #include <linux/notifier.h>
15 #include <linux/tcp.h>
17 #include <linux/ethtool.h>
18 #include <linux/topology.h>
19 #include <linux/gfp.h>
20 #include <linux/aer.h>
21 #include <linux/interrupt.h>
22 #include "net_driver.h"
24 #include <net/udp_tunnel.h>
26 #include "efx_common.h"
27 #include "efx_channels.h"
28 #include "rx_common.h"
29 #include "tx_common.h"
36 #include "mcdi_pcol.h"
37 #include "workarounds.h"
39 /**************************************************************************
43 **************************************************************************
46 /* UDP tunnel type names */
47 static const char *const efx_udp_tunnel_type_names
[] = {
48 [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN
] = "vxlan",
49 [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE
] = "geneve",
52 void efx_get_udp_tunnel_type_name(u16 type
, char *buf
, size_t buflen
)
54 if (type
< ARRAY_SIZE(efx_udp_tunnel_type_names
) &&
55 efx_udp_tunnel_type_names
[type
] != NULL
)
56 snprintf(buf
, buflen
, "%s", efx_udp_tunnel_type_names
[type
]);
58 snprintf(buf
, buflen
, "type %d", type
);
61 /**************************************************************************
65 *************************************************************************/
68 * Use separate channels for TX and RX events
70 * Set this to 1 to use separate channels for TX and RX. It allows us
71 * to control interrupt affinity separately for TX and RX.
73 * This is only used in MSI-X interrupt mode
75 bool efx_separate_tx_channels
;
76 module_param(efx_separate_tx_channels
, bool, 0444);
77 MODULE_PARM_DESC(efx_separate_tx_channels
,
78 "Use separate channels for TX and RX");
80 /* Initial interrupt moderation settings. They can be modified after
81 * module load with ethtool.
83 * The default for RX should strike a balance between increasing the
84 * round-trip latency and reducing overhead.
86 static unsigned int rx_irq_mod_usec
= 60;
88 /* Initial interrupt moderation settings. They can be modified after
89 * module load with ethtool.
91 * This default is chosen to ensure that a 10G link does not go idle
92 * while a TX queue is stopped after it has become full. A queue is
93 * restarted when it drops below half full. The time this takes (assuming
94 * worst case 3 descriptors per packet and 1024 descriptors) is
95 * 512 / 3 * 1.2 = 205 usec.
97 static unsigned int tx_irq_mod_usec
= 150;
99 static bool phy_flash_cfg
;
100 module_param(phy_flash_cfg
, bool, 0644);
101 MODULE_PARM_DESC(phy_flash_cfg
, "Set PHYs into reflash mode initially");
103 static unsigned debug
= (NETIF_MSG_DRV
| NETIF_MSG_PROBE
|
104 NETIF_MSG_LINK
| NETIF_MSG_IFDOWN
|
105 NETIF_MSG_IFUP
| NETIF_MSG_RX_ERR
|
106 NETIF_MSG_TX_ERR
| NETIF_MSG_HW
);
107 module_param(debug
, uint
, 0);
108 MODULE_PARM_DESC(debug
, "Bitmapped debugging message enable value");
110 /**************************************************************************
112 * Utility functions and prototypes
114 *************************************************************************/
116 static const struct efx_channel_type efx_default_channel_type
;
117 static void efx_remove_port(struct efx_nic
*efx
);
118 static int efx_xdp_setup_prog(struct efx_nic
*efx
, struct bpf_prog
*prog
);
119 static int efx_xdp(struct net_device
*dev
, struct netdev_bpf
*xdp
);
120 static int efx_xdp_xmit(struct net_device
*dev
, int n
, struct xdp_frame
**xdpfs
,
123 #define EFX_ASSERT_RESET_SERIALISED(efx) \
125 if ((efx->state == STATE_READY) || \
126 (efx->state == STATE_RECOVERY) || \
127 (efx->state == STATE_DISABLED)) \
131 /**************************************************************************
135 **************************************************************************/
137 /* Equivalent to efx_link_set_advertising with all-zeroes, except does not
138 * force the Autoneg bit on.
140 void efx_link_clear_advertising(struct efx_nic
*efx
)
142 bitmap_zero(efx
->link_advertising
, __ETHTOOL_LINK_MODE_MASK_NBITS
);
143 efx
->wanted_fc
&= ~(EFX_FC_TX
| EFX_FC_RX
);
146 void efx_link_set_wanted_fc(struct efx_nic
*efx
, u8 wanted_fc
)
148 efx
->wanted_fc
= wanted_fc
;
149 if (efx
->link_advertising
[0]) {
150 if (wanted_fc
& EFX_FC_RX
)
151 efx
->link_advertising
[0] |= (ADVERTISED_Pause
|
152 ADVERTISED_Asym_Pause
);
154 efx
->link_advertising
[0] &= ~(ADVERTISED_Pause
|
155 ADVERTISED_Asym_Pause
);
156 if (wanted_fc
& EFX_FC_TX
)
157 efx
->link_advertising
[0] ^= ADVERTISED_Asym_Pause
;
161 static void efx_fini_port(struct efx_nic
*efx
);
163 static int efx_probe_port(struct efx_nic
*efx
)
167 netif_dbg(efx
, probe
, efx
->net_dev
, "create port\n");
170 efx
->phy_mode
= PHY_MODE_SPECIAL
;
172 /* Connect up MAC/PHY operations table */
173 rc
= efx
->type
->probe_port(efx
);
177 /* Initialise MAC address to permanent address */
178 ether_addr_copy(efx
->net_dev
->dev_addr
, efx
->net_dev
->perm_addr
);
183 static int efx_init_port(struct efx_nic
*efx
)
187 netif_dbg(efx
, drv
, efx
->net_dev
, "init port\n");
189 mutex_lock(&efx
->mac_lock
);
191 rc
= efx
->phy_op
->init(efx
);
195 efx
->port_initialized
= true;
197 /* Reconfigure the MAC before creating dma queues (required for
198 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
199 efx_mac_reconfigure(efx
);
201 /* Ensure the PHY advertises the correct flow control settings */
202 rc
= efx
->phy_op
->reconfigure(efx
);
203 if (rc
&& rc
!= -EPERM
)
206 mutex_unlock(&efx
->mac_lock
);
210 efx
->phy_op
->fini(efx
);
212 mutex_unlock(&efx
->mac_lock
);
216 static void efx_fini_port(struct efx_nic
*efx
)
218 netif_dbg(efx
, drv
, efx
->net_dev
, "shut down port\n");
220 if (!efx
->port_initialized
)
223 efx
->phy_op
->fini(efx
);
224 efx
->port_initialized
= false;
226 efx
->link_state
.up
= false;
227 efx_link_status_changed(efx
);
230 static void efx_remove_port(struct efx_nic
*efx
)
232 netif_dbg(efx
, drv
, efx
->net_dev
, "destroying port\n");
234 efx
->type
->remove_port(efx
);
237 /**************************************************************************
241 **************************************************************************/
243 static LIST_HEAD(efx_primary_list
);
244 static LIST_HEAD(efx_unassociated_list
);
246 static bool efx_same_controller(struct efx_nic
*left
, struct efx_nic
*right
)
248 return left
->type
== right
->type
&&
249 left
->vpd_sn
&& right
->vpd_sn
&&
250 !strcmp(left
->vpd_sn
, right
->vpd_sn
);
253 static void efx_associate(struct efx_nic
*efx
)
255 struct efx_nic
*other
, *next
;
257 if (efx
->primary
== efx
) {
258 /* Adding primary function; look for secondaries */
260 netif_dbg(efx
, probe
, efx
->net_dev
, "adding to primary list\n");
261 list_add_tail(&efx
->node
, &efx_primary_list
);
263 list_for_each_entry_safe(other
, next
, &efx_unassociated_list
,
265 if (efx_same_controller(efx
, other
)) {
266 list_del(&other
->node
);
267 netif_dbg(other
, probe
, other
->net_dev
,
268 "moving to secondary list of %s %s\n",
269 pci_name(efx
->pci_dev
),
271 list_add_tail(&other
->node
,
272 &efx
->secondary_list
);
273 other
->primary
= efx
;
277 /* Adding secondary function; look for primary */
279 list_for_each_entry(other
, &efx_primary_list
, node
) {
280 if (efx_same_controller(efx
, other
)) {
281 netif_dbg(efx
, probe
, efx
->net_dev
,
282 "adding to secondary list of %s %s\n",
283 pci_name(other
->pci_dev
),
284 other
->net_dev
->name
);
285 list_add_tail(&efx
->node
,
286 &other
->secondary_list
);
287 efx
->primary
= other
;
292 netif_dbg(efx
, probe
, efx
->net_dev
,
293 "adding to unassociated list\n");
294 list_add_tail(&efx
->node
, &efx_unassociated_list
);
298 static void efx_dissociate(struct efx_nic
*efx
)
300 struct efx_nic
*other
, *next
;
302 list_del(&efx
->node
);
305 list_for_each_entry_safe(other
, next
, &efx
->secondary_list
, node
) {
306 list_del(&other
->node
);
307 netif_dbg(other
, probe
, other
->net_dev
,
308 "moving to unassociated list\n");
309 list_add_tail(&other
->node
, &efx_unassociated_list
);
310 other
->primary
= NULL
;
314 static int efx_probe_nic(struct efx_nic
*efx
)
318 netif_dbg(efx
, probe
, efx
->net_dev
, "creating NIC\n");
320 /* Carry out hardware-type specific initialisation */
321 rc
= efx
->type
->probe(efx
);
326 if (!efx
->max_channels
|| !efx
->max_tx_channels
) {
327 netif_err(efx
, drv
, efx
->net_dev
,
328 "Insufficient resources to allocate"
334 /* Determine the number of channels and queues by trying
335 * to hook in MSI-X interrupts.
337 rc
= efx_probe_interrupts(efx
);
341 rc
= efx_set_channels(efx
);
345 /* dimension_resources can fail with EAGAIN */
346 rc
= efx
->type
->dimension_resources(efx
);
347 if (rc
!= 0 && rc
!= -EAGAIN
)
351 /* try again with new max_channels */
352 efx_remove_interrupts(efx
);
354 } while (rc
== -EAGAIN
);
356 if (efx
->n_channels
> 1)
357 netdev_rss_key_fill(efx
->rss_context
.rx_hash_key
,
358 sizeof(efx
->rss_context
.rx_hash_key
));
359 efx_set_default_rx_indir_table(efx
, &efx
->rss_context
);
361 netif_set_real_num_tx_queues(efx
->net_dev
, efx
->n_tx_channels
);
362 netif_set_real_num_rx_queues(efx
->net_dev
, efx
->n_rx_channels
);
364 /* Initialise the interrupt moderation settings */
365 efx
->irq_mod_step_us
= DIV_ROUND_UP(efx
->timer_quantum_ns
, 1000);
366 efx_init_irq_moderation(efx
, tx_irq_mod_usec
, rx_irq_mod_usec
, true,
372 efx_remove_interrupts(efx
);
374 efx
->type
->remove(efx
);
378 static void efx_remove_nic(struct efx_nic
*efx
)
380 netif_dbg(efx
, drv
, efx
->net_dev
, "destroying NIC\n");
382 efx_remove_interrupts(efx
);
383 efx
->type
->remove(efx
);
386 /**************************************************************************
388 * NIC startup/shutdown
390 *************************************************************************/
392 static int efx_probe_all(struct efx_nic
*efx
)
396 rc
= efx_probe_nic(efx
);
398 netif_err(efx
, probe
, efx
->net_dev
, "failed to create NIC\n");
402 rc
= efx_probe_port(efx
);
404 netif_err(efx
, probe
, efx
->net_dev
, "failed to create port\n");
408 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE
< EFX_RXQ_MIN_ENT
);
409 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE
< EFX_TXQ_MIN_ENT(efx
))) {
413 efx
->rxq_entries
= efx
->txq_entries
= EFX_DEFAULT_DMAQ_SIZE
;
415 #ifdef CONFIG_SFC_SRIOV
416 rc
= efx
->type
->vswitching_probe(efx
);
417 if (rc
) /* not fatal; the PF will still work fine */
418 netif_warn(efx
, probe
, efx
->net_dev
,
419 "failed to setup vswitching rc=%d;"
420 " VFs may not function\n", rc
);
423 rc
= efx_probe_filters(efx
);
425 netif_err(efx
, probe
, efx
->net_dev
,
426 "failed to create filter tables\n");
430 rc
= efx_probe_channels(efx
);
437 efx_remove_filters(efx
);
439 #ifdef CONFIG_SFC_SRIOV
440 efx
->type
->vswitching_remove(efx
);
443 efx_remove_port(efx
);
450 static void efx_remove_all(struct efx_nic
*efx
)
453 efx_xdp_setup_prog(efx
, NULL
);
456 efx_remove_channels(efx
);
457 efx_remove_filters(efx
);
458 #ifdef CONFIG_SFC_SRIOV
459 efx
->type
->vswitching_remove(efx
);
461 efx_remove_port(efx
);
465 /**************************************************************************
467 * Interrupt moderation
469 **************************************************************************/
470 unsigned int efx_usecs_to_ticks(struct efx_nic
*efx
, unsigned int usecs
)
474 if (usecs
* 1000 < efx
->timer_quantum_ns
)
475 return 1; /* never round down to 0 */
476 return usecs
* 1000 / efx
->timer_quantum_ns
;
479 unsigned int efx_ticks_to_usecs(struct efx_nic
*efx
, unsigned int ticks
)
481 /* We must round up when converting ticks to microseconds
482 * because we round down when converting the other way.
484 return DIV_ROUND_UP(ticks
* efx
->timer_quantum_ns
, 1000);
487 /* Set interrupt moderation parameters */
488 int efx_init_irq_moderation(struct efx_nic
*efx
, unsigned int tx_usecs
,
489 unsigned int rx_usecs
, bool rx_adaptive
,
490 bool rx_may_override_tx
)
492 struct efx_channel
*channel
;
493 unsigned int timer_max_us
;
495 EFX_ASSERT_RESET_SERIALISED(efx
);
497 timer_max_us
= efx
->timer_max_ns
/ 1000;
499 if (tx_usecs
> timer_max_us
|| rx_usecs
> timer_max_us
)
502 if (tx_usecs
!= rx_usecs
&& efx
->tx_channel_offset
== 0 &&
503 !rx_may_override_tx
) {
504 netif_err(efx
, drv
, efx
->net_dev
, "Channels are shared. "
505 "RX and TX IRQ moderation must be equal\n");
509 efx
->irq_rx_adaptive
= rx_adaptive
;
510 efx
->irq_rx_moderation_us
= rx_usecs
;
511 efx_for_each_channel(channel
, efx
) {
512 if (efx_channel_has_rx_queue(channel
))
513 channel
->irq_moderation_us
= rx_usecs
;
514 else if (efx_channel_has_tx_queues(channel
))
515 channel
->irq_moderation_us
= tx_usecs
;
516 else if (efx_channel_is_xdp_tx(channel
))
517 channel
->irq_moderation_us
= tx_usecs
;
523 void efx_get_irq_moderation(struct efx_nic
*efx
, unsigned int *tx_usecs
,
524 unsigned int *rx_usecs
, bool *rx_adaptive
)
526 *rx_adaptive
= efx
->irq_rx_adaptive
;
527 *rx_usecs
= efx
->irq_rx_moderation_us
;
529 /* If channels are shared between RX and TX, so is IRQ
530 * moderation. Otherwise, IRQ moderation is the same for all
531 * TX channels and is not adaptive.
533 if (efx
->tx_channel_offset
== 0) {
534 *tx_usecs
= *rx_usecs
;
536 struct efx_channel
*tx_channel
;
538 tx_channel
= efx
->channel
[efx
->tx_channel_offset
];
539 *tx_usecs
= tx_channel
->irq_moderation_us
;
543 /**************************************************************************
547 *************************************************************************/
550 * Context: process, rtnl_lock() held.
552 static int efx_ioctl(struct net_device
*net_dev
, struct ifreq
*ifr
, int cmd
)
554 struct efx_nic
*efx
= netdev_priv(net_dev
);
555 struct mii_ioctl_data
*data
= if_mii(ifr
);
557 if (cmd
== SIOCSHWTSTAMP
)
558 return efx_ptp_set_ts_config(efx
, ifr
);
559 if (cmd
== SIOCGHWTSTAMP
)
560 return efx_ptp_get_ts_config(efx
, ifr
);
562 /* Convert phy_id from older PRTAD/DEVAD format */
563 if ((cmd
== SIOCGMIIREG
|| cmd
== SIOCSMIIREG
) &&
564 (data
->phy_id
& 0xfc00) == 0x0400)
565 data
->phy_id
^= MDIO_PHY_ID_C45
| 0x0400;
567 return mdio_mii_ioctl(&efx
->mdio
, data
, cmd
);
570 /**************************************************************************
572 * Kernel net device interface
574 *************************************************************************/
576 /* Context: process, rtnl_lock() held. */
577 int efx_net_open(struct net_device
*net_dev
)
579 struct efx_nic
*efx
= netdev_priv(net_dev
);
582 netif_dbg(efx
, ifup
, efx
->net_dev
, "opening device on CPU %d\n",
583 raw_smp_processor_id());
585 rc
= efx_check_disabled(efx
);
588 if (efx
->phy_mode
& PHY_MODE_SPECIAL
)
590 if (efx_mcdi_poll_reboot(efx
) && efx_reset(efx
, RESET_TYPE_ALL
))
593 /* Notify the kernel of the link state polled during driver load,
594 * before the monitor starts running */
595 efx_link_status_changed(efx
);
598 if (efx
->state
== STATE_DISABLED
|| efx
->reset_pending
)
599 netif_device_detach(efx
->net_dev
);
600 efx_selftest_async_start(efx
);
604 /* Context: process, rtnl_lock() held.
605 * Note that the kernel will ignore our return code; this method
606 * should really be a void.
608 int efx_net_stop(struct net_device
*net_dev
)
610 struct efx_nic
*efx
= netdev_priv(net_dev
);
612 netif_dbg(efx
, ifdown
, efx
->net_dev
, "closing on CPU %d\n",
613 raw_smp_processor_id());
615 /* Stop the device and flush all the channels */
621 /* Context: netif_tx_lock held, BHs disabled. */
622 static void efx_watchdog(struct net_device
*net_dev
, unsigned int txqueue
)
624 struct efx_nic
*efx
= netdev_priv(net_dev
);
626 netif_err(efx
, tx_err
, efx
->net_dev
,
627 "TX stuck with port_enabled=%d: resetting channels\n",
630 efx_schedule_reset(efx
, RESET_TYPE_TX_WATCHDOG
);
633 static int efx_set_mac_address(struct net_device
*net_dev
, void *data
)
635 struct efx_nic
*efx
= netdev_priv(net_dev
);
636 struct sockaddr
*addr
= data
;
637 u8
*new_addr
= addr
->sa_data
;
641 if (!is_valid_ether_addr(new_addr
)) {
642 netif_err(efx
, drv
, efx
->net_dev
,
643 "invalid ethernet MAC address requested: %pM\n",
645 return -EADDRNOTAVAIL
;
648 /* save old address */
649 ether_addr_copy(old_addr
, net_dev
->dev_addr
);
650 ether_addr_copy(net_dev
->dev_addr
, new_addr
);
651 if (efx
->type
->set_mac_address
) {
652 rc
= efx
->type
->set_mac_address(efx
);
654 ether_addr_copy(net_dev
->dev_addr
, old_addr
);
659 /* Reconfigure the MAC */
660 mutex_lock(&efx
->mac_lock
);
661 efx_mac_reconfigure(efx
);
662 mutex_unlock(&efx
->mac_lock
);
667 /* Context: netif_addr_lock held, BHs disabled. */
668 static void efx_set_rx_mode(struct net_device
*net_dev
)
670 struct efx_nic
*efx
= netdev_priv(net_dev
);
672 if (efx
->port_enabled
)
673 queue_work(efx
->workqueue
, &efx
->mac_work
);
674 /* Otherwise efx_start_port() will do this */
677 static int efx_set_features(struct net_device
*net_dev
, netdev_features_t data
)
679 struct efx_nic
*efx
= netdev_priv(net_dev
);
682 /* If disabling RX n-tuple filtering, clear existing filters */
683 if (net_dev
->features
& ~data
& NETIF_F_NTUPLE
) {
684 rc
= efx
->type
->filter_clear_rx(efx
, EFX_FILTER_PRI_MANUAL
);
689 /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
690 * If rx-fcs is changed, mac_reconfigure updates that too.
692 if ((net_dev
->features
^ data
) & (NETIF_F_HW_VLAN_CTAG_FILTER
|
694 /* efx_set_rx_mode() will schedule MAC work to update filters
695 * when a new features are finally set in net_dev.
697 efx_set_rx_mode(net_dev
);
703 static int efx_get_phys_port_id(struct net_device
*net_dev
,
704 struct netdev_phys_item_id
*ppid
)
706 struct efx_nic
*efx
= netdev_priv(net_dev
);
708 if (efx
->type
->get_phys_port_id
)
709 return efx
->type
->get_phys_port_id(efx
, ppid
);
714 static int efx_get_phys_port_name(struct net_device
*net_dev
,
715 char *name
, size_t len
)
717 struct efx_nic
*efx
= netdev_priv(net_dev
);
719 if (snprintf(name
, len
, "p%u", efx
->port_num
) >= len
)
724 static int efx_vlan_rx_add_vid(struct net_device
*net_dev
, __be16 proto
, u16 vid
)
726 struct efx_nic
*efx
= netdev_priv(net_dev
);
728 if (efx
->type
->vlan_rx_add_vid
)
729 return efx
->type
->vlan_rx_add_vid(efx
, proto
, vid
);
734 static int efx_vlan_rx_kill_vid(struct net_device
*net_dev
, __be16 proto
, u16 vid
)
736 struct efx_nic
*efx
= netdev_priv(net_dev
);
738 if (efx
->type
->vlan_rx_kill_vid
)
739 return efx
->type
->vlan_rx_kill_vid(efx
, proto
, vid
);
744 static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in
)
747 case UDP_TUNNEL_TYPE_VXLAN
:
748 return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN
;
749 case UDP_TUNNEL_TYPE_GENEVE
:
750 return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE
;
756 static void efx_udp_tunnel_add(struct net_device
*dev
, struct udp_tunnel_info
*ti
)
758 struct efx_nic
*efx
= netdev_priv(dev
);
759 struct efx_udp_tunnel tnl
;
762 efx_tunnel_type
= efx_udp_tunnel_type_map(ti
->type
);
763 if (efx_tunnel_type
< 0)
766 tnl
.type
= (u16
)efx_tunnel_type
;
769 if (efx
->type
->udp_tnl_add_port
)
770 (void)efx
->type
->udp_tnl_add_port(efx
, tnl
);
773 static void efx_udp_tunnel_del(struct net_device
*dev
, struct udp_tunnel_info
*ti
)
775 struct efx_nic
*efx
= netdev_priv(dev
);
776 struct efx_udp_tunnel tnl
;
779 efx_tunnel_type
= efx_udp_tunnel_type_map(ti
->type
);
780 if (efx_tunnel_type
< 0)
783 tnl
.type
= (u16
)efx_tunnel_type
;
786 if (efx
->type
->udp_tnl_del_port
)
787 (void)efx
->type
->udp_tnl_del_port(efx
, tnl
);
790 static const struct net_device_ops efx_netdev_ops
= {
791 .ndo_open
= efx_net_open
,
792 .ndo_stop
= efx_net_stop
,
793 .ndo_get_stats64
= efx_net_stats
,
794 .ndo_tx_timeout
= efx_watchdog
,
795 .ndo_start_xmit
= efx_hard_start_xmit
,
796 .ndo_validate_addr
= eth_validate_addr
,
797 .ndo_do_ioctl
= efx_ioctl
,
798 .ndo_change_mtu
= efx_change_mtu
,
799 .ndo_set_mac_address
= efx_set_mac_address
,
800 .ndo_set_rx_mode
= efx_set_rx_mode
,
801 .ndo_set_features
= efx_set_features
,
802 .ndo_vlan_rx_add_vid
= efx_vlan_rx_add_vid
,
803 .ndo_vlan_rx_kill_vid
= efx_vlan_rx_kill_vid
,
804 #ifdef CONFIG_SFC_SRIOV
805 .ndo_set_vf_mac
= efx_sriov_set_vf_mac
,
806 .ndo_set_vf_vlan
= efx_sriov_set_vf_vlan
,
807 .ndo_set_vf_spoofchk
= efx_sriov_set_vf_spoofchk
,
808 .ndo_get_vf_config
= efx_sriov_get_vf_config
,
809 .ndo_set_vf_link_state
= efx_sriov_set_vf_link_state
,
811 .ndo_get_phys_port_id
= efx_get_phys_port_id
,
812 .ndo_get_phys_port_name
= efx_get_phys_port_name
,
813 .ndo_setup_tc
= efx_setup_tc
,
814 #ifdef CONFIG_RFS_ACCEL
815 .ndo_rx_flow_steer
= efx_filter_rfs
,
817 .ndo_udp_tunnel_add
= efx_udp_tunnel_add
,
818 .ndo_udp_tunnel_del
= efx_udp_tunnel_del
,
819 .ndo_xdp_xmit
= efx_xdp_xmit
,
823 static int efx_xdp_setup_prog(struct efx_nic
*efx
, struct bpf_prog
*prog
)
825 struct bpf_prog
*old_prog
;
827 if (efx
->xdp_rxq_info_failed
) {
828 netif_err(efx
, drv
, efx
->net_dev
,
829 "Unable to bind XDP program due to previous failure of rxq_info\n");
833 if (prog
&& efx
->net_dev
->mtu
> efx_xdp_max_mtu(efx
)) {
834 netif_err(efx
, drv
, efx
->net_dev
,
835 "Unable to configure XDP with MTU of %d (max: %d)\n",
836 efx
->net_dev
->mtu
, efx_xdp_max_mtu(efx
));
840 old_prog
= rtnl_dereference(efx
->xdp_prog
);
841 rcu_assign_pointer(efx
->xdp_prog
, prog
);
842 /* Release the reference that was originally passed by the caller. */
844 bpf_prog_put(old_prog
);
849 /* Context: process, rtnl_lock() held. */
850 static int efx_xdp(struct net_device
*dev
, struct netdev_bpf
*xdp
)
852 struct efx_nic
*efx
= netdev_priv(dev
);
853 struct bpf_prog
*xdp_prog
;
855 switch (xdp
->command
) {
857 return efx_xdp_setup_prog(efx
, xdp
->prog
);
859 xdp_prog
= rtnl_dereference(efx
->xdp_prog
);
860 xdp
->prog_id
= xdp_prog
? xdp_prog
->aux
->id
: 0;
867 static int efx_xdp_xmit(struct net_device
*dev
, int n
, struct xdp_frame
**xdpfs
,
870 struct efx_nic
*efx
= netdev_priv(dev
);
872 if (!netif_running(dev
))
875 return efx_xdp_tx_buffers(efx
, n
, xdpfs
, flags
& XDP_XMIT_FLUSH
);
878 static void efx_update_name(struct efx_nic
*efx
)
880 strcpy(efx
->name
, efx
->net_dev
->name
);
882 efx_set_channel_names(efx
);
885 static int efx_netdev_event(struct notifier_block
*this,
886 unsigned long event
, void *ptr
)
888 struct net_device
*net_dev
= netdev_notifier_info_to_dev(ptr
);
890 if ((net_dev
->netdev_ops
== &efx_netdev_ops
) &&
891 event
== NETDEV_CHANGENAME
)
892 efx_update_name(netdev_priv(net_dev
));
897 static struct notifier_block efx_netdev_notifier
= {
898 .notifier_call
= efx_netdev_event
,
902 show_phy_type(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
904 struct efx_nic
*efx
= dev_get_drvdata(dev
);
905 return sprintf(buf
, "%d\n", efx
->phy_type
);
907 static DEVICE_ATTR(phy_type
, 0444, show_phy_type
, NULL
);
909 static int efx_register_netdev(struct efx_nic
*efx
)
911 struct net_device
*net_dev
= efx
->net_dev
;
912 struct efx_channel
*channel
;
915 net_dev
->watchdog_timeo
= 5 * HZ
;
916 net_dev
->irq
= efx
->pci_dev
->irq
;
917 net_dev
->netdev_ops
= &efx_netdev_ops
;
918 if (efx_nic_rev(efx
) >= EFX_REV_HUNT_A0
)
919 net_dev
->priv_flags
|= IFF_UNICAST_FLT
;
920 net_dev
->ethtool_ops
= &efx_ethtool_ops
;
921 net_dev
->gso_max_segs
= EFX_TSO_MAX_SEGS
;
922 net_dev
->min_mtu
= EFX_MIN_MTU
;
923 net_dev
->max_mtu
= EFX_MAX_MTU
;
927 /* Enable resets to be scheduled and check whether any were
928 * already requested. If so, the NIC is probably hosed so we
931 efx
->state
= STATE_READY
;
932 smp_mb(); /* ensure we change state before checking reset_pending */
933 if (efx
->reset_pending
) {
934 netif_err(efx
, probe
, efx
->net_dev
,
935 "aborting probe due to scheduled reset\n");
940 rc
= dev_alloc_name(net_dev
, net_dev
->name
);
943 efx_update_name(efx
);
945 /* Always start with carrier off; PHY events will detect the link */
946 netif_carrier_off(net_dev
);
948 rc
= register_netdevice(net_dev
);
952 efx_for_each_channel(channel
, efx
) {
953 struct efx_tx_queue
*tx_queue
;
954 efx_for_each_channel_tx_queue(tx_queue
, channel
)
955 efx_init_tx_queue_core_txq(tx_queue
);
962 rc
= device_create_file(&efx
->pci_dev
->dev
, &dev_attr_phy_type
);
964 netif_err(efx
, drv
, efx
->net_dev
,
965 "failed to init net dev attributes\n");
966 goto fail_registered
;
969 efx_init_mcdi_logging(efx
);
976 unregister_netdevice(net_dev
);
978 efx
->state
= STATE_UNINIT
;
980 netif_err(efx
, drv
, efx
->net_dev
, "could not register net dev\n");
984 static void efx_unregister_netdev(struct efx_nic
*efx
)
989 BUG_ON(netdev_priv(efx
->net_dev
) != efx
);
991 if (efx_dev_registered(efx
)) {
992 strlcpy(efx
->name
, pci_name(efx
->pci_dev
), sizeof(efx
->name
));
993 efx_fini_mcdi_logging(efx
);
994 device_remove_file(&efx
->pci_dev
->dev
, &dev_attr_phy_type
);
995 unregister_netdev(efx
->net_dev
);
999 /**************************************************************************
1001 * List of NICs we support
1003 **************************************************************************/
1005 /* PCI device ID table */
1006 static const struct pci_device_id efx_pci_table
[] = {
1007 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x0803), /* SFC9020 */
1008 .driver_data
= (unsigned long) &siena_a0_nic_type
},
1009 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x0813), /* SFL9021 */
1010 .driver_data
= (unsigned long) &siena_a0_nic_type
},
1011 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x0903), /* SFC9120 PF */
1012 .driver_data
= (unsigned long) &efx_hunt_a0_nic_type
},
1013 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x1903), /* SFC9120 VF */
1014 .driver_data
= (unsigned long) &efx_hunt_a0_vf_nic_type
},
1015 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x0923), /* SFC9140 PF */
1016 .driver_data
= (unsigned long) &efx_hunt_a0_nic_type
},
1017 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x1923), /* SFC9140 VF */
1018 .driver_data
= (unsigned long) &efx_hunt_a0_vf_nic_type
},
1019 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x0a03), /* SFC9220 PF */
1020 .driver_data
= (unsigned long) &efx_hunt_a0_nic_type
},
1021 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x1a03), /* SFC9220 VF */
1022 .driver_data
= (unsigned long) &efx_hunt_a0_vf_nic_type
},
1023 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x0b03), /* SFC9250 PF */
1024 .driver_data
= (unsigned long) &efx_hunt_a0_nic_type
},
1025 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE
, 0x1b03), /* SFC9250 VF */
1026 .driver_data
= (unsigned long) &efx_hunt_a0_vf_nic_type
},
1027 {0} /* end of list */
1030 /**************************************************************************
1034 **************************************************************************/
1036 void efx_update_sw_stats(struct efx_nic
*efx
, u64
*stats
)
1038 u64 n_rx_nodesc_trunc
= 0;
1039 struct efx_channel
*channel
;
1041 efx_for_each_channel(channel
, efx
)
1042 n_rx_nodesc_trunc
+= channel
->n_rx_nodesc_trunc
;
1043 stats
[GENERIC_STAT_rx_nodesc_trunc
] = n_rx_nodesc_trunc
;
1044 stats
[GENERIC_STAT_rx_noskb_drops
] = atomic_read(&efx
->n_rx_noskb_drops
);
1047 /**************************************************************************
1051 **************************************************************************/
1053 /* Main body of final NIC shutdown code
1054 * This is called only at module unload (or hotplug removal).
1056 static void efx_pci_remove_main(struct efx_nic
*efx
)
1058 /* Flush reset_work. It can no longer be scheduled since we
1061 BUG_ON(efx
->state
== STATE_READY
);
1062 efx_flush_reset_workqueue(efx
);
1064 efx_disable_interrupts(efx
);
1065 efx_clear_interrupt_affinity(efx
);
1066 efx_nic_fini_interrupt(efx
);
1068 efx
->type
->fini(efx
);
1070 efx_remove_all(efx
);
1073 /* Final NIC shutdown
1074 * This is called only at module unload (or hotplug removal). A PF can call
1075 * this on its VFs to ensure they are unbound first.
1077 static void efx_pci_remove(struct pci_dev
*pci_dev
)
1079 struct efx_nic
*efx
;
1081 efx
= pci_get_drvdata(pci_dev
);
1085 /* Mark the NIC as fini, then stop the interface */
1087 efx_dissociate(efx
);
1088 dev_close(efx
->net_dev
);
1089 efx_disable_interrupts(efx
);
1090 efx
->state
= STATE_UNINIT
;
1093 if (efx
->type
->sriov_fini
)
1094 efx
->type
->sriov_fini(efx
);
1096 efx_unregister_netdev(efx
);
1098 efx_mtd_remove(efx
);
1100 efx_pci_remove_main(efx
);
1102 efx_fini_io(efx
, efx
->type
->mem_bar(efx
));
1103 netif_dbg(efx
, drv
, efx
->net_dev
, "shutdown successful\n");
1105 efx_fini_struct(efx
);
1106 free_netdev(efx
->net_dev
);
1108 pci_disable_pcie_error_reporting(pci_dev
);
1111 /* NIC VPD information
1112 * Called during probe to display the part number of the
1113 * installed NIC. VPD is potentially very large but this should
1114 * always appear within the first 512 bytes.
1116 #define SFC_VPD_LEN 512
1117 static void efx_probe_vpd_strings(struct efx_nic
*efx
)
1119 struct pci_dev
*dev
= efx
->pci_dev
;
1120 char vpd_data
[SFC_VPD_LEN
];
1122 int ro_start
, ro_size
, i
, j
;
1124 /* Get the vpd data from the device */
1125 vpd_size
= pci_read_vpd(dev
, 0, sizeof(vpd_data
), vpd_data
);
1126 if (vpd_size
<= 0) {
1127 netif_err(efx
, drv
, efx
->net_dev
, "Unable to read VPD\n");
1131 /* Get the Read only section */
1132 ro_start
= pci_vpd_find_tag(vpd_data
, 0, vpd_size
, PCI_VPD_LRDT_RO_DATA
);
1134 netif_err(efx
, drv
, efx
->net_dev
, "VPD Read-only not found\n");
1138 ro_size
= pci_vpd_lrdt_size(&vpd_data
[ro_start
]);
1140 i
= ro_start
+ PCI_VPD_LRDT_TAG_SIZE
;
1141 if (i
+ j
> vpd_size
)
1144 /* Get the Part number */
1145 i
= pci_vpd_find_info_keyword(vpd_data
, i
, j
, "PN");
1147 netif_err(efx
, drv
, efx
->net_dev
, "Part number not found\n");
1151 j
= pci_vpd_info_field_size(&vpd_data
[i
]);
1152 i
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
1153 if (i
+ j
> vpd_size
) {
1154 netif_err(efx
, drv
, efx
->net_dev
, "Incomplete part number\n");
1158 netif_info(efx
, drv
, efx
->net_dev
,
1159 "Part Number : %.*s\n", j
, &vpd_data
[i
]);
1161 i
= ro_start
+ PCI_VPD_LRDT_TAG_SIZE
;
1163 i
= pci_vpd_find_info_keyword(vpd_data
, i
, j
, "SN");
1165 netif_err(efx
, drv
, efx
->net_dev
, "Serial number not found\n");
1169 j
= pci_vpd_info_field_size(&vpd_data
[i
]);
1170 i
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
1171 if (i
+ j
> vpd_size
) {
1172 netif_err(efx
, drv
, efx
->net_dev
, "Incomplete serial number\n");
1176 efx
->vpd_sn
= kmalloc(j
+ 1, GFP_KERNEL
);
1180 snprintf(efx
->vpd_sn
, j
+ 1, "%s", &vpd_data
[i
]);
1184 /* Main body of NIC initialisation
1185 * This is called at module load (or hotplug insertion, theoretically).
1187 static int efx_pci_probe_main(struct efx_nic
*efx
)
1191 /* Do start-of-day initialisation */
1192 rc
= efx_probe_all(efx
);
1198 down_write(&efx
->filter_sem
);
1199 rc
= efx
->type
->init(efx
);
1200 up_write(&efx
->filter_sem
);
1202 netif_err(efx
, probe
, efx
->net_dev
,
1203 "failed to initialise NIC\n");
1207 rc
= efx_init_port(efx
);
1209 netif_err(efx
, probe
, efx
->net_dev
,
1210 "failed to initialise port\n");
1214 rc
= efx_nic_init_interrupt(efx
);
1218 efx_set_interrupt_affinity(efx
);
1219 rc
= efx_enable_interrupts(efx
);
1226 efx_clear_interrupt_affinity(efx
);
1227 efx_nic_fini_interrupt(efx
);
1231 efx
->type
->fini(efx
);
1234 efx_remove_all(efx
);
1239 static int efx_pci_probe_post_io(struct efx_nic
*efx
)
1241 struct net_device
*net_dev
= efx
->net_dev
;
1242 int rc
= efx_pci_probe_main(efx
);
1247 if (efx
->type
->sriov_init
) {
1248 rc
= efx
->type
->sriov_init(efx
);
1250 netif_err(efx
, probe
, efx
->net_dev
,
1251 "SR-IOV can't be enabled rc %d\n", rc
);
1254 /* Determine netdevice features */
1255 net_dev
->features
|= (efx
->type
->offload_features
| NETIF_F_SG
|
1256 NETIF_F_TSO
| NETIF_F_RXCSUM
| NETIF_F_RXALL
);
1257 if (efx
->type
->offload_features
& (NETIF_F_IPV6_CSUM
| NETIF_F_HW_CSUM
))
1258 net_dev
->features
|= NETIF_F_TSO6
;
1259 /* Check whether device supports TSO */
1260 if (!efx
->type
->tso_versions
|| !efx
->type
->tso_versions(efx
))
1261 net_dev
->features
&= ~NETIF_F_ALL_TSO
;
1262 /* Mask for features that also apply to VLAN devices */
1263 net_dev
->vlan_features
|= (NETIF_F_HW_CSUM
| NETIF_F_SG
|
1264 NETIF_F_HIGHDMA
| NETIF_F_ALL_TSO
|
1267 net_dev
->hw_features
|= net_dev
->features
& ~efx
->fixed_features
;
1269 /* Disable receiving frames with bad FCS, by default. */
1270 net_dev
->features
&= ~NETIF_F_RXALL
;
1272 /* Disable VLAN filtering by default. It may be enforced if
1273 * the feature is fixed (i.e. VLAN filters are required to
1274 * receive VLAN tagged packets due to vPort restrictions).
1276 net_dev
->features
&= ~NETIF_F_HW_VLAN_CTAG_FILTER
;
1277 net_dev
->features
|= efx
->fixed_features
;
1279 rc
= efx_register_netdev(efx
);
1283 efx_pci_remove_main(efx
);
1287 /* NIC initialisation
1289 * This is called at module load (or hotplug insertion,
1290 * theoretically). It sets up PCI mappings, resets the NIC,
1291 * sets up and registers the network devices with the kernel and hooks
1292 * the interrupt service routine. It does not prepare the device for
1293 * transmission; this is left to the first time one of the network
1294 * interfaces is brought up (i.e. efx_net_open).
1296 static int efx_pci_probe(struct pci_dev
*pci_dev
,
1297 const struct pci_device_id
*entry
)
1299 struct net_device
*net_dev
;
1300 struct efx_nic
*efx
;
1303 /* Allocate and initialise a struct net_device and struct efx_nic */
1304 net_dev
= alloc_etherdev_mqs(sizeof(*efx
), EFX_MAX_CORE_TX_QUEUES
,
1308 efx
= netdev_priv(net_dev
);
1309 efx
->type
= (const struct efx_nic_type
*) entry
->driver_data
;
1310 efx
->fixed_features
|= NETIF_F_HIGHDMA
;
1312 pci_set_drvdata(pci_dev
, efx
);
1313 SET_NETDEV_DEV(net_dev
, &pci_dev
->dev
);
1314 rc
= efx_init_struct(efx
, pci_dev
, net_dev
);
1318 netif_info(efx
, probe
, efx
->net_dev
,
1319 "Solarflare NIC detected\n");
1321 if (!efx
->type
->is_vf
)
1322 efx_probe_vpd_strings(efx
);
1324 /* Set up basic I/O (BAR mappings etc) */
1325 rc
= efx_init_io(efx
, efx
->type
->mem_bar(efx
), efx
->type
->max_dma_mask
,
1326 efx
->type
->mem_map_size(efx
));
1330 rc
= efx_pci_probe_post_io(efx
);
1332 /* On failure, retry once immediately.
1333 * If we aborted probe due to a scheduled reset, dismiss it.
1335 efx
->reset_pending
= 0;
1336 rc
= efx_pci_probe_post_io(efx
);
1338 /* On another failure, retry once more
1339 * after a 50-305ms delay.
1343 get_random_bytes(&r
, 1);
1344 msleep((unsigned int)r
+ 50);
1345 efx
->reset_pending
= 0;
1346 rc
= efx_pci_probe_post_io(efx
);
1352 netif_dbg(efx
, probe
, efx
->net_dev
, "initialisation successful\n");
1354 /* Try to create MTDs, but allow this to fail */
1356 rc
= efx_mtd_probe(efx
);
1358 if (rc
&& rc
!= -EPERM
)
1359 netif_warn(efx
, probe
, efx
->net_dev
,
1360 "failed to create MTDs (%d)\n", rc
);
1362 (void)pci_enable_pcie_error_reporting(pci_dev
);
1364 if (efx
->type
->udp_tnl_push_ports
)
1365 efx
->type
->udp_tnl_push_ports(efx
);
1370 efx_fini_io(efx
, efx
->type
->mem_bar(efx
));
1372 efx_fini_struct(efx
);
1375 netif_dbg(efx
, drv
, efx
->net_dev
, "initialisation failed. rc=%d\n", rc
);
1376 free_netdev(net_dev
);
/* efx_pci_sriov_configure returns the actual number of Virtual Functions
 * enabled on success
 */
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	int rc;
	struct efx_nic *efx = pci_get_drvdata(dev);

	if (efx->type->sriov_configure) {
		rc = efx->type->sriov_configure(efx, num_vfs);
		if (rc)
			return rc;
		else
			return num_vfs;
	} else {
		/* NIC type has no SR-IOV support */
		return -EOPNOTSUPP;
	}
}
#endif
1400 static int efx_pm_freeze(struct device
*dev
)
1402 struct efx_nic
*efx
= dev_get_drvdata(dev
);
1406 if (efx
->state
!= STATE_DISABLED
) {
1407 efx
->state
= STATE_UNINIT
;
1409 efx_device_detach_sync(efx
);
1412 efx_disable_interrupts(efx
);
1420 static int efx_pm_thaw(struct device
*dev
)
1423 struct efx_nic
*efx
= dev_get_drvdata(dev
);
1427 if (efx
->state
!= STATE_DISABLED
) {
1428 rc
= efx_enable_interrupts(efx
);
1432 mutex_lock(&efx
->mac_lock
);
1433 efx
->phy_op
->reconfigure(efx
);
1434 mutex_unlock(&efx
->mac_lock
);
1438 efx_device_attach_if_not_resetting(efx
);
1440 efx
->state
= STATE_READY
;
1442 efx
->type
->resume_wol(efx
);
1447 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
1448 efx_queue_reset_work(efx
);
1458 static int efx_pm_poweroff(struct device
*dev
)
1460 struct pci_dev
*pci_dev
= to_pci_dev(dev
);
1461 struct efx_nic
*efx
= pci_get_drvdata(pci_dev
);
1463 efx
->type
->fini(efx
);
1465 efx
->reset_pending
= 0;
1467 pci_save_state(pci_dev
);
1468 return pci_set_power_state(pci_dev
, PCI_D3hot
);
1471 /* Used for both resume and restore */
1472 static int efx_pm_resume(struct device
*dev
)
1474 struct pci_dev
*pci_dev
= to_pci_dev(dev
);
1475 struct efx_nic
*efx
= pci_get_drvdata(pci_dev
);
1478 rc
= pci_set_power_state(pci_dev
, PCI_D0
);
1481 pci_restore_state(pci_dev
);
1482 rc
= pci_enable_device(pci_dev
);
1485 pci_set_master(efx
->pci_dev
);
1486 rc
= efx
->type
->reset(efx
, RESET_TYPE_ALL
);
1489 down_write(&efx
->filter_sem
);
1490 rc
= efx
->type
->init(efx
);
1491 up_write(&efx
->filter_sem
);
1494 rc
= efx_pm_thaw(dev
);
/* PM suspend callback: freeze then power off.
 * If poweroff fails, attempt to resume so the device is left usable.
 */
static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);

	return rc;
}
1509 static const struct dev_pm_ops efx_pm_ops
= {
1510 .suspend
= efx_pm_suspend
,
1511 .resume
= efx_pm_resume
,
1512 .freeze
= efx_pm_freeze
,
1513 .thaw
= efx_pm_thaw
,
1514 .poweroff
= efx_pm_poweroff
,
1515 .restore
= efx_pm_resume
,
1518 /* A PCI error affecting this device was detected.
1519 * At this point MMIO and DMA may be disabled.
1520 * Stop the software path and request a slot reset.
1522 static pci_ers_result_t
efx_io_error_detected(struct pci_dev
*pdev
,
1523 enum pci_channel_state state
)
1525 pci_ers_result_t status
= PCI_ERS_RESULT_RECOVERED
;
1526 struct efx_nic
*efx
= pci_get_drvdata(pdev
);
1528 if (state
== pci_channel_io_perm_failure
)
1529 return PCI_ERS_RESULT_DISCONNECT
;
1533 if (efx
->state
!= STATE_DISABLED
) {
1534 efx
->state
= STATE_RECOVERY
;
1535 efx
->reset_pending
= 0;
1537 efx_device_detach_sync(efx
);
1540 efx_disable_interrupts(efx
);
1542 status
= PCI_ERS_RESULT_NEED_RESET
;
1544 /* If the interface is disabled we don't want to do anything
1547 status
= PCI_ERS_RESULT_RECOVERED
;
1552 pci_disable_device(pdev
);
1557 /* Fake a successful reset, which will be performed later in efx_io_resume. */
1558 static pci_ers_result_t
efx_io_slot_reset(struct pci_dev
*pdev
)
1560 struct efx_nic
*efx
= pci_get_drvdata(pdev
);
1561 pci_ers_result_t status
= PCI_ERS_RESULT_RECOVERED
;
1563 if (pci_enable_device(pdev
)) {
1564 netif_err(efx
, hw
, efx
->net_dev
,
1565 "Cannot re-enable PCI device after reset.\n");
1566 status
= PCI_ERS_RESULT_DISCONNECT
;
1572 /* Perform the actual reset and resume I/O operations. */
1573 static void efx_io_resume(struct pci_dev
*pdev
)
1575 struct efx_nic
*efx
= pci_get_drvdata(pdev
);
1580 if (efx
->state
== STATE_DISABLED
)
1583 rc
= efx_reset(efx
, RESET_TYPE_ALL
);
1585 netif_err(efx
, hw
, efx
->net_dev
,
1586 "efx_reset failed after PCI error (%d)\n", rc
);
1588 efx
->state
= STATE_READY
;
1589 netif_dbg(efx
, hw
, efx
->net_dev
,
1590 "Done resetting and resuming IO after PCI error.\n");
1597 /* For simplicity and reliability, we always require a slot reset and try to
1598 * reset the hardware when a pci error affecting the device is detected.
1599 * We leave both the link_reset and mmio_enabled callback unimplemented:
1600 * with our request for slot reset the mmio_enabled callback will never be
1601 * called, and the link_reset callback is not used by AER or EEH mechanisms.
1603 static const struct pci_error_handlers efx_err_handlers
= {
1604 .error_detected
= efx_io_error_detected
,
1605 .slot_reset
= efx_io_slot_reset
,
1606 .resume
= efx_io_resume
,
1609 static struct pci_driver efx_pci_driver
= {
1610 .name
= KBUILD_MODNAME
,
1611 .id_table
= efx_pci_table
,
1612 .probe
= efx_pci_probe
,
1613 .remove
= efx_pci_remove
,
1614 .driver
.pm
= &efx_pm_ops
,
1615 .err_handler
= &efx_err_handlers
,
1616 #ifdef CONFIG_SFC_SRIOV
1617 .sriov_configure
= efx_pci_sriov_configure
,
1621 /**************************************************************************
1623 * Kernel module interface
1625 *************************************************************************/
1627 static int __init
efx_init_module(void)
1631 printk(KERN_INFO
"Solarflare NET driver v" EFX_DRIVER_VERSION
"\n");
1633 rc
= register_netdevice_notifier(&efx_netdev_notifier
);
1637 #ifdef CONFIG_SFC_SRIOV
1638 rc
= efx_init_sriov();
1643 rc
= efx_create_reset_workqueue();
1647 rc
= pci_register_driver(&efx_pci_driver
);
1654 efx_destroy_reset_workqueue();
1656 #ifdef CONFIG_SFC_SRIOV
1660 unregister_netdevice_notifier(&efx_netdev_notifier
);
1665 static void __exit
efx_exit_module(void)
1667 printk(KERN_INFO
"Solarflare NET driver unloading\n");
1669 pci_unregister_driver(&efx_pci_driver
);
1670 efx_destroy_reset_workqueue();
1671 #ifdef CONFIG_SFC_SRIOV
1674 unregister_netdevice_notifier(&efx_netdev_notifier
);
1678 module_init(efx_init_module
);
1679 module_exit(efx_exit_module
);
1681 MODULE_AUTHOR("Solarflare Communications and "
1682 "Michael Brown <mbrown@fensystems.co.uk>");
1683 MODULE_DESCRIPTION("Solarflare network driver");
1684 MODULE_LICENSE("GPL");
1685 MODULE_DEVICE_TABLE(pci
, efx_pci_table
);
1686 MODULE_VERSION(EFX_DRIVER_VERSION
);