/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/tcp.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/udp_tunnel.h>
#include "mcdi_pcol.h"
#include "workarounds.h"
/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE]	= "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]		= "DATAPATH",
	[RESET_TYPE_MC_BIST]		= "MC_BIST",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_DMA_ERROR]		= "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
};
/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
};

void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
{
	if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
	    efx_udp_tunnel_type_names[type] != NULL)
		snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
	else
		snprintf(buf, buflen, "type %d", type);
}
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
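
/* Worked example (added for clarity, derived from the two constants above):
 * waiting for another function's BIST is bounded at roughly
 * BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 100 * 100 ms = 10 seconds
 * of polling before the driver stops waiting.
 */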
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
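
/* Illustrative usage (not part of the original source): these module
 * parameters are supplied when the driver module is loaded, e.g.
 *
 *   modprobe sfc rss_cpus=4 efx_separate_tx_channels=1 irq_adapt_low_thresh=4000
 *
 * The values shown are arbitrary examples; "debug" takes a bitmap built from
 * the same NETIF_MSG_* flags used in its default above.
 */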
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		schedule_work(&channel->filter_work);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}
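
/* Note (added for clarity): efx_poll() follows the standard NAPI contract.
 * Returning a value equal to "budget" tells the core that more work remains
 * and it should poll again; returning less than "budget" after
 * napi_complete_done() lets efx_nic_eventq_read_ack() acknowledge the event
 * queue so its interrupt can fire again.
 */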
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}
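
/* Worked sizing example (illustrative values, not from the original source):
 * with rxq_entries = txq_entries = 1024, the event queue is sized as
 * roundup_pow_of_two(1024 + 1024 + 128) = 4096 events, so eventq_mask
 * becomes 4095.
 */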
/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}

	return rc;
}
/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}
static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}
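
/* Summary (added for clarity): an event queue moves through
 * efx_probe_eventq() -> efx_init_eventq() -> efx_start_eventq() as the
 * interface comes up, and back through efx_stop_eventq() ->
 * efx_fini_eventq() -> efx_remove_eventq() on the way down. Only the probe
 * step allocates memory, so channel resets can reuse the same buffer.
 */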
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}
497 /* Allocate and initialise a channel structure, copying parameters
498 * (but not resources) from an old channel structure.
500 static struct efx_channel
*
501 efx_copy_channel(const struct efx_channel
*old_channel
)
503 struct efx_channel
*channel
;
504 struct efx_rx_queue
*rx_queue
;
505 struct efx_tx_queue
*tx_queue
;
508 channel
= kmalloc(sizeof(*channel
), GFP_KERNEL
);
512 *channel
= *old_channel
;
514 channel
->napi_dev
= NULL
;
515 INIT_HLIST_NODE(&channel
->napi_str
.napi_hash_node
);
516 channel
->napi_str
.napi_id
= 0;
517 channel
->napi_str
.state
= 0;
518 memset(&channel
->eventq
, 0, sizeof(channel
->eventq
));
520 for (j
= 0; j
< EFX_TXQ_TYPES
; j
++) {
521 tx_queue
= &channel
->tx_queue
[j
];
522 if (tx_queue
->channel
)
523 tx_queue
->channel
= channel
;
524 tx_queue
->buffer
= NULL
;
525 tx_queue
->cb_page
= NULL
;
526 memset(&tx_queue
->txd
, 0, sizeof(tx_queue
->txd
));
529 rx_queue
= &channel
->rx_queue
;
530 rx_queue
->buffer
= NULL
;
531 memset(&rx_queue
->rxd
, 0, sizeof(rx_queue
->rxd
));
532 timer_setup(&rx_queue
->slow_fill
, efx_rx_slow_fill
, 0);
533 #ifdef CONFIG_RFS_ACCEL
534 INIT_WORK(&channel
->filter_work
, efx_filter_rfs_expire
);
540 static int efx_probe_channel(struct efx_channel
*channel
)
542 struct efx_tx_queue
*tx_queue
;
543 struct efx_rx_queue
*rx_queue
;
546 netif_dbg(channel
->efx
, probe
, channel
->efx
->net_dev
,
547 "creating channel %d\n", channel
->channel
);
549 rc
= channel
->type
->pre_probe(channel
);
553 rc
= efx_probe_eventq(channel
);
557 efx_for_each_channel_tx_queue(tx_queue
, channel
) {
558 rc
= efx_probe_tx_queue(tx_queue
);
563 efx_for_each_channel_rx_queue(rx_queue
, channel
) {
564 rc
= efx_probe_rx_queue(rx_queue
);
569 channel
->rx_list
= NULL
;
574 efx_remove_channel(channel
);
579 efx_get_channel_name(struct efx_channel
*channel
, char *buf
, size_t len
)
581 struct efx_nic
*efx
= channel
->efx
;
585 number
= channel
->channel
;
586 if (efx
->tx_channel_offset
== 0) {
588 } else if (channel
->channel
< efx
->tx_channel_offset
) {
592 number
-= efx
->tx_channel_offset
;
594 snprintf(buf
, len
, "%s%s-%d", efx
->name
, type
, number
);
597 static void efx_set_channel_names(struct efx_nic
*efx
)
599 struct efx_channel
*channel
;
601 efx_for_each_channel(channel
, efx
)
602 channel
->type
->get_name(channel
,
603 efx
->msi_context
[channel
->channel
].name
,
604 sizeof(efx
->msi_context
[0].name
));
607 static int efx_probe_channels(struct efx_nic
*efx
)
609 struct efx_channel
*channel
;
612 /* Restart special buffer allocation */
613 efx
->next_buffer_table
= 0;
615 /* Probe channels in reverse, so that any 'extra' channels
616 * use the start of the buffer table. This allows the traffic
617 * channels to be resized without moving them or wasting the
618 * entries before them.
620 efx_for_each_channel_rev(channel
, efx
) {
621 rc
= efx_probe_channel(channel
);
623 netif_err(efx
, probe
, efx
->net_dev
,
624 "failed to create channel %d\n",
629 efx_set_channel_names(efx
);
634 efx_remove_channels(efx
);
638 /* Channels are shutdown and reinitialised whilst the NIC is running
639 * to propagate configuration changes (mtu, checksum offload), or
640 * to clear hardware error conditions
642 static void efx_start_datapath(struct efx_nic
*efx
)
644 netdev_features_t old_features
= efx
->net_dev
->features
;
645 bool old_rx_scatter
= efx
->rx_scatter
;
646 struct efx_tx_queue
*tx_queue
;
647 struct efx_rx_queue
*rx_queue
;
648 struct efx_channel
*channel
;
651 /* Calculate the rx buffer allocation parameters required to
652 * support the current MTU, including padding for header
653 * alignment and overruns.
655 efx
->rx_dma_len
= (efx
->rx_prefix_size
+
656 EFX_MAX_FRAME_LEN(efx
->net_dev
->mtu
) +
657 efx
->type
->rx_buffer_padding
);
658 rx_buf_len
= (sizeof(struct efx_rx_page_state
) +
659 efx
->rx_ip_align
+ efx
->rx_dma_len
);
660 if (rx_buf_len
<= PAGE_SIZE
) {
661 efx
->rx_scatter
= efx
->type
->always_rx_scatter
;
662 efx
->rx_buffer_order
= 0;
663 } else if (efx
->type
->can_rx_scatter
) {
664 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE
% L1_CACHE_BYTES
);
665 BUILD_BUG_ON(sizeof(struct efx_rx_page_state
) +
666 2 * ALIGN(NET_IP_ALIGN
+ EFX_RX_USR_BUF_SIZE
,
667 EFX_RX_BUF_ALIGNMENT
) >
669 efx
->rx_scatter
= true;
670 efx
->rx_dma_len
= EFX_RX_USR_BUF_SIZE
;
671 efx
->rx_buffer_order
= 0;
673 efx
->rx_scatter
= false;
674 efx
->rx_buffer_order
= get_order(rx_buf_len
);
677 efx_rx_config_page_split(efx
);
678 if (efx
->rx_buffer_order
)
679 netif_dbg(efx
, drv
, efx
->net_dev
,
680 "RX buf len=%u; page order=%u batch=%u\n",
681 efx
->rx_dma_len
, efx
->rx_buffer_order
,
682 efx
->rx_pages_per_batch
);
684 netif_dbg(efx
, drv
, efx
->net_dev
,
685 "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
686 efx
->rx_dma_len
, efx
->rx_page_buf_step
,
687 efx
->rx_bufs_per_page
, efx
->rx_pages_per_batch
);
689 /* Restore previously fixed features in hw_features and remove
690 * features which are fixed now
692 efx
->net_dev
->hw_features
|= efx
->net_dev
->features
;
693 efx
->net_dev
->hw_features
&= ~efx
->fixed_features
;
694 efx
->net_dev
->features
|= efx
->fixed_features
;
695 if (efx
->net_dev
->features
!= old_features
)
696 netdev_features_change(efx
->net_dev
);
698 /* RX filters may also have scatter-enabled flags */
699 if (efx
->rx_scatter
!= old_rx_scatter
)
700 efx
->type
->filter_update_rx_scatter(efx
);
702 /* We must keep at least one descriptor in a TX ring empty.
703 * We could avoid this when the queue size does not exactly
704 * match the hardware ring size, but it's not that important.
705 * Therefore we stop the queue when one more skb might fill
706 * the ring completely. We wake it when half way back to
709 efx
->txq_stop_thresh
= efx
->txq_entries
- efx_tx_max_skb_descs(efx
);
710 efx
->txq_wake_thresh
= efx
->txq_stop_thresh
/ 2;
712 /* Initialise the channels */
713 efx_for_each_channel(channel
, efx
) {
714 efx_for_each_channel_tx_queue(tx_queue
, channel
) {
715 efx_init_tx_queue(tx_queue
);
716 atomic_inc(&efx
->active_queues
);
719 efx_for_each_channel_rx_queue(rx_queue
, channel
) {
720 efx_init_rx_queue(rx_queue
);
721 atomic_inc(&efx
->active_queues
);
722 efx_stop_eventq(channel
);
723 efx_fast_push_rx_descriptors(rx_queue
, false);
724 efx_start_eventq(channel
);
727 WARN_ON(channel
->rx_pkt_n_frags
);
730 efx_ptp_start_datapath(efx
);
732 if (netif_device_present(efx
->net_dev
))
733 netif_tx_wake_all_queues(efx
->net_dev
);
736 static void efx_stop_datapath(struct efx_nic
*efx
)
738 struct efx_channel
*channel
;
739 struct efx_tx_queue
*tx_queue
;
740 struct efx_rx_queue
*rx_queue
;
743 EFX_ASSERT_RESET_SERIALISED(efx
);
744 BUG_ON(efx
->port_enabled
);
746 efx_ptp_stop_datapath(efx
);
749 efx_for_each_channel(channel
, efx
) {
750 efx_for_each_channel_rx_queue(rx_queue
, channel
)
751 rx_queue
->refill_enabled
= false;
754 efx_for_each_channel(channel
, efx
) {
755 /* RX packet processing is pipelined, so wait for the
756 * NAPI handler to complete. At least event queue 0
757 * might be kept active by non-data events, so don't
758 * use napi_synchronize() but actually disable NAPI
761 if (efx_channel_has_rx_queue(channel
)) {
762 efx_stop_eventq(channel
);
763 efx_start_eventq(channel
);
767 rc
= efx
->type
->fini_dmaq(efx
);
769 netif_err(efx
, drv
, efx
->net_dev
, "failed to flush queues\n");
771 netif_dbg(efx
, drv
, efx
->net_dev
,
772 "successfully flushed all queues\n");
775 efx_for_each_channel(channel
, efx
) {
776 efx_for_each_channel_rx_queue(rx_queue
, channel
)
777 efx_fini_rx_queue(rx_queue
);
778 efx_for_each_possible_channel_tx_queue(tx_queue
, channel
)
779 efx_fini_tx_queue(tx_queue
);
783 static void efx_remove_channel(struct efx_channel
*channel
)
785 struct efx_tx_queue
*tx_queue
;
786 struct efx_rx_queue
*rx_queue
;
788 netif_dbg(channel
->efx
, drv
, channel
->efx
->net_dev
,
789 "destroy chan %d\n", channel
->channel
);
791 efx_for_each_channel_rx_queue(rx_queue
, channel
)
792 efx_remove_rx_queue(rx_queue
);
793 efx_for_each_possible_channel_tx_queue(tx_queue
, channel
)
794 efx_remove_tx_queue(tx_queue
);
795 efx_remove_eventq(channel
);
796 channel
->type
->post_remove(channel
);
799 static void efx_remove_channels(struct efx_nic
*efx
)
801 struct efx_channel
*channel
;
803 efx_for_each_channel(channel
, efx
)
804 efx_remove_channel(channel
);
808 efx_realloc_channels(struct efx_nic
*efx
, u32 rxq_entries
, u32 txq_entries
)
810 struct efx_channel
*other_channel
[EFX_MAX_CHANNELS
], *channel
;
811 u32 old_rxq_entries
, old_txq_entries
;
812 unsigned i
, next_buffer_table
= 0;
815 rc
= efx_check_disabled(efx
);
819 /* Not all channels should be reallocated. We must avoid
820 * reallocating their buffer table entries.
822 efx_for_each_channel(channel
, efx
) {
823 struct efx_rx_queue
*rx_queue
;
824 struct efx_tx_queue
*tx_queue
;
826 if (channel
->type
->copy
)
828 next_buffer_table
= max(next_buffer_table
,
829 channel
->eventq
.index
+
830 channel
->eventq
.entries
);
831 efx_for_each_channel_rx_queue(rx_queue
, channel
)
832 next_buffer_table
= max(next_buffer_table
,
833 rx_queue
->rxd
.index
+
834 rx_queue
->rxd
.entries
);
835 efx_for_each_channel_tx_queue(tx_queue
, channel
)
836 next_buffer_table
= max(next_buffer_table
,
837 tx_queue
->txd
.index
+
838 tx_queue
->txd
.entries
);
841 efx_device_detach_sync(efx
);
843 efx_soft_disable_interrupts(efx
);
845 /* Clone channels (where possible) */
846 memset(other_channel
, 0, sizeof(other_channel
));
847 for (i
= 0; i
< efx
->n_channels
; i
++) {
848 channel
= efx
->channel
[i
];
849 if (channel
->type
->copy
)
850 channel
= channel
->type
->copy(channel
);
855 other_channel
[i
] = channel
;
858 /* Swap entry counts and channel pointers */
859 old_rxq_entries
= efx
->rxq_entries
;
860 old_txq_entries
= efx
->txq_entries
;
861 efx
->rxq_entries
= rxq_entries
;
862 efx
->txq_entries
= txq_entries
;
863 for (i
= 0; i
< efx
->n_channels
; i
++) {
864 channel
= efx
->channel
[i
];
865 efx
->channel
[i
] = other_channel
[i
];
866 other_channel
[i
] = channel
;
869 /* Restart buffer table allocation */
870 efx
->next_buffer_table
= next_buffer_table
;
872 for (i
= 0; i
< efx
->n_channels
; i
++) {
873 channel
= efx
->channel
[i
];
874 if (!channel
->type
->copy
)
876 rc
= efx_probe_channel(channel
);
879 efx_init_napi_channel(efx
->channel
[i
]);
883 /* Destroy unused channel structures */
884 for (i
= 0; i
< efx
->n_channels
; i
++) {
885 channel
= other_channel
[i
];
886 if (channel
&& channel
->type
->copy
) {
887 efx_fini_napi_channel(channel
);
888 efx_remove_channel(channel
);
893 rc2
= efx_soft_enable_interrupts(efx
);
896 netif_err(efx
, drv
, efx
->net_dev
,
897 "unable to restart interrupts on channel reallocation\n");
898 efx_schedule_reset(efx
, RESET_TYPE_DISABLE
);
901 efx_device_attach_if_not_resetting(efx
);
907 efx
->rxq_entries
= old_rxq_entries
;
908 efx
->txq_entries
= old_txq_entries
;
909 for (i
= 0; i
< efx
->n_channels
; i
++) {
910 channel
= efx
->channel
[i
];
911 efx
->channel
[i
] = other_channel
[i
];
912 other_channel
[i
] = channel
;
917 void efx_schedule_slow_fill(struct efx_rx_queue
*rx_queue
)
919 mod_timer(&rx_queue
->slow_fill
, jiffies
+ msecs_to_jiffies(100));
922 static bool efx_default_channel_want_txqs(struct efx_channel
*channel
)
924 return channel
->channel
- channel
->efx
->tx_channel_offset
<
925 channel
->efx
->n_tx_channels
;
928 static const struct efx_channel_type efx_default_channel_type
= {
929 .pre_probe
= efx_channel_dummy_op_int
,
930 .post_remove
= efx_channel_dummy_op_void
,
931 .get_name
= efx_get_channel_name
,
932 .copy
= efx_copy_channel
,
933 .want_txqs
= efx_default_channel_want_txqs
,
934 .keep_eventq
= false,
938 int efx_channel_dummy_op_int(struct efx_channel
*channel
)
943 void efx_channel_dummy_op_void(struct efx_channel
*channel
)
947 /**************************************************************************
951 **************************************************************************/
953 /* This ensures that the kernel is kept informed (via
954 * netif_carrier_on/off) of the link status, and also maintains the
955 * link status's stop on the port's TX queue.
957 void efx_link_status_changed(struct efx_nic
*efx
)
959 struct efx_link_state
*link_state
= &efx
->link_state
;
961 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
962 * that no events are triggered between unregister_netdev() and the
963 * driver unloading. A more general condition is that NETDEV_CHANGE
964 * can only be generated between NETDEV_UP and NETDEV_DOWN */
965 if (!netif_running(efx
->net_dev
))
968 if (link_state
->up
!= netif_carrier_ok(efx
->net_dev
)) {
969 efx
->n_link_state_changes
++;
972 netif_carrier_on(efx
->net_dev
);
974 netif_carrier_off(efx
->net_dev
);
977 /* Status message for kernel log */
979 netif_info(efx
, link
, efx
->net_dev
,
980 "link up at %uMbps %s-duplex (MTU %d)\n",
981 link_state
->speed
, link_state
->fd
? "full" : "half",
984 netif_info(efx
, link
, efx
->net_dev
, "link down\n");
987 void efx_link_set_advertising(struct efx_nic
*efx
,
988 const unsigned long *advertising
)
990 memcpy(efx
->link_advertising
, advertising
,
991 sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
993 efx
->link_advertising
[0] |= ADVERTISED_Autoneg
;
994 if (advertising
[0] & ADVERTISED_Pause
)
995 efx
->wanted_fc
|= (EFX_FC_TX
| EFX_FC_RX
);
997 efx
->wanted_fc
&= ~(EFX_FC_TX
| EFX_FC_RX
);
998 if (advertising
[0] & ADVERTISED_Asym_Pause
)
999 efx
->wanted_fc
^= EFX_FC_TX
;
1002 /* Equivalent to efx_link_set_advertising with all-zeroes, except does not
1003 * force the Autoneg bit on.
1005 void efx_link_clear_advertising(struct efx_nic
*efx
)
1007 bitmap_zero(efx
->link_advertising
, __ETHTOOL_LINK_MODE_MASK_NBITS
);
1008 efx
->wanted_fc
&= ~(EFX_FC_TX
| EFX_FC_RX
);
1011 void efx_link_set_wanted_fc(struct efx_nic
*efx
, u8 wanted_fc
)
1013 efx
->wanted_fc
= wanted_fc
;
1014 if (efx
->link_advertising
[0]) {
1015 if (wanted_fc
& EFX_FC_RX
)
1016 efx
->link_advertising
[0] |= (ADVERTISED_Pause
|
1017 ADVERTISED_Asym_Pause
);
1019 efx
->link_advertising
[0] &= ~(ADVERTISED_Pause
|
1020 ADVERTISED_Asym_Pause
);
1021 if (wanted_fc
& EFX_FC_TX
)
1022 efx
->link_advertising
[0] ^= ADVERTISED_Asym_Pause
;
1026 static void efx_fini_port(struct efx_nic
*efx
);
1028 /* We assume that efx->type->reconfigure_mac will always try to sync RX
1029 * filters and therefore needs to read-lock the filter table against freeing
1031 void efx_mac_reconfigure(struct efx_nic
*efx
)
1033 down_read(&efx
->filter_sem
);
1034 efx
->type
->reconfigure_mac(efx
);
1035 up_read(&efx
->filter_sem
);
1038 /* Push loopback/power/transmit disable settings to the PHY, and reconfigure
1039 * the MAC appropriately. All other PHY configuration changes are pushed
1040 * through phy_op->set_settings(), and pushed asynchronously to the MAC
1041 * through efx_monitor().
1043 * Callers must hold the mac_lock
1045 int __efx_reconfigure_port(struct efx_nic
*efx
)
1047 enum efx_phy_mode phy_mode
;
1050 WARN_ON(!mutex_is_locked(&efx
->mac_lock
));
1052 /* Disable PHY transmit in mac level loopbacks */
1053 phy_mode
= efx
->phy_mode
;
1054 if (LOOPBACK_INTERNAL(efx
))
1055 efx
->phy_mode
|= PHY_MODE_TX_DISABLED
;
1057 efx
->phy_mode
&= ~PHY_MODE_TX_DISABLED
;
1059 rc
= efx
->type
->reconfigure_port(efx
);
1062 efx
->phy_mode
= phy_mode
;
1067 /* Reinitialise the MAC to pick up new PHY settings, even if the port is
1069 int efx_reconfigure_port(struct efx_nic
*efx
)
1073 EFX_ASSERT_RESET_SERIALISED(efx
);
1075 mutex_lock(&efx
->mac_lock
);
1076 rc
= __efx_reconfigure_port(efx
);
1077 mutex_unlock(&efx
->mac_lock
);
1082 /* Asynchronous work item for changing MAC promiscuity and multicast
1083 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
1085 static void efx_mac_work(struct work_struct
*data
)
1087 struct efx_nic
*efx
= container_of(data
, struct efx_nic
, mac_work
);
1089 mutex_lock(&efx
->mac_lock
);
1090 if (efx
->port_enabled
)
1091 efx_mac_reconfigure(efx
);
1092 mutex_unlock(&efx
->mac_lock
);
1095 static int efx_probe_port(struct efx_nic
*efx
)
1099 netif_dbg(efx
, probe
, efx
->net_dev
, "create port\n");
1102 efx
->phy_mode
= PHY_MODE_SPECIAL
;
1104 /* Connect up MAC/PHY operations table */
1105 rc
= efx
->type
->probe_port(efx
);
1109 /* Initialise MAC address to permanent address */
1110 ether_addr_copy(efx
->net_dev
->dev_addr
, efx
->net_dev
->perm_addr
);
1115 static int efx_init_port(struct efx_nic
*efx
)
1119 netif_dbg(efx
, drv
, efx
->net_dev
, "init port\n");
1121 mutex_lock(&efx
->mac_lock
);
1123 rc
= efx
->phy_op
->init(efx
);
1127 efx
->port_initialized
= true;
1129 /* Reconfigure the MAC before creating dma queues (required for
1130 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
1131 efx_mac_reconfigure(efx
);
1133 /* Ensure the PHY advertises the correct flow control settings */
1134 rc
= efx
->phy_op
->reconfigure(efx
);
1135 if (rc
&& rc
!= -EPERM
)
1138 mutex_unlock(&efx
->mac_lock
);
1142 efx
->phy_op
->fini(efx
);
1144 mutex_unlock(&efx
->mac_lock
);
1148 static void efx_start_port(struct efx_nic
*efx
)
1150 netif_dbg(efx
, ifup
, efx
->net_dev
, "start port\n");
1151 BUG_ON(efx
->port_enabled
);
1153 mutex_lock(&efx
->mac_lock
);
1154 efx
->port_enabled
= true;
1156 /* Ensure MAC ingress/egress is enabled */
1157 efx_mac_reconfigure(efx
);
1159 mutex_unlock(&efx
->mac_lock
);
1162 /* Cancel work for MAC reconfiguration, periodic hardware monitoring
1163 * and the async self-test, wait for them to finish and prevent them
1164 * being scheduled again. This doesn't cover online resets, which
1165 * should only be cancelled when removing the device.
1167 static void efx_stop_port(struct efx_nic
*efx
)
1169 netif_dbg(efx
, ifdown
, efx
->net_dev
, "stop port\n");
1171 EFX_ASSERT_RESET_SERIALISED(efx
);
1173 mutex_lock(&efx
->mac_lock
);
1174 efx
->port_enabled
= false;
1175 mutex_unlock(&efx
->mac_lock
);
1177 /* Serialise against efx_set_multicast_list() */
1178 netif_addr_lock_bh(efx
->net_dev
);
1179 netif_addr_unlock_bh(efx
->net_dev
);
1181 cancel_delayed_work_sync(&efx
->monitor_work
);
1182 efx_selftest_async_cancel(efx
);
1183 cancel_work_sync(&efx
->mac_work
);
1186 static void efx_fini_port(struct efx_nic
*efx
)
1188 netif_dbg(efx
, drv
, efx
->net_dev
, "shut down port\n");
1190 if (!efx
->port_initialized
)
1193 efx
->phy_op
->fini(efx
);
1194 efx
->port_initialized
= false;
1196 efx
->link_state
.up
= false;
1197 efx_link_status_changed(efx
);
1200 static void efx_remove_port(struct efx_nic
*efx
)
1202 netif_dbg(efx
, drv
, efx
->net_dev
, "destroying port\n");
1204 efx
->type
->remove_port(efx
);
1207 /**************************************************************************
1211 **************************************************************************/
1213 static LIST_HEAD(efx_primary_list
);
1214 static LIST_HEAD(efx_unassociated_list
);
1216 static bool efx_same_controller(struct efx_nic
*left
, struct efx_nic
*right
)
1218 return left
->type
== right
->type
&&
1219 left
->vpd_sn
&& right
->vpd_sn
&&
1220 !strcmp(left
->vpd_sn
, right
->vpd_sn
);
1223 static void efx_associate(struct efx_nic
*efx
)
1225 struct efx_nic
*other
, *next
;
1227 if (efx
->primary
== efx
) {
1228 /* Adding primary function; look for secondaries */
1230 netif_dbg(efx
, probe
, efx
->net_dev
, "adding to primary list\n");
1231 list_add_tail(&efx
->node
, &efx_primary_list
);
1233 list_for_each_entry_safe(other
, next
, &efx_unassociated_list
,
1235 if (efx_same_controller(efx
, other
)) {
1236 list_del(&other
->node
);
1237 netif_dbg(other
, probe
, other
->net_dev
,
1238 "moving to secondary list of %s %s\n",
1239 pci_name(efx
->pci_dev
),
1240 efx
->net_dev
->name
);
1241 list_add_tail(&other
->node
,
1242 &efx
->secondary_list
);
1243 other
->primary
= efx
;
1247 /* Adding secondary function; look for primary */
1249 list_for_each_entry(other
, &efx_primary_list
, node
) {
1250 if (efx_same_controller(efx
, other
)) {
1251 netif_dbg(efx
, probe
, efx
->net_dev
,
1252 "adding to secondary list of %s %s\n",
1253 pci_name(other
->pci_dev
),
1254 other
->net_dev
->name
);
1255 list_add_tail(&efx
->node
,
1256 &other
->secondary_list
);
1257 efx
->primary
= other
;
1262 netif_dbg(efx
, probe
, efx
->net_dev
,
1263 "adding to unassociated list\n");
1264 list_add_tail(&efx
->node
, &efx_unassociated_list
);
1268 static void efx_dissociate(struct efx_nic
*efx
)
1270 struct efx_nic
*other
, *next
;
1272 list_del(&efx
->node
);
1273 efx
->primary
= NULL
;
1275 list_for_each_entry_safe(other
, next
, &efx
->secondary_list
, node
) {
1276 list_del(&other
->node
);
1277 netif_dbg(other
, probe
, other
->net_dev
,
1278 "moving to unassociated list\n");
1279 list_add_tail(&other
->node
, &efx_unassociated_list
);
1280 other
->primary
= NULL
;
1284 /* This configures the PCI device to enable I/O and DMA. */
1285 static int efx_init_io(struct efx_nic
*efx
)
1287 struct pci_dev
*pci_dev
= efx
->pci_dev
;
1288 dma_addr_t dma_mask
= efx
->type
->max_dma_mask
;
1289 unsigned int mem_map_size
= efx
->type
->mem_map_size(efx
);
1292 netif_dbg(efx
, probe
, efx
->net_dev
, "initialising I/O\n");
1294 bar
= efx
->type
->mem_bar(efx
);
1296 rc
= pci_enable_device(pci_dev
);
1298 netif_err(efx
, probe
, efx
->net_dev
,
1299 "failed to enable PCI device\n");
1303 pci_set_master(pci_dev
);
1305 /* Set the PCI DMA mask. Try all possibilities from our genuine mask
1306 * down to 32 bits, because some architectures will allow 40 bit
1307 * masks event though they reject 46 bit masks.
1309 while (dma_mask
> 0x7fffffffUL
) {
1310 rc
= dma_set_mask_and_coherent(&pci_dev
->dev
, dma_mask
);
1316 netif_err(efx
, probe
, efx
->net_dev
,
1317 "could not find a suitable DMA mask\n");
1320 netif_dbg(efx
, probe
, efx
->net_dev
,
1321 "using DMA mask %llx\n", (unsigned long long) dma_mask
);
1323 efx
->membase_phys
= pci_resource_start(efx
->pci_dev
, bar
);
1324 rc
= pci_request_region(pci_dev
, bar
, "sfc");
1326 netif_err(efx
, probe
, efx
->net_dev
,
1327 "request for memory BAR failed\n");
1331 efx
->membase
= ioremap_nocache(efx
->membase_phys
, mem_map_size
);
1332 if (!efx
->membase
) {
1333 netif_err(efx
, probe
, efx
->net_dev
,
1334 "could not map memory BAR at %llx+%x\n",
1335 (unsigned long long)efx
->membase_phys
, mem_map_size
);
1339 netif_dbg(efx
, probe
, efx
->net_dev
,
1340 "memory BAR at %llx+%x (virtual %p)\n",
1341 (unsigned long long)efx
->membase_phys
, mem_map_size
,
1347 pci_release_region(efx
->pci_dev
, bar
);
1349 efx
->membase_phys
= 0;
1351 pci_disable_device(efx
->pci_dev
);
1356 static void efx_fini_io(struct efx_nic
*efx
)
1360 netif_dbg(efx
, drv
, efx
->net_dev
, "shutting down I/O\n");
1363 iounmap(efx
->membase
);
1364 efx
->membase
= NULL
;
1367 if (efx
->membase_phys
) {
1368 bar
= efx
->type
->mem_bar(efx
);
1369 pci_release_region(efx
->pci_dev
, bar
);
1370 efx
->membase_phys
= 0;
1373 /* Don't disable bus-mastering if VFs are assigned */
1374 if (!pci_vfs_assigned(efx
->pci_dev
))
1375 pci_disable_device(efx
->pci_dev
);
1378 void efx_set_default_rx_indir_table(struct efx_nic
*efx
,
1379 struct efx_rss_context
*ctx
)
1383 for (i
= 0; i
< ARRAY_SIZE(ctx
->rx_indir_table
); i
++)
1384 ctx
->rx_indir_table
[i
] =
1385 ethtool_rxfh_indir_default(i
, efx
->rss_spread
);
1388 static unsigned int efx_wanted_parallelism(struct efx_nic
*efx
)
1390 cpumask_var_t thread_mask
;
1397 if (unlikely(!zalloc_cpumask_var(&thread_mask
, GFP_KERNEL
))) {
1398 netif_warn(efx
, probe
, efx
->net_dev
,
1399 "RSS disabled due to allocation failure\n");
1404 for_each_online_cpu(cpu
) {
1405 if (!cpumask_test_cpu(cpu
, thread_mask
)) {
1407 cpumask_or(thread_mask
, thread_mask
,
1408 topology_sibling_cpumask(cpu
));
1412 free_cpumask_var(thread_mask
);
1415 if (count
> EFX_MAX_RX_QUEUES
) {
1416 netif_cond_dbg(efx
, probe
, efx
->net_dev
, !rss_cpus
, warn
,
1417 "Reducing number of rx queues from %u to %u.\n",
1418 count
, EFX_MAX_RX_QUEUES
);
1419 count
= EFX_MAX_RX_QUEUES
;
1422 /* If RSS is requested for the PF *and* VFs then we can't write RSS
1423 * table entries that are inaccessible to VFs
1425 #ifdef CONFIG_SFC_SRIOV
1426 if (efx
->type
->sriov_wanted
) {
1427 if (efx
->type
->sriov_wanted(efx
) && efx_vf_size(efx
) > 1 &&
1428 count
> efx_vf_size(efx
)) {
1429 netif_warn(efx
, probe
, efx
->net_dev
,
1430 "Reducing number of RSS channels from %u to %u for "
1431 "VF support. Increase vf-msix-limit to use more "
1432 "channels on the PF.\n",
1433 count
, efx_vf_size(efx
));
1434 count
= efx_vf_size(efx
);
1442 /* Probe the number and type of interrupts we are able to obtain, and
1443 * the resulting numbers of channels and RX queues.
1445 static int efx_probe_interrupts(struct efx_nic
*efx
)
1447 unsigned int extra_channels
= 0;
1451 for (i
= 0; i
< EFX_MAX_EXTRA_CHANNELS
; i
++)
1452 if (efx
->extra_channel_type
[i
])
1455 if (efx
->interrupt_mode
== EFX_INT_MODE_MSIX
) {
1456 struct msix_entry xentries
[EFX_MAX_CHANNELS
];
1457 unsigned int n_channels
;
1459 n_channels
= efx_wanted_parallelism(efx
);
1460 if (efx_separate_tx_channels
)
1462 n_channels
+= extra_channels
;
1463 n_channels
= min(n_channels
, efx
->max_channels
);
1465 for (i
= 0; i
< n_channels
; i
++)
1466 xentries
[i
].entry
= i
;
1467 rc
= pci_enable_msix_range(efx
->pci_dev
,
1468 xentries
, 1, n_channels
);
1470 /* Fall back to single channel MSI */
1471 netif_err(efx
, drv
, efx
->net_dev
,
1472 "could not enable MSI-X\n");
1473 if (efx
->type
->min_interrupt_mode
>= EFX_INT_MODE_MSI
)
1474 efx
->interrupt_mode
= EFX_INT_MODE_MSI
;
1477 } else if (rc
< n_channels
) {
1478 netif_err(efx
, drv
, efx
->net_dev
,
1479 "WARNING: Insufficient MSI-X vectors"
1480 " available (%d < %u).\n", rc
, n_channels
);
1481 netif_err(efx
, drv
, efx
->net_dev
,
1482 "WARNING: Performance may be reduced.\n");
1487 efx
->n_channels
= n_channels
;
1488 if (n_channels
> extra_channels
)
1489 n_channels
-= extra_channels
;
1490 if (efx_separate_tx_channels
) {
1491 efx
->n_tx_channels
= min(max(n_channels
/ 2,
1493 efx
->max_tx_channels
);
1494 efx
->n_rx_channels
= max(n_channels
-
1498 efx
->n_tx_channels
= min(n_channels
,
1499 efx
->max_tx_channels
);
1500 efx
->n_rx_channels
= n_channels
;
1502 for (i
= 0; i
< efx
->n_channels
; i
++)
1503 efx_get_channel(efx
, i
)->irq
=
1508 /* Try single interrupt MSI */
1509 if (efx
->interrupt_mode
== EFX_INT_MODE_MSI
) {
1510 efx
->n_channels
= 1;
1511 efx
->n_rx_channels
= 1;
1512 efx
->n_tx_channels
= 1;
1513 rc
= pci_enable_msi(efx
->pci_dev
);
1515 efx_get_channel(efx
, 0)->irq
= efx
->pci_dev
->irq
;
1517 netif_err(efx
, drv
, efx
->net_dev
,
1518 "could not enable MSI\n");
1519 if (efx
->type
->min_interrupt_mode
>= EFX_INT_MODE_LEGACY
)
1520 efx
->interrupt_mode
= EFX_INT_MODE_LEGACY
;
1526 /* Assume legacy interrupts */
1527 if (efx
->interrupt_mode
== EFX_INT_MODE_LEGACY
) {
1528 efx
->n_channels
= 1 + (efx_separate_tx_channels
? 1 : 0);
1529 efx
->n_rx_channels
= 1;
1530 efx
->n_tx_channels
= 1;
1531 efx
->legacy_irq
= efx
->pci_dev
->irq
;
1534 /* Assign extra channels if possible */
1535 efx
->n_extra_tx_channels
= 0;
1536 j
= efx
->n_channels
;
1537 for (i
= 0; i
< EFX_MAX_EXTRA_CHANNELS
; i
++) {
1538 if (!efx
->extra_channel_type
[i
])
1540 if (efx
->interrupt_mode
!= EFX_INT_MODE_MSIX
||
1541 efx
->n_channels
<= extra_channels
) {
1542 efx
->extra_channel_type
[i
]->handle_no_channel(efx
);
1545 efx_get_channel(efx
, j
)->type
=
1546 efx
->extra_channel_type
[i
];
1547 if (efx_channel_has_tx_queues(efx_get_channel(efx
, j
)))
1548 efx
->n_extra_tx_channels
++;
1552 /* RSS might be usable on VFs even if it is disabled on the PF */
1553 #ifdef CONFIG_SFC_SRIOV
1554 if (efx
->type
->sriov_wanted
) {
1555 efx
->rss_spread
= ((efx
->n_rx_channels
> 1 ||
1556 !efx
->type
->sriov_wanted(efx
)) ?
1557 efx
->n_rx_channels
: efx_vf_size(efx
));
1561 efx
->rss_spread
= efx
->n_rx_channels
;
1566 #if defined(CONFIG_SMP)
1567 static void efx_set_interrupt_affinity(struct efx_nic
*efx
)
1569 struct efx_channel
*channel
;
1572 efx_for_each_channel(channel
, efx
) {
1573 cpu
= cpumask_local_spread(channel
->channel
,
1574 pcibus_to_node(efx
->pci_dev
->bus
));
1575 irq_set_affinity_hint(channel
->irq
, cpumask_of(cpu
));
1579 static void efx_clear_interrupt_affinity(struct efx_nic
*efx
)
1581 struct efx_channel
*channel
;
1583 efx_for_each_channel(channel
, efx
)
1584 irq_set_affinity_hint(channel
->irq
, NULL
);
1588 efx_set_interrupt_affinity(struct efx_nic
*efx
__attribute__ ((unused
)))
1593 efx_clear_interrupt_affinity(struct efx_nic
*efx
__attribute__ ((unused
)))
1596 #endif /* CONFIG_SMP */
1598 static int efx_soft_enable_interrupts(struct efx_nic
*efx
)
1600 struct efx_channel
*channel
, *end_channel
;
1603 BUG_ON(efx
->state
== STATE_DISABLED
);
1605 efx
->irq_soft_enabled
= true;
1608 efx_for_each_channel(channel
, efx
) {
1609 if (!channel
->type
->keep_eventq
) {
1610 rc
= efx_init_eventq(channel
);
1614 efx_start_eventq(channel
);
1617 efx_mcdi_mode_event(efx
);
1621 end_channel
= channel
;
1622 efx_for_each_channel(channel
, efx
) {
1623 if (channel
== end_channel
)
1625 efx_stop_eventq(channel
);
1626 if (!channel
->type
->keep_eventq
)
1627 efx_fini_eventq(channel
);
1633 static void efx_soft_disable_interrupts(struct efx_nic
*efx
)
1635 struct efx_channel
*channel
;
1637 if (efx
->state
== STATE_DISABLED
)
1640 efx_mcdi_mode_poll(efx
);
1642 efx
->irq_soft_enabled
= false;
1645 if (efx
->legacy_irq
)
1646 synchronize_irq(efx
->legacy_irq
);
1648 efx_for_each_channel(channel
, efx
) {
1650 synchronize_irq(channel
->irq
);
1652 efx_stop_eventq(channel
);
1653 if (!channel
->type
->keep_eventq
)
1654 efx_fini_eventq(channel
);
1657 /* Flush the asynchronous MCDI request queue */
1658 efx_mcdi_flush_async(efx
);
1661 static int efx_enable_interrupts(struct efx_nic
*efx
)
1663 struct efx_channel
*channel
, *end_channel
;
1666 BUG_ON(efx
->state
== STATE_DISABLED
);
1668 if (efx
->eeh_disabled_legacy_irq
) {
1669 enable_irq(efx
->legacy_irq
);
1670 efx
->eeh_disabled_legacy_irq
= false;
1673 efx
->type
->irq_enable_master(efx
);
1675 efx_for_each_channel(channel
, efx
) {
1676 if (channel
->type
->keep_eventq
) {
1677 rc
= efx_init_eventq(channel
);
1683 rc
= efx_soft_enable_interrupts(efx
);
1690 end_channel
= channel
;
1691 efx_for_each_channel(channel
, efx
) {
1692 if (channel
== end_channel
)
1694 if (channel
->type
->keep_eventq
)
1695 efx_fini_eventq(channel
);
1698 efx
->type
->irq_disable_non_ev(efx
);
1703 static void efx_disable_interrupts(struct efx_nic
*efx
)
1705 struct efx_channel
*channel
;
1707 efx_soft_disable_interrupts(efx
);
1709 efx_for_each_channel(channel
, efx
) {
1710 if (channel
->type
->keep_eventq
)
1711 efx_fini_eventq(channel
);
1714 efx
->type
->irq_disable_non_ev(efx
);
1717 static void efx_remove_interrupts(struct efx_nic
*efx
)
1719 struct efx_channel
*channel
;
1721 /* Remove MSI/MSI-X interrupts */
1722 efx_for_each_channel(channel
, efx
)
1724 pci_disable_msi(efx
->pci_dev
);
1725 pci_disable_msix(efx
->pci_dev
);
1727 /* Remove legacy interrupt */
1728 efx
->legacy_irq
= 0;
1731 static void efx_set_channels(struct efx_nic
*efx
)
1733 struct efx_channel
*channel
;
1734 struct efx_tx_queue
*tx_queue
;
1736 efx
->tx_channel_offset
=
1737 efx_separate_tx_channels
?
1738 efx
->n_channels
- efx
->n_tx_channels
: 0;
1740 /* We need to mark which channels really have RX and TX
1741 * queues, and adjust the TX queue numbers if we have separate
1742 * RX-only and TX-only channels.
1744 efx_for_each_channel(channel
, efx
) {
1745 if (channel
->channel
< efx
->n_rx_channels
)
1746 channel
->rx_queue
.core_index
= channel
->channel
;
1748 channel
->rx_queue
.core_index
= -1;
1750 efx_for_each_channel_tx_queue(tx_queue
, channel
)
1751 tx_queue
->queue
-= (efx
->tx_channel_offset
*
1756 static int efx_probe_nic(struct efx_nic
*efx
)
1760 netif_dbg(efx
, probe
, efx
->net_dev
, "creating NIC\n");
1762 /* Carry out hardware-type specific initialisation */
1763 rc
= efx
->type
->probe(efx
);
1768 if (!efx
->max_channels
|| !efx
->max_tx_channels
) {
1769 netif_err(efx
, drv
, efx
->net_dev
,
1770 "Insufficient resources to allocate"
1776 /* Determine the number of channels and queues by trying
1777 * to hook in MSI-X interrupts.
1779 rc
= efx_probe_interrupts(efx
);
1783 efx_set_channels(efx
);
1785 /* dimension_resources can fail with EAGAIN */
1786 rc
= efx
->type
->dimension_resources(efx
);
1787 if (rc
!= 0 && rc
!= -EAGAIN
)
1791 /* try again with new max_channels */
1792 efx_remove_interrupts(efx
);
1794 } while (rc
== -EAGAIN
);
1796 if (efx
->n_channels
> 1)
1797 netdev_rss_key_fill(efx
->rss_context
.rx_hash_key
,
1798 sizeof(efx
->rss_context
.rx_hash_key
));
1799 efx_set_default_rx_indir_table(efx
, &efx
->rss_context
);
1801 netif_set_real_num_tx_queues(efx
->net_dev
, efx
->n_tx_channels
);
1802 netif_set_real_num_rx_queues(efx
->net_dev
, efx
->n_rx_channels
);
1804 /* Initialise the interrupt moderation settings */
1805 efx
->irq_mod_step_us
= DIV_ROUND_UP(efx
->timer_quantum_ns
, 1000);
1806 efx_init_irq_moderation(efx
, tx_irq_mod_usec
, rx_irq_mod_usec
, true,
1812 efx_remove_interrupts(efx
);
1814 efx
->type
->remove(efx
);
1818 static void efx_remove_nic(struct efx_nic
*efx
)
1820 netif_dbg(efx
, drv
, efx
->net_dev
, "destroying NIC\n");
1822 efx_remove_interrupts(efx
);
1823 efx
->type
->remove(efx
);
1826 static int efx_probe_filters(struct efx_nic
*efx
)
1830 init_rwsem(&efx
->filter_sem
);
1831 mutex_lock(&efx
->mac_lock
);
1832 down_write(&efx
->filter_sem
);
1833 rc
= efx
->type
->filter_table_probe(efx
);
1837 #ifdef CONFIG_RFS_ACCEL
1838 if (efx
->type
->offload_features
& NETIF_F_NTUPLE
) {
1839 struct efx_channel
*channel
;
1842 efx_for_each_channel(channel
, efx
) {
1843 channel
->rps_flow_id
=
1844 kcalloc(efx
->type
->max_rx_ip_filters
,
1845 sizeof(*channel
->rps_flow_id
),
1847 if (!channel
->rps_flow_id
)
1851 i
< efx
->type
->max_rx_ip_filters
;
1853 channel
->rps_flow_id
[i
] =
1854 RPS_FLOW_ID_INVALID
;
1858 efx_for_each_channel(channel
, efx
)
1859 kfree(channel
->rps_flow_id
);
1860 efx
->type
->filter_table_remove(efx
);
1865 efx
->rps_expire_index
= efx
->rps_expire_channel
= 0;
1869 up_write(&efx
->filter_sem
);
1870 mutex_unlock(&efx
->mac_lock
);
1874 static void efx_remove_filters(struct efx_nic
*efx
)
1876 #ifdef CONFIG_RFS_ACCEL
1877 struct efx_channel
*channel
;
1879 efx_for_each_channel(channel
, efx
)
1880 kfree(channel
->rps_flow_id
);
1882 down_write(&efx
->filter_sem
);
1883 efx
->type
->filter_table_remove(efx
);
1884 up_write(&efx
->filter_sem
);
1888 /**************************************************************************
1890 * NIC startup/shutdown
1892 *************************************************************************/
1894 static int efx_probe_all(struct efx_nic
*efx
)
1898 rc
= efx_probe_nic(efx
);
1900 netif_err(efx
, probe
, efx
->net_dev
, "failed to create NIC\n");
1904 rc
= efx_probe_port(efx
);
1906 netif_err(efx
, probe
, efx
->net_dev
, "failed to create port\n");
1910 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE
< EFX_RXQ_MIN_ENT
);
1911 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE
< EFX_TXQ_MIN_ENT(efx
))) {
1915 efx
->rxq_entries
= efx
->txq_entries
= EFX_DEFAULT_DMAQ_SIZE
;
1917 #ifdef CONFIG_SFC_SRIOV
1918 rc
= efx
->type
->vswitching_probe(efx
);
1919 if (rc
) /* not fatal; the PF will still work fine */
1920 netif_warn(efx
, probe
, efx
->net_dev
,
1921 "failed to setup vswitching rc=%d;"
1922 " VFs may not function\n", rc
);
1925 rc
= efx_probe_filters(efx
);
1927 netif_err(efx
, probe
, efx
->net_dev
,
1928 "failed to create filter tables\n");
1932 rc
= efx_probe_channels(efx
);
1939 efx_remove_filters(efx
);
1941 #ifdef CONFIG_SFC_SRIOV
1942 efx
->type
->vswitching_remove(efx
);
1945 efx_remove_port(efx
);
1947 efx_remove_nic(efx
);
1952 /* If the interface is supposed to be running but is not, start
1953 * the hardware and software data path, regular activity for the port
1954 * (MAC statistics, link polling, etc.) and schedule the port to be
1955 * reconfigured. Interrupts must already be enabled. This function
1956 * is safe to call multiple times, so long as the NIC is not disabled.
1957 * Requires the RTNL lock.
1959 static void efx_start_all(struct efx_nic
*efx
)
1961 EFX_ASSERT_RESET_SERIALISED(efx
);
1962 BUG_ON(efx
->state
== STATE_DISABLED
);
1964 /* Check that it is appropriate to restart the interface. All
1965 * of these flags are safe to read under just the rtnl lock */
1966 if (efx
->port_enabled
|| !netif_running(efx
->net_dev
) ||
1970 efx_start_port(efx
);
1971 efx_start_datapath(efx
);
1973 /* Start the hardware monitor if there is one */
1974 if (efx
->type
->monitor
!= NULL
)
1975 queue_delayed_work(efx
->workqueue
, &efx
->monitor_work
,
1976 efx_monitor_interval
);
1978 /* Link state detection is normally event-driven; we have
1979 * to poll now because we could have missed a change
1981 mutex_lock(&efx
->mac_lock
);
1982 if (efx
->phy_op
->poll(efx
))
1983 efx_link_status_changed(efx
);
1984 mutex_unlock(&efx
->mac_lock
);
1986 efx
->type
->start_stats(efx
);
1987 efx
->type
->pull_stats(efx
);
1988 spin_lock_bh(&efx
->stats_lock
);
1989 efx
->type
->update_stats(efx
, NULL
, NULL
);
1990 spin_unlock_bh(&efx
->stats_lock
);
1993 /* Quiesce the hardware and software data path, and regular activity
1994 * for the port without bringing the link down. Safe to call multiple
1995 * times with the NIC in almost any state, but interrupts should be
1996 * enabled. Requires the RTNL lock.
1998 static void efx_stop_all(struct efx_nic
*efx
)
2000 EFX_ASSERT_RESET_SERIALISED(efx
);
2002 /* port_enabled can be read safely under the rtnl lock */
2003 if (!efx
->port_enabled
)
2006 /* update stats before we go down so we can accurately count
2009 efx
->type
->pull_stats(efx
);
2010 spin_lock_bh(&efx
->stats_lock
);
2011 efx
->type
->update_stats(efx
, NULL
, NULL
);
2012 spin_unlock_bh(&efx
->stats_lock
);
2013 efx
->type
->stop_stats(efx
);
2016 /* Stop the kernel transmit interface. This is only valid if
2017 * the device is stopped or detached; otherwise the watchdog
2018 * may fire immediately.
2020 WARN_ON(netif_running(efx
->net_dev
) &&
2021 netif_device_present(efx
->net_dev
));
2022 netif_tx_disable(efx
->net_dev
);
2024 efx_stop_datapath(efx
);
2027 static void efx_remove_all(struct efx_nic
*efx
)
2029 efx_remove_channels(efx
);
2030 efx_remove_filters(efx
);
2031 #ifdef CONFIG_SFC_SRIOV
2032 efx
->type
->vswitching_remove(efx
);
2034 efx_remove_port(efx
);
2035 efx_remove_nic(efx
);
2038 /**************************************************************************
2040 * Interrupt moderation
2042 **************************************************************************/
2043 unsigned int efx_usecs_to_ticks(struct efx_nic
*efx
, unsigned int usecs
)
2047 if (usecs
* 1000 < efx
->timer_quantum_ns
)
2048 return 1; /* never round down to 0 */
2049 return usecs
* 1000 / efx
->timer_quantum_ns
;
2052 unsigned int efx_ticks_to_usecs(struct efx_nic
*efx
, unsigned int ticks
)
2054 /* We must round up when converting ticks to microseconds
2055 * because we round down when converting the other way.
2057 return DIV_ROUND_UP(ticks
* efx
->timer_quantum_ns
, 1000);
2060 /* Set interrupt moderation parameters */
2061 int efx_init_irq_moderation(struct efx_nic
*efx
, unsigned int tx_usecs
,
2062 unsigned int rx_usecs
, bool rx_adaptive
,
2063 bool rx_may_override_tx
)
2065 struct efx_channel
*channel
;
2066 unsigned int timer_max_us
;
2068 EFX_ASSERT_RESET_SERIALISED(efx
);
2070 timer_max_us
= efx
->timer_max_ns
/ 1000;
2072 if (tx_usecs
> timer_max_us
|| rx_usecs
> timer_max_us
)
2075 if (tx_usecs
!= rx_usecs
&& efx
->tx_channel_offset
== 0 &&
2076 !rx_may_override_tx
) {
2077 netif_err(efx
, drv
, efx
->net_dev
, "Channels are shared. "
2078 "RX and TX IRQ moderation must be equal\n");
2082 efx
->irq_rx_adaptive
= rx_adaptive
;
2083 efx
->irq_rx_moderation_us
= rx_usecs
;
2084 efx_for_each_channel(channel
, efx
) {
2085 if (efx_channel_has_rx_queue(channel
))
2086 channel
->irq_moderation_us
= rx_usecs
;
2087 else if (efx_channel_has_tx_queues(channel
))
2088 channel
->irq_moderation_us
= tx_usecs
;
2094 void efx_get_irq_moderation(struct efx_nic
*efx
, unsigned int *tx_usecs
,
2095 unsigned int *rx_usecs
, bool *rx_adaptive
)
2097 *rx_adaptive
= efx
->irq_rx_adaptive
;
2098 *rx_usecs
= efx
->irq_rx_moderation_us
;
2100 /* If channels are shared between RX and TX, so is IRQ
2101 * moderation. Otherwise, IRQ moderation is the same for all
2102 * TX channels and is not adaptive.
2104 if (efx
->tx_channel_offset
== 0) {
2105 *tx_usecs
= *rx_usecs
;
2107 struct efx_channel
*tx_channel
;
2109 tx_channel
= efx
->channel
[efx
->tx_channel_offset
];
2110 *tx_usecs
= tx_channel
->irq_moderation_us
;
2114 /**************************************************************************
2118 **************************************************************************/
2120 /* Run periodically off the general workqueue */
2121 static void efx_monitor(struct work_struct
*data
)
2123 struct efx_nic
*efx
= container_of(data
, struct efx_nic
,
2126 netif_vdbg(efx
, timer
, efx
->net_dev
,
2127 "hardware monitor executing on CPU %d\n",
2128 raw_smp_processor_id());
2129 BUG_ON(efx
->type
->monitor
== NULL
);
2131 /* If the mac_lock is already held then it is likely a port
2132 * reconfiguration is already in place, which will likely do
2133 * most of the work of monitor() anyway. */
2134 if (mutex_trylock(&efx
->mac_lock
)) {
2135 if (efx
->port_enabled
)
2136 efx
->type
->monitor(efx
);
2137 mutex_unlock(&efx
->mac_lock
);
2140 queue_delayed_work(efx
->workqueue
, &efx
->monitor_work
,
2141 efx_monitor_interval
);
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (cmd == SIOCSHWTSTAMP)
		return efx_ptp_set_ts_config(efx, ifr);
	if (cmd == SIOCGHWTSTAMP)
		return efx_ptp_get_ts_config(efx, ifr);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
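/* Example of the phy_id conversion above (illustrative value): a legacy
 * encoding such as 0x0405 matches (phy_id & 0xfc00) == 0x0400, and XORing
 * with MDIO_PHY_ID_C45 | 0x0400 clears the 0x0400 marker bit while setting
 * the clause-45 flag that mdio_mii_ioctl() expects.
 */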
/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	if (efx->state == STATE_DISABLED || efx->reset_pending)
		netif_device_detach(efx->net_dev);
	efx_selftest_async_start(efx);
	return 0;
}
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static void efx_net_stats(struct net_device *net_dev,
			  struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, stats);
	spin_unlock_bh(&efx->stats_lock);
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_device_detach_sync(efx);
	efx_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	net_dev->mtu = new_mtu;
	efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	efx_device_attach_if_not_resetting(efx);
	return 0;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	u8 *new_addr = addr->sa_data;
	u8 old_addr[6];
	int rc;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	/* save old address */
	ether_addr_copy(old_addr, net_dev->dev_addr);
	ether_addr_copy(net_dev->dev_addr, new_addr);
	if (efx->type->set_mac_address) {
		rc = efx->type->set_mac_address(efx);
		if (rc) {
			ether_addr_copy(net_dev->dev_addr, old_addr);
			return rc;
		}
	}

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
		rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
		if (rc)
			return rc;
	}

	/* If Rx VLAN filter is changed, update filters via mac_reconfigure.
	 * If rx-fcs is changed, mac_reconfigure updates that too.
	 */
	if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
					  NETIF_F_RXFCS)) {
		/* efx_set_rx_mode() will schedule MAC work to update filters
		 * when a new features are finally set in net_dev.
		 */
		efx_set_rx_mode(net_dev);
	}

	return 0;
}
static int efx_get_phys_port_id(struct net_device *net_dev,
				struct netdev_phys_item_id *ppid)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->get_phys_port_id)
		return efx->type->get_phys_port_id(efx, ppid);
	else
		return -EOPNOTSUPP;
}

static int efx_get_phys_port_name(struct net_device *net_dev,
				  char *name, size_t len)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (snprintf(name, len, "p%u", efx->port_num) >= len)
		return -EINVAL;
	return 0;
}
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->vlan_rx_add_vid)
		return efx->type->vlan_rx_add_vid(efx, proto, vid);
	else
		return -EOPNOTSUPP;
}

static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->vlan_rx_kill_vid)
		return efx->type->vlan_rx_kill_vid(efx, proto, vid);
	else
		return -EOPNOTSUPP;
}
static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
{
	switch (in) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
	case UDP_TUNNEL_TYPE_GENEVE:
		return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
	default:
		return -1;
	}
}
static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct efx_nic *efx = netdev_priv(dev);
	struct efx_udp_tunnel tnl;
	int efx_tunnel_type;

	efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
	if (efx_tunnel_type < 0)
		return;

	tnl.type = (u16)efx_tunnel_type;
	tnl.port = ti->port;

	if (efx->type->udp_tnl_add_port)
		(void)efx->type->udp_tnl_add_port(efx, tnl);
}

static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct efx_nic *efx = netdev_priv(dev);
	struct efx_udp_tunnel tnl;
	int efx_tunnel_type;

	efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
	if (efx_tunnel_type < 0)
		return;

	tnl.type = (u16)efx_tunnel_type;
	tnl.port = ti->port;

	if (efx->type->udp_tnl_del_port)
		(void)efx->type->udp_tnl_del_port(efx, tnl);
}
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
	.ndo_vlan_rx_add_vid	= efx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
	.ndo_set_vf_link_state	= efx_sriov_set_vf_link_state,
#endif
	.ndo_get_phys_port_id	= efx_get_phys_port_id,
	.ndo_get_phys_port_name	= efx_get_phys_port_name,
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
	.ndo_udp_tunnel_add	= efx_udp_tunnel_add,
	.ndo_udp_tunnel_del	= efx_udp_tunnel_del,
};
static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if ((net_dev->netdev_ops == &efx_netdev_ops) &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
#ifdef CONFIG_SFC_MCDI_LOGGING
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
}
static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool enable = count > 0 && *buf != '0';

	mcdi->logging_enabled = enable;
	return count;
}
static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
#endif
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		net_dev->priv_flags |= IFF_UNICAST_FLT;
	net_dev->ethtool_ops = &efx_ethtool_ops;
	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested.  If so, the NIC is probably hosed so we
	 * abort.
	 */
	efx->state = STATE_READY;
	smp_mb(); /* ensure we change state before checking reset_pending */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	efx_associate(efx);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}
#ifdef CONFIG_SFC_MCDI_LOGGING
	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_attr_mcdi_logging;
	}
#endif

	return 0;

#ifdef CONFIG_SFC_MCDI_LOGGING
fail_attr_mcdi_logging:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
#endif
fail_registered:
	rtnl_lock();
	efx_dissociate(efx);
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
#ifdef CONFIG_SFC_MCDI_LOGGING
		device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
#endif
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset.  */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->prepare_flr(efx);

	efx_stop_all(efx);
	efx_disable_interrupts(efx);

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	mutex_lock(&efx->rss_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->finish_flr(efx);

	/* Ensure that SRAM is initialised even if we're disabling the device */
	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		rc = efx->phy_op->reconfigure(efx);
		if (rc && rc != -EPERM)
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_restore(efx);
	if (rc) /* not fatal; the PF will still work fine */
		netif_warn(efx, probe, efx->net_dev,
			   "failed to restore vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	if (efx->type->rx_restore_rss_contexts)
		efx->type->rx_restore_rss_contexts(efx);
	mutex_unlock(&efx->rss_lock);
	efx->type->filter_table_restore(efx);
	up_write(&efx->filter_sem);
	if (efx->type->sriov_reset)
		efx->type->sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	if (efx->type->udp_tnl_push_ports)
		efx->type->udp_tnl_push_ports(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->rss_lock);
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);

	return rc;
}
/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	efx_device_detach_sync(efx);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered.  We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	if (method < RESET_TYPE_MAX_METHOD)
		efx->reset_pending &= -(1 << (method + 1));
	else /* it doesn't fit into the well-ordered scope hierarchy */
		__clear_bit(method, &efx->reset_pending);

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;
}
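/* Note on the scope-clearing arithmetic in efx_reset() above (derived from
 * the code, not an additional requirement): reset types below
 * RESET_TYPE_MAX_METHOD are ordered by increasing scope, so a completed
 * reset also satisfies every lesser pending request.  For example, with
 * method == 2 the mask -(1 << 3) is ...11111000, clearing pending bits
 * 0..2 while leaving any larger-scope requests set.
 */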
/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus. In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}
static void efx_wait_for_bist_end(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
		if (efx_mcdi_poll_reboot(efx))
			goto out;
		msleep(BIST_WAIT_DELAY_MS);
	}

	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
out:
	/* Either way unset the BIST flag. If we found no reboot we probably
	 * won't recover, but we should try.
	 */
	efx->mc_bist_for_other_fn = false;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

	pending = READ_ONCE(efx->reset_pending);
	method = fls(pending) - 1;

	if (method == RESET_TYPE_MC_BIST)
		efx_wait_for_bist_end(efx);

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    efx_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in efx_schedule_reset() but it may
	 * have changed by now.  Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)efx_reset(efx, method);

	rtnl_unlock();
}
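/* Example of the priority selection in efx_reset_work() (illustrative
 * value): if reset_pending == 0x16 (bits 1, 2 and 4 set), fls() returns 5
 * and method becomes 4, i.e. the largest-scope pending reset wins and, via
 * the scope clearing in efx_reset(), subsumes the smaller ones.
 */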
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
	case RESET_TYPE_DATAPATH:
	case RESET_TYPE_MC_BIST:
	case RESET_TYPE_MCDI_TIMEOUT:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (READ_ONCE(efx->state) != STATE_READY)
		return;

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to poll'd MCDI completions. */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),	/* SFC9120 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),	/* SFC9120 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),	/* SFC9140 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),	/* SFC9140 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),	/* SFC9220 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),	/* SFC9220 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),	/* SFC9250 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),	/* SFC9250 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{0}			/* end of list */
};
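/* Each entry's driver_data points at the efx_nic_type for that device;
 * efx_pci_probe() casts it back when binding, so supporting a new
 * controller is a matter of adding a {PCI_DEVICE(...), .driver_data = ...}
 * line here alongside the corresponding nic_type definition.
 */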
/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_int,
	.poll		 = efx_port_dummy_op_poll,
	.fini		 = efx_port_dummy_op_void,
};
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int rc = -ENOMEM, i;

	/* Initialise common structures */
	INIT_LIST_HEAD(&efx->node);
	INIT_LIST_HEAD(&efx->secondary_list);
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	efx->rx_prefix_size = efx->type->rx_prefix_size;
	efx->rx_ip_align =
		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
	efx->rx_packet_hash_offset =
		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
	efx->rx_packet_ts_offset =
		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
	INIT_LIST_HEAD(&efx->rss_context.list);
	mutex_init(&efx->rss_lock);
	spin_lock_init(&efx->stats_lock);
	efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
	mutex_init(&efx->mac_lock);
#ifdef CONFIG_RFS_ACCEL
	mutex_init(&efx->rps_mutex);
	spin_lock_init(&efx->rps_hash_lock);
	/* Failure to allocate is not fatal, but may degrade ARFS performance */
	efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
				      sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
			 efx->type->min_interrupt_mode)) {
		rc = -EIO;
		goto fail;
	}
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx->interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	efx_fini_struct(efx);
	return rc;
}
static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	kfree(efx->rps_hash_table);
#endif

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	kfree(efx->vpd_sn);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
	u64 n_rx_nodesc_trunc = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
}
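/* Both helpers above deliberately skip the head of efx_filter_spec and only
 * compare/hash the fields from outer_vid onwards: the earlier fields
 * (roughly priority, flags and destination queue) describe what to do with
 * a match rather than what to match, with match_flags and the RX/TX flags
 * handled separately in efx_filter_spec_equal().
 */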
#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date.  Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed.  Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}
static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}
struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}
void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry.  We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif /* CONFIG_RFS_ACCEL */
/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap.  If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}
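/* Example of the gap search in efx_alloc_rss_context_entry() (illustrative
 * IDs): with existing user_ids 1, 2 and 4, the loop breaks at the entry
 * with user_id 4 once id has advanced to 3, so the new context is inserted
 * before it and takes user_id 3, keeping the list sorted by user_id.
 */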
/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	/* Flush reset_work. It can no longer be scheduled since we
	 * are not READY.
	 */
	BUG_ON(efx->state == STATE_READY);
	cancel_work_sync(&efx->reset_work);

	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).  A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx_dissociate(efx);
	dev_close(efx->net_dev);
	efx_disable_interrupts(efx);
	efx->state = STATE_UNINIT;
	rtnl_unlock();

	if (efx->type->sriov_fini)
		efx->type->sriov_fini(efx);

	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	efx_fini_struct(efx);
	free_netdev(efx->net_dev);

	pci_disable_pcie_error_reporting(pci_dev);
}
/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.  VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int ro_start, ro_size, i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (ro_start < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);

	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	j = ro_size;
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
		return;
	}

	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
	if (!efx->vpd_sn)
		return;

	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	down_write(&efx->filter_sem);
	rc = efx->type->init(efx);
	up_write(&efx->filter_sem);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;

	efx_set_interrupt_affinity(efx);
	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
 fail5:
	efx_fini_port(efx);
 fail4:
	efx->type->fini(efx);
 fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
 fail1:
	return rc;
}
static int efx_pci_probe_post_io(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc = efx_pci_probe_main(efx);

	if (rc)
		return rc;

	if (efx->type->sriov_init) {
		rc = efx->type->sriov_init(efx);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "SR-IOV can't be enabled rc %d\n", rc);
	}

	/* Determine netdevice features */
	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
			      NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		net_dev->features |= NETIF_F_TSO6;
	/* Check whether device supports TSO */
	if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
		net_dev->features &= ~NETIF_F_ALL_TSO;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);

	net_dev->hw_features |= net_dev->features & ~efx->fixed_features;

	/* Disable receiving frames with bad FCS, by default. */
	net_dev->features &= ~NETIF_F_RXALL;

	/* Disable VLAN filtering by default.  It may be enforced if
	 * the feature is fixed (i.e. VLAN filters are required to
	 * receive VLAN tagged packets due to vPort restrictions).
	 */
	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	net_dev->features |= efx->fixed_features;

	rc = efx_register_netdev(efx);
	if (rc)
		efx_pci_remove_main(efx);

	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	efx = netdev_priv(net_dev);
	efx->type = (const struct efx_nic_type *) entry->driver_data;
	efx->fixed_features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	if (!efx->type->is_vf)
		efx_probe_vpd_strings(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	rc = efx_pci_probe_post_io(efx);
	if (rc) {
		/* On failure, retry once immediately.
		 * If we aborted probe due to a scheduled reset, dismiss it.
		 */
		efx->reset_pending = 0;
		rc = efx_pci_probe_post_io(efx);
		if (rc) {
			/* On another failure, retry once more
			 * after a 50-305ms delay.
			 */
			unsigned char r;

			get_random_bytes(&r, 1);
			msleep((unsigned int)r + 50);
			efx->reset_pending = 0;
			rc = efx_pci_probe_post_io(efx);
		}
	}
	if (rc)
		goto fail3;

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc && rc != -EPERM)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	rc = pci_enable_pcie_error_reporting(pci_dev);
	if (rc && rc != -EINVAL)
		netif_notice(efx, probe, efx->net_dev,
			     "PCIE error reporting unavailable (%d).\n",
			     rc);

	if (efx->type->udp_tnl_push_ports)
		efx->type->udp_tnl_push_ports(efx);

	return 0;

 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
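/* The retry delay above is derived from a single random byte: r is in the
 * range 0-255, so msleep((unsigned int)r + 50) sleeps between 50 and 305 ms,
 * which is where the "50-305ms" figure in the comment comes from.
 */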
/* efx_pci_sriov_configure returns the actual number of Virtual Functions
 * enabled on success
 */
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	int rc;
	struct efx_nic *efx = pci_get_drvdata(dev);

	if (efx->type->sriov_configure) {
		rc = efx->type->sriov_configure(efx, num_vfs);
		if (rc)
			return rc;
		else
			return num_vfs;
	} else
		return -EOPNOTSUPP;
}
#endif
static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_disable_interrupts(efx);
	}

	rtnl_unlock();

	return 0;
}
static int efx_pm_thaw(struct device *dev)
{
	int rc;
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		rc = efx_enable_interrupts(efx);
		if (rc)
			goto fail;

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		efx_start_all(efx);

		efx_device_attach_if_not_resetting(efx);

		efx->state = STATE_READY;

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;

fail:
	rtnl_unlock();

	return rc;
}
static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}
/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	down_write(&efx->filter_sem);
	rc = efx->type->init(efx);
	up_write(&efx->filter_sem);
	if (rc)
		return rc;
	rc = efx_pm_thaw(dev);
	return rc;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);

	return rc;
}
static const struct dev_pm_ops efx_pm_ops = {
	.suspend	= efx_pm_suspend,
	.resume		= efx_pm_resume,
	.freeze		= efx_pm_freeze,
	.thaw		= efx_pm_thaw,
	.poweroff	= efx_pm_poweroff,
	.restore	= efx_pm_resume,
};
/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
					      enum pci_channel_state state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct efx_nic *efx = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_RECOVERY;
		efx->reset_pending = 0;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_disable_interrupts(efx);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}
/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	int rc;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
		/* Non-fatal error. Continue. */
	}

	return status;
}
/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = efx_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "efx_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = STATE_READY;
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}
/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callback unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
static const struct pci_error_handlers efx_err_handlers = {
	.error_detected = efx_io_error_detected,
	.slot_reset	= efx_io_slot_reset,
	.resume		= efx_io_resume,
};
static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
	.err_handler	= &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_pci_sriov_configure,
#endif
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

#ifdef CONFIG_SFC_SRIOV
	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;
#endif

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
#ifdef CONFIG_SFC_SRIOV
	efx_fini_sriov();
 err_sriov:
#endif
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
#ifdef CONFIG_SFC_SRIOV
	efx_fini_sriov();
#endif
	unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
MODULE_VERSION(EFX_DRIVER_VERSION);