// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int efx_siena_rss_cpus;

static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static const struct efx_channel_type efx_default_channel_type;
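
/* Count distinct online CPU cores (one CPU per hyperthread/sibling group),
 * optionally restricted to CPUs local to the NIC's NUMA node.
 */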
static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
	cpumask_var_t filter_mask;
	unsigned int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_copy(filter_mask, cpu_online_mask);
	if (local_node)
		cpumask_and(filter_mask, filter_mask,
			    cpumask_of_pcibus(efx->pci_dev->bus));

	count = 0;
	for_each_cpu(cpu, filter_mask) {
		++count;
		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(filter_mask);

	return count;
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	unsigned int count;

	if (efx_siena_rss_cpus) {
		count = efx_siena_rss_cpus;
	} else {
		count = count_online_cores(efx, true);

		/* If no online CPUs in local node, fallback to any online CPUs */
		if (!count)
			count = count_online_cores(efx, false);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}

static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_siena_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
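
	/* n_xdp_ev above is the number of event queues needed so that every
	 * possible CPU can have its own XDP TX queue, with up to tx_per_ev
	 * TX queues sharing one event queue.
	 */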

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels >= max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			   n_xdp_tx, n_channels, efx->max_vis);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_ev > max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);

		n_xdp_ev = max_channels - n_channels;
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
	} else {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
	}
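
	/* Three outcomes are possible here: DEDICATED means every possible CPU
	 * gets its own XDP TX queue on dedicated XDP channels; SHARED means
	 * dedicated XDP channels exist but several CPUs have to share a TX
	 * queue; BORROWED means no XDP channels could be allocated at all and
	 * XDP transmit will borrow the regular TX queues instead.
	 */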
	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_ev * tx_per_ev, n_xdp_ev);
	} else {
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = n_xdp_tx;
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_siena_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_siena_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->tx_channel_offset = 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}
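
/* Spread the channel IRQs over the online CPUs of the NIC's local NUMA node
 * (falling back to all online CPUs), assigning one CPU per channel in
 * round-robin order.
 */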
#if defined(CONFIG_SMP)
void efx_siena_set_interrupt_affinity(struct efx_nic *efx)
{
	const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
	struct efx_channel *channel;
	unsigned int cpu;

	/* If no online CPUs in local node, fallback to any online CPU */
	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
		numa_mask = cpu_online_mask;

	cpu = -1;
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_siena_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}

void
efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}
#endif /* CONFIG_SMP */

void efx_siena_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_siena_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_siena_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
					min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);

	return channel;
}

int efx_siena_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_siena_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_siena_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_siena_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_siena_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_siena_remove_channel(channel);

	return rc;
}

static void efx_get_channel_name(struct efx_channel *channel, char *buf,
				 size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
void efx_siena_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_siena_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_siena_set_channel_names(efx);

	return 0;

fail:
	efx_siena_remove_channels(efx);

	return rc;
}

void efx_siena_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_siena_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_siena_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_siena_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_siena_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
				struct efx_tx_queue *tx_queue)
{
	if (xdp_queue_number >= efx->xdp_tx_queue_count)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev,
		  "Channel %u TXQ %u is XDP %u, HW %u\n",
		  tx_queue->channel->channel, tx_queue->label,
		  xdp_queue_number, tx_queue->queue);
	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;

	return 0;
}

static void efx_set_xdp_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number = 0;
	int rc;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->tx_channel_offset)
			continue;

		if (efx_channel_is_xdp_tx(channel)) {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		} else {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				netif_dbg(efx, drv, efx->net_dev,
					  "Channel %u TXQ %u is HW %u\n",
					  channel->channel, tx_queue->label,
					  tx_queue->queue);
			}

			/* If XDP is borrowing queues from net stack, it must
			 * use the queue with no csum offload, which is the
			 * first one of the channel
			 * (note: tx_queue_by_type is not initialized yet)
			 */
			if (efx->xdp_txq_queues_mode ==
			    EFX_XDP_TX_QUEUES_BORROWED) {
				tx_queue = &channel->tx_queue[0];
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		}
	}
	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number != efx->xdp_tx_queue_count);
	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number > efx->xdp_tx_queue_count);

	/* If we have more CPUs than assigned XDP TX queues, assign the already
	 * existing queues to the exceeding CPUs
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}
}
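
/* When there are fewer XDP TX queues than possible CPUs (the SHARED and
 * BORROWED modes), the final loop above wraps around and maps several CPUs
 * onto the same hardware queue, so every xdp_tx_queues[] entry ends up
 * populated.
 */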

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi_channel(struct efx_channel *channel);
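
/* Re-allocate channels with new RX/TX ring sizes: channels whose type can be
 * copied are cloned, the clones are swapped in and probed, and if probing any
 * clone fails the old channels are swapped back in (see the rollback label
 * below).
 */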
int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
			       u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_siena_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

	efx_set_xdp_channels(efx);
out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_siena_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_siena_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	goto out;
}

int efx_siena_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;
	}

	efx_set_xdp_channels(efx);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}

static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_siena_start_eventq(channel);
	}

	efx_siena_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_siena_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_siena_mcdi_flush_async(efx);
}

int efx_siena_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_siena_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

void efx_siena_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel_rev(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_siena_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_siena_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_siena_stop_eventq(channel);
			efx_siena_fast_push_rx_descriptors(rx_queue, false);
			efx_siena_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_siena_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_siena_stop_eventq(channel);
			efx_siena_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_siena_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_siena_fini_tx_queue(tx_queue);
	}
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_siena_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
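
/* The thresholds above come from the irq_adapt_low_thresh and
 * irq_adapt_high_thresh module parameters; efx_poll() resets the score and
 * calls this roughly once per 1000 interrupts when adaptive IRQ moderation
 * is enabled.
 */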

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
}

void efx_siena_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_siena_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

static int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_siena_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_siena_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,