/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

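/*
 * Worked example of the hysteresis above (illustrative numbers only; the
 * real decision is made in efx_rx_strategy() below): starting from
 * rx_alloc_level = 0, each packet merged by GRO adds RX_ALLOC_FACTOR_GRO
 * (+1) and each packet delivered as an ordinary skb adds
 * RX_ALLOC_FACTOR_SKB (-2), with the level clamped to
 * 0..RX_ALLOC_LEVEL_MAX.  Page-based allocation is chosen only once the
 * level exceeds RX_ALLOC_LEVEL_GRO (0x2000), i.e. after a sustained run
 * of GRO successes, and skb allocation is restored once enough non-GRO
 * traffic drags the level back below that watermark.
 */
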
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
                                             struct efx_rx_buffer *buf)
{
        return buf->page_offset + efx->type->rx_buffer_hash_size;
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
        return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
        if (buf->flags & EFX_RX_BUF_PAGE)
                return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
        else
                return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
        /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
        return __le32_to_cpup((const __le32 *)(eh - 4));
#else
        const u8 *data = eh - 4;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue: Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        struct efx_rx_buffer *rx_buf;
        struct sk_buff *skb;
        int skb_len = efx->rx_buffer_len;
        unsigned index, count;

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);

                rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
                if (unlikely(!skb))
                        return -ENOMEM;

                /* Adjust the SKB for padding */
                skb_reserve(skb, NET_IP_ALIGN);
                rx_buf->len = skb_len - NET_IP_ALIGN;
                rx_buf->flags = 0;

                rx_buf->dma_addr = pci_map_single(efx->pci_dev,
                                                  skb->data, rx_buf->len,
                                                  PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(efx->pci_dev,
                                                   rx_buf->dma_addr))) {
                        dev_kfree_skb_any(skb);
                        rx_buf->u.skb = NULL;
                        return -EIO;
                }

                ++rx_queue->added_count;
                ++rx_queue->alloc_skb_count;
        }

        return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        void *page_addr;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        /* We can split a page between two buffers */
        BUILD_BUG_ON(EFX_RX_BATCH & 1);

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                   efx->rx_buffer_order);
                if (unlikely(page == NULL))
                        return -ENOMEM;
                dma_addr = pci_map_page(efx->pci_dev, page, 0,
                                        efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(page, efx->rx_buffer_order);
                        return -EIO;
                }
                page_addr = page_address(page);
                state = page_addr;
                state->refcnt = 0;
                state->dma_addr = dma_addr;

                page_addr += sizeof(struct efx_rx_page_state);
                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

        split:
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->u.page = page;
                rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                rx_buf->flags = EFX_RX_BUF_PAGE;
                ++rx_queue->added_count;
                ++rx_queue->alloc_page_count;
                ++state->refcnt;

                if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
                        /* Use the second half of the page */
                        get_page(page);
                        dma_addr += (PAGE_SIZE >> 1);
                        page_addr += (PAGE_SIZE >> 1);
                        page_offset += (PAGE_SIZE >> 1);
                        ++count;
                        goto split;
                }
        }

        return 0;
}

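/*
 * Illustrative layout of one page prepared by efx_init_rx_buffers_page()
 * when two buffers share it (i.e. rx_buffer_len <= EFX_RX_HALF_PAGE);
 * this is a sketch of the offsets computed above, not values read back
 * from the driver:
 *
 *   offset 0                                     struct efx_rx_page_state
 *   offset sizeof(state) + EFX_PAGE_IP_ALIGN     RX buffer 0
 *   offset sizeof(state) + PAGE_SIZE/2
 *          + EFX_PAGE_IP_ALIGN                   RX buffer 1
 *
 * Both buffers reference the same page and the single DMA mapping held in
 * the page state, which is why the state keeps a refcnt and the page's
 * reference count is raised for the second half.
 */
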
static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf,
                                unsigned int used_len)
{
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                struct efx_rx_page_state *state;

                state = page_address(rx_buf->u.page);
                if (--state->refcnt == 0) {
                        pci_unmap_page(efx->pci_dev,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
                } else if (used_len) {
                        dma_sync_single_for_cpu(&efx->pci_dev->dev,
                                                rx_buf->dma_addr, used_len,
                                                DMA_FROM_DEVICE);
                }
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
}

static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
{
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                __free_pages(rx_buf->u.page, efx->rx_buffer_order);
                rx_buf->u.page = NULL;
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dev_kfree_skb_any(rx_buf->u.skb);
                rx_buf->u.skb = NULL;
        }
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
                                    struct efx_rx_buffer *rx_buf)
{
        struct efx_rx_page_state *state = page_address(rx_buf->u.page);
        struct efx_rx_buffer *new_buf;
        unsigned fill_level, index;

        /* +1 because efx_rx_packet() incremented removed_count. +1 because
         * we'd like to insert an additional descriptor whilst leaving
         * EFX_RXD_HEAD_ROOM for the non-recycle path */
        fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
        if (unlikely(fill_level > rx_queue->max_fill)) {
                /* We could place "state" on a list, and drain the list in
                 * efx_fast_push_rx_descriptors(). For now, this will do. */
                return;
        }

        ++state->refcnt;
        get_page(rx_buf->u.page);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
        new_buf->u.page = rx_buf->u.page;
        new_buf->page_offset = rx_buf->page_offset ^ (PAGE_SIZE >> 1);
        new_buf->dma_addr = state->dma_addr + new_buf->page_offset;
        new_buf->len = rx_buf->len;
        new_buf->flags = EFX_RX_BUF_PAGE;
        ++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_rx_buffer *new_buf;
        unsigned index;

        rx_buf->flags &= EFX_RX_BUF_PAGE;

        if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
            efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
            page_count(rx_buf->u.page) == 1)
                efx_resurrect_rx_buffer(rx_queue, rx_buf);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);

        memcpy(new_buf, rx_buf, sizeof(*new_buf));
        rx_buf->u.page = NULL;
        ++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        unsigned fill_level;
        int space, rc = 0;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        space = rx_queue->fast_fill_limit - fill_level;
        if (space < EFX_RX_BATCH)
                goto out;

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d using %s allocation\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->fast_fill_limit,
                   channel->rx_alloc_push_pages ? "page" : "skb");

        do {
                if (channel->rx_alloc_push_pages)
                        rc = efx_init_rx_buffers_page(rx_queue);
                else
                        rc = efx_init_rx_buffers_skb(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len, bool *leak_packet)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                /* If this buffer was skb-allocated, then the meta
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
                *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
                              struct efx_rx_buffer *rx_buf,
                              const u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;

        if (rx_buf->flags & EFX_RX_BUF_PAGE) {
                struct efx_nic *efx = channel->efx;
                struct page *page = rx_buf->u.page;
                struct sk_buff *skb;

                rx_buf->u.page = NULL;

                skb = napi_get_frags(napi);
                if (unlikely(!skb)) {
                        put_page(page);
                        return;
                }

                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(eh);

                skb_fill_page_desc(skb, 0, page,
                                   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
                skb->truesize += rx_buf->len;
                skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                                  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

                skb_record_rx_queue(skb, channel->channel);

                gro_result = napi_gro_frags(napi);
        } else {
                struct sk_buff *skb = rx_buf->u.skb;

                EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
                rx_buf->u.skb = NULL;
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                gro_result = napi_gro_receive(napi, skb);
        }

        if (gro_result == GRO_NORMAL) {
                channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        } else if (gro_result != GRO_DROP) {
                channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
                channel->irq_mod_score += 2;
        }
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;
        bool leak_packet = false;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
         * isn't overwritten yet.
         */
        rx_queue->removed_count++;

        /* Validate the length encoded in the event vs the descriptor pushed */
        efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received id %x at %llx+%x %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (unsigned long long)rx_buf->dma_addr, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                if (unlikely(leak_packet))
                        channel->n_skbuff_leaks++;
                else
                        efx_recycle_rx_buffer(channel, rx_buf);

                /* Don't hold off the previous receive */
                rx_buf = NULL;
                goto out;
        }

        /* Release and/or sync DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue
         */
        efx_unmap_rx_buffer(efx, rx_buf, len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_eh(efx, rx_buf));

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
        if (channel->rx_pkt)
                __efx_rx_packet(channel, channel->rx_pkt);
        channel->rx_pkt = rx_buf;
}

static void efx_rx_deliver(struct efx_channel *channel,
                           struct efx_rx_buffer *rx_buf)
{
        struct sk_buff *skb;

        /* We now own the SKB */
        skb = rx_buf->u.skb;
        rx_buf->u.skb = NULL;

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);

        /* Pass the packet up */
        netif_receive_skb(skb);

        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = channel->efx;
        u8 *eh = efx_rx_buf_eh(efx, rx_buf);

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                return;
        }

        if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
                struct sk_buff *skb = rx_buf->u.skb;

                prefetch(skb_shinfo(skb));

                skb_reserve(skb, efx->type->rx_buffer_hash_size);
                skb_put(skb, rx_buf->len);

                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(eh);

                /* Move past the ethernet header. rx_buf->data still points
                 * at the ethernet header */
                skb->protocol = eth_type_trans(skb, efx->net_dev);

                skb_record_rx_queue(skb, channel->channel);
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

        if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
                efx_rx_packet_gro(channel, rx_buf, eh);
        else
                efx_rx_deliver(channel, rx_buf);
}

void efx_rx_strategy(struct efx_channel *channel)
{
        enum efx_rx_alloc_method method = rx_alloc_method;

        /* Only makes sense to use page based allocation if GRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                method = RX_ALLOC_METHOD_SKB;
        } else if (method == RX_ALLOC_METHOD_AUTO) {
                /* Constrain the rx_alloc_level */
                if (channel->rx_alloc_level < 0)
                        channel->rx_alloc_level = 0;
                else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                        channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

                /* Decide on the allocation method */
                method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
                          RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
        }

        /* Push the option */
        channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }
        return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, limit;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
        limit = max_fill * min(rx_refill_limit, 100U) / 100U;
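
        /*
         * Worked example of the arithmetic above (illustrative values,
         * assuming rxq_entries = 512, a typical ring size): max_fill =
         * 512 - 2 = 510, so the default module parameters give trigger =
         * 510 * 90 / 100 = 459 and limit = 510 * 95 / 100 = 484.
         * Refilling starts once the fill level drops below 459
         * descriptors and aims to restore the ring to 484.
         */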

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->fast_fill_limit = limit;

        /* Set up RX descriptor ring */
        rx_queue->enabled = true;
        efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* A flush failure might have left rx_queue->enabled */
        rx_queue->enabled = false;

        del_timer_sync(&rx_queue->slow_fill);
        efx_nic_fini_rx(rx_queue);

        /* Release RX buffers.  NB: start at index 0, not the current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->ptr_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring fast/slow fill threshold (%)");
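
/*
 * Usage sketch for the parameters above (the module name "sfc" and the
 * sysfs path are assumptions about the usual build of this driver, not
 * something defined in this file):
 *
 *   modprobe sfc rx_alloc_method=2 rx_refill_threshold=80
 *
 * would force page-based RX allocation and start refilling the ring once
 * it drops below 80% full.  rx_alloc_method (mode 0644) can also be
 * changed at runtime via /sys/module/sfc/parameters/rx_alloc_method,
 * while rx_refill_threshold (mode 0444) is read-only after load.
 */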