// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U
/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)
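
/* Illustrative sizing (hypothetical values): with 2 buffers per page,
 * EF4_RECYCLE_RING_SIZE_IOMMU (4096) works out to a 2048-entry page ring,
 * while EF4_RECYCLE_RING_SIZE_NOIOMMU (16) works out to 8 entries.  The
 * actual division and power-of-two rounding happen in
 * ef4_init_rx_recycle_ring() below.
 */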
/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS 128u
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
				      EF4_RX_USR_BUF_SIZE)
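
/* Illustrative only (hypothetical values): for a 9216-byte maximum frame
 * and 2048-byte usable buffers, EF4_RX_MAX_FRAGS would evaluate to
 * DIV_ROUND_UP(9216, 2048) = 5 fragments per packet.
 */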
/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)
static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}
static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}
static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return ef4_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}
static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
				      struct ef4_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}
void ef4_rx_config_page_split(struct ef4_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EF4_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
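
/* Worked example of the split above, with hypothetical values (not the
 * driver's defaults): rx_dma_len = 1536, rx_ip_align = 2 and a 64-byte
 * EF4_RX_BUF_ALIGNMENT give rx_page_buf_step = ALIGN(1538, 64) = 1600.
 * On a 4096-byte order-0 page (assuming sizeof(struct ef4_rx_page_state)
 * is small), rx_bufs_per_page = 2, rx_buffer_truesize = 2048 and
 * rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4.
 */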
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct page *page;
	struct ef4_rx_page_state *state;
	unsigned index;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
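
/* Note on the bookkeeping above: page_add and page_remove are free-running
 * counters and a ring slot is always (counter & page_ptr_mask).  A page is
 * only considered reusable when page_count() == 1, i.e. the recycle ring
 * holds the last reference; otherwise the stack still owns it, so it is
 * unmapped and released instead of being handed back to the RX ring.
 */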
/**
 * ef4_init_rx_buffers - create EF4_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 * @atomic:		control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct ef4_rx_buffers for each one.  Returns a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct ef4_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = ef4_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct ef4_rx_page_state);
		page_offset = sizeof(struct ef4_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
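
/* Resulting page layout (each step is efx->rx_page_buf_step bytes):
 *
 *   | ef4_rx_page_state | pad(rx_ip_align) buffer 0 | pad buffer 1 | ...
 *
 * Each rx_buf's dma_addr/page_offset points rx_ip_align bytes into its
 * step, the usual NET_IP_ALIGN-style padding so the IP header lands on
 * an aligned boundary after the Ethernet header.
 */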
/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct ef4_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}
static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
				struct ef4_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}
/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void ef4_recycle_rx_page(struct ef4_channel *channel,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	ef4_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}
static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
			       struct ef4_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
		ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
		ef4_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}
/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
				 struct ef4_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		ef4_recycle_rx_page(channel, rx_buf);
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}
static void ef4_discard_rx_packet(struct ef4_channel *channel,
				  struct ef4_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @atomic:		control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EF4_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   ef4_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = ef4_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				ef4_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", ef4_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		ef4_nic_notify_rx_desc(rx_queue);
}
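
/* Illustrative walk-through of the refill loop above (hypothetical
 * numbers): with fill_level = 200, max_fill = 500 and batch_size = 32,
 * space starts at 300 and the do/while body runs nine times (space falls
 * to 268, 236, ..., 44, then 12 and the loop stops), adding one batch of
 * pages per pass unless ef4_init_rx_buffers() fails first.
 */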
void ef4_rx_slow_fill(struct timer_list *t)
{
	struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	ef4_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}
static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
				     struct ef4_rx_buffer *rx_buf,
				     int len)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EF4_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  ef4_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  ef4_rx_queue_index(rx_queue), len, max_len);
	}

	ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}
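
/* The GRO path above never copies payload: napi_get_frags() supplies a
 * header-less skb, the receive buffers are attached as page fragments,
 * and napi_gro_frags() lets the GRO layer coalesce them with any packets
 * it is already holding for the same flow.
 */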
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
				     struct ef4_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
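
/* Resulting skb layout from ef4_rx_mk_skb() (hdr_len <= EF4_SKB_HEADERS):
 *
 *   head:  [rx_ip_align pad][RX prefix][hdr_len bytes of packet]
 *   frags: any remaining payload, left in the original page(s)
 *
 * skb_reserve() then hides the pad and prefix so skb->data starts at the
 * Ethernet header.
 */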
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = ef4_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EF4_RX_PKT_PREFIX_LEN))
			ef4_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
		rx_buf->flags |= EF4_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   ef4_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
		ef4_rx_flush_packet(channel);
		ef4_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(ef4_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = ef4_rx_buffer(rx_queue, index);
	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	ef4_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
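
/* ef4_rx_packet() is deliberately split in two: it only validates, syncs
 * and prefetches, then parks the packet in channel->rx_pkt_index /
 * rx_pkt_n_frags.  The payload is not touched until __ef4_rx_packet()
 * runs (via ef4_rx_flush_packet()), giving the prefetch time to pull the
 * headers into cache.
 */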
static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
			   struct ef4_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);

	skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}
/* Handle a received packet.  Second half: Touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_buffer *rx_buf =
		ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = ef4_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct ef4_rx_queue *rx_queue;

		ef4_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
		ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}
int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  ef4_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = ef4_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}
static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
				     struct ef4_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct iommu_domain __maybe_unused *domain;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
	domain = iommu_get_domain_for_dev(&efx->pci_dev->dev);
	if (domain && domain->type != IOMMU_DOMAIN_IDENTITY)
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	ef4_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	ef4_nic_init_rx(rx_queue);
}
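
/* Trigger arithmetic above, with hypothetical numbers: rxq_entries = 512
 * and EF4_RXD_HEAD_ROOM = 6 give max_fill = 506; a 32-buffer batch gives
 * max_trigger = 474.  rx_refill_threshold = 90 would give
 * trigger = 506 * 90 / 100 = 455, i.e. a refill is attempted whenever the
 * ring drops below roughly 90% full.
 */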
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
	int i;
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			ef4_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct ef4_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}
void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));

	ef4_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
#ifdef CONFIG_RFS_ACCEL

int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;
	struct ef4_filter_spec spec;
	struct flow_keys fk;
	int rc;

	if (flow_id == RPS_FLOW_ID_INVALID)
		return -EINVAL;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;

	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
			   efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
		EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
		EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
	spec.ether_type = fk.basic.n_proto;
	spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		spec.rem_host[0] = fk.addrs.v4addrs.src;
		spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
	}

	spec.rem_port = fk.ports.src;
	spec.loc_port = fk.ports.dst;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	channel = ef4_get_channel(efx, rxq_index);
	channel->rps_flow_id[rc] = flow_id;
	++channel->rfs_filters_added;

	if (spec.ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);

	return rc;
}
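
/* Illustrative example (hypothetical flow): a TCP segment from
 * 192.0.2.1:80 to 198.51.100.2:40000 arriving while RPS targets
 * rxq_index 3 produces a filter matching ether_type=IPv4, ip_proto=TCP,
 * rem_host/rem_port = 192.0.2.1:80 and loc_host/loc_port =
 * 198.51.100.2:40000, steering the rest of that flow to RX queue 3.
 */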
bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
	unsigned int channel_idx, index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	channel_idx = efx->rps_expire_channel;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID &&
		    expire_one(efx, flow_id, index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [queue %u flow %u]\n",
				   index, channel_idx, flow_id);
			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
		}
		if (++index == size) {
			if (++channel_idx == efx->n_channels)
				channel_idx = 0;
			index = 0;
		}
	}
	efx->rps_expire_channel = channel_idx;
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}
#endif /* CONFIG_RFS_ACCEL */

/**
 * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
	if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
	    (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}
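
/* Examples for the checks above: an IPv4 filter for 224.0.0.251 matches
 * (ipv4_is_multicast() covers 224.0.0.0-239.255.255.255), and an IPv6
 * filter for ff02::fb matches because the first address byte is 0xff.
 * Filters for unicast addresses such as 192.0.2.1 or 2001:db8::1 are not
 * multicast recipients.
 */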