// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/xdp.h>
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
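
	/* Copy both the RX prefix and the packet headers into the skb's
	 * linear area; skb_reserve() below then hides the prefix so that
	 * skb->data points at the Ethernet header while the prefix stays
	 * addressable just in front of it.
	 */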
	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					rx_buf->page, rx_buf->page_offset,
					rx_buf->len, efx->rx_buffer_truesize);
			rx_buf->page = NULL;

			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
			 unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;
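
	/* With scatter, every fragment except the last is filled to
	 * exactly efx->rx_dma_len bytes, so a plausible completed length
	 * must lie in ((n_frags - 1) * rx_dma_len, n_frags * rx_dma_len].
	 */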
	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_siena_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;
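
	/* From here on, rx_buf->page_offset and rx_buf->len describe the
	 * packet payload itself; the hardware RX prefix sits immediately
	 * before it in the buffer.
	 */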

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
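
/* Build an skb around the received fragments and pass it up the stack */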
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}
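
	/* With CHECKSUM_UNNECESSARY, csum_level == 1 tells the stack that
	 * one additional (inner) checksum of an encapsulated packet has
	 * also been verified by the hardware.
	 */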

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	if (channel->rx_list != NULL)
		/* Add to list, will pass up later */
		list_add_tail(&skb->list, channel->rx_list);
	else
		/* No list, so pass it up now */
		netif_receive_skb(skb);
}

/** efx_do_xdp: perform XDP processing on a received packet
 *
 * Returns true if packet should still be delivered.
 */
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
		       struct efx_rx_buffer *rx_buf, u8 **ehp)
{
	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
	struct efx_rx_queue *rx_queue;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 xdp_act;
	s16 offset;
	int err;

	xdp_prog = rcu_dereference_bh(efx->xdp_prog);
	if (!xdp_prog)
		return true;

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(channel->rx_pkt_n_frags > 1)) {
		/* We can't do XDP on fragmented packets - drop. */
		efx_siena_free_rx_buffers(rx_queue, rx_buf,
					  channel->rx_pkt_n_frags);
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "XDP is not possible with multiple receive fragments (%d)\n",
				  channel->rx_pkt_n_frags);
		channel->n_rx_xdp_bad_drops++;
		return false;
	}

	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
				rx_buf->len, DMA_FROM_DEVICE);

	/* Save the rx prefix. */
	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
	       efx->rx_prefix_size);
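
	/* The XDP program may move xdp.data (e.g. bpf_xdp_adjust_head()),
	 * overwriting the bytes in front of the frame; the copy saved
	 * above lets us restore the prefix on XDP_PASS below.
	 */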

	xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
	/* No support yet for XDP metadata */
	xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
			 rx_buf->len, false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	offset = (u8 *)xdp.data - *ehp;

	switch (xdp_act) {
	case XDP_PASS:
		/* Fix up rx prefix. */
		if (offset) {
			*ehp += offset;
			rx_buf->page_offset += offset;
			rx_buf->len -= offset;
			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
			       efx->rx_prefix_size);
		}
		break;

	case XDP_TX:
		/* Buffer ownership passes to tx on success. */
		xdpf = xdp_convert_buff_to_frame(&xdp);
		err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
		if (unlikely(err != 1)) {
			efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP TX failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_tx++;
		}
		break;

	case XDP_REDIRECT:
		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP redirect failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_redirect++;
		}
		break;

	default:
		bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
		efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_bad_drops++;
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		break;

	case XDP_ABORTED:
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_drops++;
		break;
	}

	return xdp_act == XDP_PASS;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_siena_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_siena_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_siena_free_rx_buffers(rx_queue, rx_buf,
					  channel->rx_pkt_n_frags);
		goto out;
	}

	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
		goto out;

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_siena_rx_packet_gro(channel, rx_buf,
					channel->rx_pkt_n_frags, eh, 0);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}