// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>

static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);
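	/* At this point "partial" holds the outer UDP pseudo-header checksum
	 * with the old length backed out; each segment produced below only
	 * needs its own length folded back in to yield a valid uh->check.
	 */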

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
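	/* If an xfrm transform will still be applied to the outer packet,
	 * the device cannot be asked to checksum it, so the outer UDP
	 * checksum is computed in software for each segment instead.
	 */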

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	features &= ~NETIF_F_CSUM_MASK;
	if (!need_csum || offload_csum)
		features |= NETIF_F_HW_CSUM;

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));

out:
	return segs;
}

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
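
/* Illustrative sketch only, not part of this file: a UDP tunnel GSO handler
 * is expected to have marked the skb as encapsulated and recorded the inner
 * protocol before handing it here, roughly along these lines (names are
 * hypothetical; real callers such as the fou/gue offloads do additional
 * header manipulation around this call):
 *
 *	static struct sk_buff *my_tunnel_gso_segment(struct sk_buff *skb,
 *						     netdev_features_t features)
 *	{
 *		skb->encapsulation = 1;
 *		skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 *		return skb_udp_tunnel_segment(skb, features, false);
 *	}
 */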

static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
					      netdev_features_t features)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;

	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);

	return skb;
}

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features)
{
	struct sock *sk = gso_skb->sk;
	unsigned int sum_truesize = 0;
	struct sk_buff *segs, *seg;
	struct udphdr *uh;
	unsigned int mss;
	bool copy_dtor;
	__sum16 check;
	__be16 newlen;

	if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
		return __udp_gso_segment_list(gso_skb, features);

	mss = skb_shinfo(gso_skb)->gso_size;
	if (gso_skb->len <= sizeof(*uh) + mss)
		return ERR_PTR(-EINVAL);

	skb_pull(gso_skb, sizeof(*uh));

	/* clear destructor to avoid skb_segment assigning it to tail */
	copy_dtor = gso_skb->destructor == sock_wfree;
	if (copy_dtor)
		gso_skb->destructor = NULL;

	segs = skb_segment(gso_skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		if (copy_dtor)
			gso_skb->destructor = sock_wfree;
		return segs;
	}

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	seg = segs;
	uh = udp_hdr(seg);

	/* preserve TX timestamp flags and TS key for first segment */
	skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;
	skb_shinfo(seg)->tx_flags |=
			(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP);

	/* compute checksum adjustment based on old length versus new one */
	newlen = htons(sizeof(*uh) + mss);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
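	/* uh->check still holds only the pseudo-header sum (CHECKSUM_PARTIAL),
	 * in which the UDP length appears once, so a single 16-bit adjustment
	 * from the old length to the per-segment length (sizeof(*uh) + mss)
	 * keeps it correct for every full-sized segment.
	 */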

	for (;;) {
		if (copy_dtor) {
			seg->destructor = sock_wfree;
			seg->sk = sk;
			sum_truesize += seg->truesize;
		}

		if (!seg->next)
			break;

		uh->len = newlen;
		uh->check = check;

		if (seg->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(seg, ~check);
		else
			uh->check = gso_make_checksum(seg, ~check) ? :
				    CSUM_MANGLED_0;

		seg = seg->next;
		uh = udp_hdr(seg);
	}

	/* last packet can be partial gso_size, account for that in checksum */
	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
		       seg->data_len);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	uh->len = newlen;
	uh->check = check;

	if (seg->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(seg, ~check);
	else
		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;

	/* update refcount for the packet */
	if (copy_dtor) {
		int delta = sum_truesize - gso_skb->truesize;

		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	}
	return segs;
}
EXPORT_SYMBOL_GPL(__udp_gso_segment);
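
/* Illustrative sketch only, not part of this file: the SKB_GSO_UDP_L4 path
 * above is what services the UDP_SEGMENT socket option from userspace,
 * roughly (the 1400-byte value is a hypothetical example):
 *
 *	int gso_size = 1400;	// payload bytes per segment
 *
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
 *	send(fd, buf, 10 * gso_size, 0);	// one large write
 *
 * The stack builds a single large skb with gso_size set, and
 * __udp_gso_segment() (or the NIC, if it supports USO) later splits it into
 * wire-sized UDP datagrams.
 */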

static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		return __udp_gso_segment(skb, features);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */

	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
					       struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *pp = NULL;
	struct udphdr *uh2;
	struct sk_buff *p;
	unsigned int ulen;
	int ret = 0;

	/* requires non zero csum, for symmetry with GSO */
	if (!uh->check) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* Do not deal with padded or malicious packets, sorry ! */
	ulen = ntohs(uh->len);
	if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}
	/* pull encapsulating udp header */
	skb_gro_pull(skb, sizeof(struct udphdr));

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = udp_hdr(p);

		/* Match ports only, as csum is always non zero */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		if (NAPI_GRO_CB(skb)->is_flist != NAPI_GRO_CB(p)->is_flist) {
			NAPI_GRO_CB(skb)->flush = 1;
			return p;
		}

		/* Terminate the flow on len mismatch or if it grows "too much".
		 * Under small packet flood GRO count could otherwise grow a lot
		 * leading to excessive truesize values.
		 * On len mismatch merge the first packet shorter than gso_size,
		 * otherwise complete the GRO packet.
		 */
		if (ulen > ntohs(uh2->len)) {
			pp = p;
		} else {
			if (NAPI_GRO_CB(skb)->is_flist) {
				if (!pskb_may_pull(skb, skb_gro_offset(skb))) {
					NAPI_GRO_CB(skb)->flush = 1;
					return NULL;
				}
				if ((skb->ip_summed != p->ip_summed) ||
				    (skb->csum_level != p->csum_level)) {
					NAPI_GRO_CB(skb)->flush = 1;
					return NULL;
				}
				ret = skb_gro_receive_list(p, skb);
			} else {
				skb_gro_postpull_rcsum(skb, uh,
						       sizeof(struct udphdr));

				ret = skb_gro_receive(p, skb);
			}
		}

		if (ret || ulen != ntohs(uh2->len) ||
		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
			pp = p;

		return pp;
	}

	/* mismatch, but we never need to flush */
	return NULL;
}

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;

	NAPI_GRO_CB(skb)->is_flist = 0;
	if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
		NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
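	/* Two aggregation modes: sockets that enabled UDP GRO get packets
	 * merged into a single SKB_GSO_UDP_L4 skb, while NETIF_F_GRO_FRAGLIST
	 * devices keep the original packets on a frag_list so they can still
	 * be forwarded or delivered individually.
	 */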

	if ((sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
		return pp;
	}

	if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid) ||
	    !udp_sk(sk)->gro_receive)
		goto out;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	flush = 0;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports; checksums must be either both zero
		 * or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);
	struct sk_buff *pp;
	struct sock *sk;

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
					     inet_gro_compute_pseudo);

skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	rcu_read_lock();
	sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
	pp = udp_gro_receive(head, skb, uh, sk);
	rcu_read_unlock();
	return pp;

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

static int udp_gro_complete_segment(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	skb->csum_start = (unsigned char *)uh - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
	return 0;
}
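
/* After udp_gro_complete_segment() the merged skb looks like locally
 * generated UDP GSO traffic: CHECKSUM_PARTIAL plus SKB_GSO_UDP_L4 with
 * gso_segs taken from the GRO count, so the GSO engine can re-segment it
 * if the packet is forwarded rather than delivered locally.
 */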

int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	struct sock *sk;
	int err;

	uh->len = newlen;

	rcu_read_lock();
	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete) {
		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
					: SKB_GSO_UDP_TUNNEL;

		/* Set encapsulation before calling into inner gro_complete()
		 * functions to make them set up the inner offsets.
		 */
		skb->encapsulation = 1;
		err = udp_sk(sk)->gro_complete(sk, skb,
					       nhoff + sizeof(struct udphdr));
	} else {
		err = udp_gro_complete_segment(skb);
	}
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (NAPI_GRO_CB(skb)->is_flist) {
		uh->len = htons(skb->len - nhoff);

		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
				skb->csum_level++;
		} else {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = 0;
		}

		return 0;
	}

	if (uh->check)
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
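
/* Registration note: inet_add_offload() hooks these callbacks into
 * inet_offloads[IPPROTO_UDP]; udpv4_offload_init() is called from
 * inet_init() at boot, so UDP GSO/GRO is available as soon as the IPv4
 * stack is up.
 */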