/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, ufo, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;
	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);
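	/* Note on the arithmetic above: in ones'-complement checksums a
	 * 32-bit value contributes the sum of its 16-bit halves, so
	 * htonl(skb->len) stays correct even when the length no longer
	 * fits in 16 bits, e.g. a 70000 (0x11170) byte frame contributes
	 * 0x0001 + 0x1170. Each segment's true length is added back via
	 * csum_add(partial, htonl(len)) further below.
	 */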
	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;
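	/* At this point the skb looks like a plain, non-encapsulated inner
	 * packet: the outer headers have been pulled, and the mac/network
	 * offsets and skb->protocol now describe the inner frame.
	 */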
	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
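	/* offload_csum is true only when an outer checksum is required and
	 * the device can compute checksums for this address family; it
	 * decides below whether each segment leaves as CHECKSUM_PARTIAL or
	 * has its outer UDP checksum finished in software.
	 */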
	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum || ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}
	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;
		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;
		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);
		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}
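		/* In the gso_partial case above, (skb->head + data_offset)
		 * points at the segment payload, so the expression counts
		 * the header bytes from the outer UDP header to the payload
		 * plus one gso_size worth of data.
		 */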
		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));
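		/* Two ways to finish the outer checksum: fold the payload
		 * into it in software right here (gso_make_checksum()), or
		 * leave the segment as CHECKSUM_PARTIAL with csum_start/
		 * csum_offset aimed at udphdr->check for the device to
		 * complete on transmit.
		 */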
		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();
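	/* Pick the inner segmentation routine. ENCAP_TYPE_ETHER means the
	 * encapsulated frame starts with an Ethernet header (e.g. VXLAN),
	 * so dispatch on the inner ethertype; ENCAP_TYPE_IPPROTO means a
	 * bare L3 payload (e.g. FOU/GUE), so look up the inner IP
	 * protocol's offload handler directly.
	 */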
	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}
	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;
	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;
	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;
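	/* With NETIF_F_GSO_ROBUST forced on, skb_gso_ok() succeeding means
	 * the stack may keep the frame as GSO; gso_segs is recomputed below
	 * because a count supplied by an untrusted source (e.g. a
	 * virtualized guest) cannot be trusted, and NULL is returned so
	 * the skb passes through unsegmented.
	 */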
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}
	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;
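	/* A computed checksum of zero is transmitted as CSUM_MANGLED_0
	 * (0xFFFF) above, since an all-zero uh->check means "no checksum"
	 * per RFC 768; both values are equivalent in ones'-complement
	 * arithmetic.
	 */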
	skb->ip_summed = CHECKSUM_NONE;
	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment().
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh, udp_lookup_t lookup)
{
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	struct sock *sk;
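	/* Flush (refuse to aggregate) unless this is the first tunnel GRO
	 * layer this skb has traversed and the outer UDP checksum is known
	 * good: locally generated (CHECKSUM_PARTIAL), verified by hardware
	 * (csum_cnt), or already validated (csum_valid).
	 */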
	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;
	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);

	if (sk && udp_sk(sk)->gro_receive)
		goto unflush;
	goto out_unlock;

unflush:
	flush = 0;
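	/* Walk the packets already held on the GRO list. source and dest
	 * are adjacent 16-bit fields in struct udphdr, so a single 32-bit
	 * load compares both ports at once.
	 */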
	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and the checksums must either both be zero
		 * or both be nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}
	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);

skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;
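	/* The coalesced super-packet carries a single UDP header, so its
	 * length field must be rewritten to cover all the merged payload.
	 */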
	uh->len = newlen;

	/* Set encapsulation before calling into inner gro_complete()
	 * functions to make them set up the inner offsets.
	 */
	skb->encapsulation = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete)
		err = udp_sk(sk)->gro_complete(sk, skb,
					       nhoff + sizeof(struct udphdr));
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);
static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
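	/* If the tunnel header carried a checksum, mark the aggregate so
	 * any later resegmentation regenerates per-segment checksums, and
	 * seed uh->check with the inverted pseudo-header sum over the new
	 * length; only the payload contribution then remains to be added.
	 */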
	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment  = udp4_ufo_fragment,
		.gro_receive  = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};
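/* inet_add_offload() below installs these callbacks in the IPv4 protocol
 * offload array under IPPROTO_UDP; the core GSO/GRO paths dispatch UDP
 * packets through them. udpv4_offload_init() is invoked from the IPv4
 * offload setup at boot (ipv4_offload_init() in net/ipv4/af_inet.c).
 */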
int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}