/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>

#include "ip6_offload.h"
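/* Walk the chain of IPv6 extension headers at the head of the skb,
 * pulling each one, and return the next-header value of the first
 * header that is not a GSO-supported extension header.
 */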
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}
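/* GSO for IPv6: strip the IPv6 header and any extension headers, hand the
 * payload to the upper-layer gso_segment() callback, then fix up the IPv6
 * payload length (and, for UDP fragmentation, the fragment header) on each
 * resulting segment.
 */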
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;

	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_TCPV4 |
		       SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_GRE_CSUM |
		       SKB_GSO_IPIP |
		       SKB_GSO_SIT |
		       SKB_GSO_UDP_TUNNEL |
		       SKB_GSO_UDP_TUNNEL_CSUM |
		       SKB_GSO_TUNNEL_REMCSUM |
		       SKB_GSO_TCPV6 |
		       0)))
		goto out;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
		udpfrag = proto == IPPROTO_UDP && encap;
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}
/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}
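/* GRO receive for IPv6: locate the IPv6 header, compare it against the
 * flows already on the gro_list (all header fields except payload length
 * and Traffic Class must match), then hand the packet to the upper-layer
 * gro_receive() callback for the next-header protocol.
 */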
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	for (p = *head; p; p = p->next) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    memcmp(&iph->nexthdr, &iph2->nexthdr,
			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* Clear flush_id, there's really no concept of ID in IPv6. */
		NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
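/* GRO receive for IPv6 carried inside an IPv4 SIT tunnel: allow only one
 * level of encapsulation, then fall through to the plain IPv6 handler.
 */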
static struct sk_buff **sit_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}
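/* Finish GRO for an IPv6 packet: rewrite the payload length of the merged
 * skb, then let the upper-layer gro_complete() callback finish its header.
 */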
static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation)
		skb_set_inner_network_header(skb, nhoff);

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}
static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_SIT;
	return ipv6_gro_complete(skb, nhoff);
}
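/* Offload handlers registered below: ipv6_packet_offload for native IPv6
 * (ETH_P_IPV6) frames, sit_offload for IPv6 carried as IPPROTO_IPV6 inside
 * IPv4 (SIT tunnels).
 */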
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = sit_gro_receive,
		.gro_complete = sit_gro_complete,
	},
};
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (udp_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);

	return 0;
}

fs_initcall(ipv6_offload_init);