Linux 5.1.5
net/ipv6/ip6_offload.c

/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include "ip6_offload.h"

/* All GRO functions are always builtin, except UDP over IPv6, which lives
 * in the ipv6 module, as it depends on the UDPv6 lookup function, so we
 * need special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif
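
/* Dispatch an L4 gro_receive callback while bounding GRO recursion: if the
 * per-skb recursion counter overflows, mark the skb for immediate flush and
 * return NULL instead of recursing further.  INDIRECT_CALL_L4() turns the
 * indirect call into direct calls to the builtin TCP/UDP handlers where
 * possible, avoiding indirect-branch (retpoline) overhead.
 */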
#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
})
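
/* Illustrative expansion (a sketch, assuming IS_BUILTIN(CONFIG_IPV6)):
 *
 *	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
 *					  ops->callbacks.gro_receive, head, skb);
 *
 * compares ops->callbacks.gro_receive against tcp6_gro_receive and then
 * udp6_gro_receive, emitting a direct call on a match and falling back to
 * the indirect call otherwise.  When IPv6 is modular, only the builtin
 * tcp6_gro_receive is a direct-call candidate.
 */

/* Advance skb->data past any GSO-capable extension headers and return the
 * upper-layer protocol number that follows them.
 */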
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}
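
/* Segment a GSO skb into wire-sized segments: pull the IPv6 header plus
 * extension headers, let the upper-layer offload do the actual
 * segmentation, then rewrite payload_len on every resulting segment.  For
 * UDP fragmentation (udpfrag), also fill in the fragment header offset and
 * set IP6_MF on all fragments but the last.
 */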
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}

INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
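
/* GRO receive for IPv6: locate the L4 header (pulling extension headers if
 * the next header has no gro_receive callback of its own), then walk the
 * held packets and keep as same_flow only those whose version, flow label,
 * addresses and nexthdr/hop_limit pair all match; a differing Traffic
 * Class forces a flush rather than a flow mismatch.
 */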
INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					  ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
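
/* Tunnel GRO receive wrappers.  encap_mark limits aggregation to a single
 * level of encapsulation: a second tunnel header on the same skb forces a
 * flush instead of recursing into GRO again.
 */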
static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IPv4-in-IPv6 (ip4ip6) */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}
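
/* Completion for a merged IPv6 super-packet: rewrite payload_len to the
 * merged length, then invoke the upper-layer gro_complete at the offset
 * past the IPv6 header and any extension headers.
 */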
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}
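
/* The tunnel completion handlers mark the skb as encapsulated and record
 * the tunnel type in gso_type, so that a later GSO pass can re-segment the
 * merged packet with the correct tunnel framing.
 */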
static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};
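
/* The tunnel GSO wrappers only accept skbs whose gso_type carries the
 * tunnel flag they were registered for, then delegate to the segmentation
 * routine of the inner protocol family.
 */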
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment = sit_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment = ip4ip6_gso_segment,
		.gro_receive = ip4ip6_gro_receive,
		.gro_complete = ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment = ip6ip6_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = ip6ip6_gro_complete,
	},
};
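
/* Register the offload handlers: the ETH_P_IPV6 packet offload for plain
 * IPv6, SIT (IPv6-in-IPv4) with the IPv4 stack, and the ip6ip6/ip4ip6
 * tunnels with the IPv6 stack.
 */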
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);