// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>

#include "ip6_offload.h"

/* All GRO functions are always builtin, except UDP over ipv6, which lives in
 * the ipv6 module, as it depends on the UDPv6 lookup function, so we need
 * special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif
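
/* GRO dispatch helper: if the GRO recursion limit for this skb has been hit,
 * mark it for an immediate flush and return NULL; otherwise call the L4
 * gro_receive callback through INDIRECT_CALL_L4(), so the common tcp6/udp6
 * handlers can be called directly rather than through an indirect pointer.
 */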
#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
	NAPI_GRO_CB(skb)->flush |= 1, NULL :			\
	INDIRECT_CALL_L4(cb, f2, f1, head, skb);		\
})
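
/* Walk the chain of extension headers at the head of the (already pulled)
 * IPv6 packet, pulling each GSO-capable extension header, and return the
 * protocol number of the first header that is not one of them (typically
 * the L4 protocol).
 */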
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}
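
/* GSO callback for ETH_P_IPV6: strip the IPv6 header and any GSO-capable
 * extension headers, hand the packet to the inner protocol's gso_segment
 * callback, then fix up the IPv6 payload length (and, for UDP fragmentation,
 * the fragment header offsets) of every resulting segment.
 */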
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
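
	/* Decide whether this IPv6 header must carry fragment headers for a
	 * UDP packet that is being fragmented (SKB_GSO_UDP): either plain
	 * UDP over this header, or UDP inside an IPXIP tunnel for which this
	 * is the inner header.
	 */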
	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
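
	/* Rebuild the IPv6 header of every segment: recompute payload_len,
	 * restore the network header offset, and, when doing UDP
	 * fragmentation, fill in the fragment header offset and IP6_MF flag.
	 */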
	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}
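
/* GRO receive callback for ETH_P_IPV6: validate the IPv6 header, compare the
 * flow against packets already held on the GRO list (addresses, flow label,
 * next header and any extension headers must match), and then hand off to
 * the next protocol's gro_receive callback.
 */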
INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							  struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));
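
	/* flush starts at 1 and is decremented again once a usable
	 * gro_receive handler has been found; a payload_len that does not
	 * match the data GRO is actually holding keeps it non-zero and
	 * forces a flush.
	 */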
	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
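
	/* No handler registered directly for this next header: pull any
	 * extension headers in the linear area and look up the offload for
	 * the protocol that follows them.
	 */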
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					  ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
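
/* GRO receive for IPv6-in-IPv4 (SIT) and IPv6-in-IPv6 tunnels: only one
 * level of encapsulation is aggregated, so bail out with a flush if the
 * encap_mark is already set before handing the inner packet to
 * ipv6_gro_receive().
 */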
static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IP4IP6 (IPv4 in IPv6) tunnels */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}
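
/* GRO complete for ETH_P_IPV6: rewrite payload_len for the merged packet and
 * invoke the gro_complete callback of the protocol that follows the IPv6
 * header and its extension headers.
 */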
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}
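
/* gro_complete callbacks for the tunnel offloads below: mark the skb as
 * encapsulated and record the tunnel GSO type before completing the inner
 * header.
 */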
static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}
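
/* L2 offload hooks for ETH_P_IPV6, registered with dev_add_offload() in
 * ipv6_offload_init() below.
 */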
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};
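
/* gso_segment wrappers for the tunnel offloads: each one checks that the
 * skb really carries the GSO type the tunnel expects before delegating to
 * the IPv4 or IPv6 segmentation routine.
 */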
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment = sit_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment = ip4ip6_gso_segment,
		.gro_receive = ip4ip6_gro_receive,
		.gro_complete = ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment = ip6ip6_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = ip6ip6_gro_complete,
	},
};
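
/* Register the L2 offload for ETH_P_IPV6 and the protocol offloads for the
 * SIT (IPv6 over IPv4), IP6IP6 and IP4IP6 tunnel encapsulations.  Runs at
 * fs_initcall() time, see the call at the bottom of this file.
 */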
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);