/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))
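
/*
 * Per-fragment reassembly state lives in the skb control buffer
 * (skb->cb) via the structure above.  Only the fragment's byte offset
 * within the original datagram needs to survive between
 * ip6_frag_queue() calls; e.g. a fragment carrying payload bytes
 * starting at 1240 stores offset == 1240 here.
 */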

static struct inet_frags ip6_frags;

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 c;

	c = jhash_3words((__force u32)saddr->s6_addr32[0],
			 (__force u32)saddr->s6_addr32[1],
			 (__force u32)saddr->s6_addr32[2],
			 rnd);

	c = jhash_3words((__force u32)saddr->s6_addr32[3],
			 (__force u32)daddr->s6_addr32[0],
			 (__force u32)daddr->s6_addr32[1],
			 c);

	c = jhash_3words((__force u32)daddr->s6_addr32[2],
			 (__force u32)daddr->s6_addr32[3],
			 (__force u32)id,
			 c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);
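
/*
 * The three chained jhash_3words() calls above fold the 128-bit source
 * address, the 128-bit destination address and the 32-bit fragment
 * identification into one 32-bit value, seeded with the per-table
 * random value (rnd) so off-path attackers cannot predict bucket
 * placement.  INETFRAGS_HASHSZ is a power of two, so masking with
 * (INETFRAGS_HASHSZ - 1) yields the bucket index.
 */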

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

bool ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return	fq->id == arg->id &&
		fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_match);
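
/*
 * Fragments belong to the same datagram when their Identification
 * value and source/destination addresses all match (RFC 2460,
 * section 4.5); the extra 'user' field keeps queues created by
 * different defragmentation users (e.g. local delivery vs. netfilter
 * conntrack) apart even for otherwise identical tuples.
 */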

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}
EXPORT_SYMBOL(ip6_frag_init);

void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
			   struct inet_frags *frags)
{
	struct net_device *dev = NULL;

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, frags);

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* But use as source the device on which the LAST ARRIVED
	 * segment was received.  And do not use the fq->dev pointer
	 * directly, the device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);

out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, frags);
}
EXPORT_SYMBOL(ip6_expire_frag_queue);
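
/*
 * Reassembly timeout handling follows RFC 2460, section 4.5: if the
 * first fragment has been received, an ICMPv6 Time Exceeded message
 * (code "fragment reassembly time exceeded") is returned to the
 * sender; otherwise the queue is dropped silently.
 */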

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ipv6.frags);

	ip6_expire_frag_queue(net, fq, &ip6_frags);
}

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		return NULL;

	return container_of(q, struct frag_queue, q);
}
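
/*
 * inet_frag_find() above either returns the existing queue for this
 * (id, src, dst, user) tuple or allocates a fresh one through the
 * ip6_frag_init() constructor registered at init time.  It also
 * releases ip6_frags.lock internally, which is why no matching
 * read_unlock() appears in fq_find().
 */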

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
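
	/*
	 * Worked example: ntohs(fhdr->frag_off) == 0x00b1 means a
	 * fragment offset field of 22 (byte offset 22 * 8 = 176) with
	 * the MF bit set.  'end' is then 176 plus this fragment's data
	 * length, i.e. payload_len minus everything between the fixed
	 * IPv6 header and the data following the fragment header.
	 */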

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}
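
	/*
	 * With CHECKSUM_COMPLETE the device summed the whole packet,
	 * including headers that will be stripped during reassembly,
	 * so the contribution of everything up to and including the
	 * fragment header is subtracted from skb->csum above.
	 */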

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4, amended by Errata ID 3089:
	 *   When reassembling an IPv6 datagram, if one or more of its
	 *   constituent fragments is determined to be an overlapping
	 *   fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;
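
	/*
	 * Example: with an existing fragment covering bytes 0-1231, a
	 * new fragment at offset 1224 overlaps its predecessor
	 * (0 + 1232 > 1224) and the whole queue is discarded, as
	 * RFC 5722 demands, instead of honouring either copy.
	 */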

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}
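
	/*
	 * The datagram is complete once both the first and the last
	 * fragment have arrived and the accumulated payload (meat)
	 * equals the total length learned from the final fragment;
	 * only then is reassembly attempted below.
	 */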
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	inet_frag_kill(&fq->q, &ip6_frags);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 on success, -1 on failure.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;
	int sum_truesize;

	inet_frag_kill(&fq->q, &ip6_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
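
	/*
	 * payload_len counts everything after the fixed IPv6 header,
	 * minus the fragment header about to be removed.  E.g. with no
	 * other extension headers and fq->q.len = 2960, it works out
	 * to 48 - 40 + 2960 - 8 = 2960, i.e. exactly fq->q.len.
	 */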
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove the fragment header from the datagram and
	 * to relocate the header in order to calculate the ICV correctly.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
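
	/*
	 * Above, the Next Header byte of the header that preceded the
	 * fragment header (at offset nhoff into the network header) is
	 * overwritten with the fragment header's own Next Header value,
	 * then all preceding headers are shifted forward by 8 bytes
	 * (sizeof(struct frag_hdr)) so the datagram looks as if it had
	 * never been fragmented.
	 */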

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	atomic_sub(sum_truesize, &fq->q.net->mem);
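
	/*
	 * skb_try_coalesce() merges a fragment's data directly into
	 * head when there is room, freeing the donor skb; fragments
	 * that cannot be merged stay on head's frag_list instead.
	 * sum_truesize then returns all of that memory to the
	 * per-netns reassembly accounting in one step.
	 */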

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	int evicted;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
	if (evicted)
		IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_REASMFAILS, evicted);

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q, &ip6_frags);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol = {
	.handler	= ipv6_frag_rcv,
	.flags		= INET6_PROTO_NOPOLICY,
};
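
/*
 * Registered below for IPPROTO_FRAGMENT (the fragment extension
 * header, next-header value 44), so every received fragment header is
 * steered into ipv6_frag_rcv().
 */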

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
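
/*
 * The per-namespace knobs above surface as net.ipv6.ip6frag_high_thresh
 * and ip6frag_low_thresh (the memory bounds that drive the evictor)
 * and ip6frag_time (the reassembly timeout, stored in jiffies but read
 * and written as seconds via proc_dointvec_jiffies).
 * ip6frag_secret_interval controls how often the hash secret is
 * rekeyed and is global rather than per namespace.
 */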

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv6", table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
					     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}
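
/*
 * Note the unwind order above: a pernet registration failure tears
 * down the sysctls and the protocol handler that were already set up.
 * secret_interval = 10 * 60 * HZ rekeys the reassembly hash every ten
 * minutes to frustrate hash-collision attacks.
 */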

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}