/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *	Horst von Brand	Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ipv6_frag.h>
#include <net/inet_ecn.h>
static const char ip6_frag_cache_name[] = "ip6-frags";

static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}
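
/* Illustrative note (not from the original source): the helper above encodes
 * the 2-bit ECN codepoint as a one-hot bit so that the codepoints seen across
 * all fragments can simply be OR-ed together in fq->ecn and later mapped
 * through ip_frag_ecn_table at reassembly time.  For example, a fragment
 * carrying ECT(0) (codepoint 2) yields 1 << 2 = 0x4, and a queue that saw
 * both ECT(0) and CE (codepoint 3, 1 << 3 = 0x8) accumulates 0x4 | 0x8 = 0xc.
 */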
static struct inet_frags ip6_frags;

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev);
static void ip6_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;
	struct net *net;

	fq = container_of(frag, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ipv6.frags);

	ip6frag_expire_frag_queue(net, fq);
}
static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
{
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = IP6_DEFRAG_LOCAL_DELIVER,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
					     IPV6_ADDR_LINKLOCAL)))
		key.iif = 0;

	q = inet_frag_find(&net->ipv6.frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}
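
/* Illustrative note (not from the original source): fragments are grouped
 * into the same queue only when {id, saddr, daddr, user, iif} all match.
 * The incoming interface stays in the key only for link-local and multicast
 * destinations, where the address alone is ambiguous; for other destinations
 * key.iif is cleared above so fragments arriving on different interfaces can
 * still be reassembled together.
 */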
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff,
			  u32 *prob_offset)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	int offset, end, fragsize;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	int err = -ENOENT;
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	err = -EINVAL;
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
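
	/* Worked example (illustrative, not from the original source): for a
	 * fragment whose Fragment header has frag_off = 0x00b9 (offset field
	 * 23, M bit set) and whose IPv6 payload_len is 1240, with the
	 * Fragment header immediately following the IPv6 header:
	 *   offset = ntohs(0x00b9) & ~0x7 = 184   (23 * 8 bytes)
	 *   end    = 184 + (1240 - 8)     = 1416
	 * i.e. this fragment carries payload bytes [184, 1416) of the
	 * original packet, the 8 subtracted being the Fragment header itself.
	 */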
	if ((unsigned int)end > IPV6_MAXPLEN) {
		*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
		/* note that if prob_offset is set, the skb is freed elsewhere,
		 * we do not free it here.
		 */
		return -1;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto discard_fq;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
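		/* Illustrative note (not from the original source): every
		 * fragment except the last must carry a multiple of 8 bytes
		 * of payload, because the offset field counts 8-octet units.
		 * E.g. a non-final fragment covering bytes [0, 1444) has
		 * end = 1444, end & 0x7 = 4, and is rejected below with a
		 * Parameter Problem pointing at the IPv6 payload_len field.
		 */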
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			*prob_offset = offsetof(struct ipv6hdr, payload_len);
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto discard_fq;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto discard_fq;

	err = -ENOMEM;
	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data))
		goto discard_fq;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_fq;

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler wont do silly aliasing games */
	barrier();

	prev_tail = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err)
		goto insert_error;

	if (dev)
		fq->iif = dev->ifindex;

	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	fragsize = -skb_network_offset(skb) + skb->len;
	if (fragsize > fq->q.max_size)
		fq->q.max_size = fragsize;
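
	/* Illustrative note (not from the original source): fragsize measures
	 * this fragment from the start of its IPv6 header (the network offset
	 * is negative once skb->data points past the headers), so
	 * fq->q.max_size ends up holding the largest fragment seen and is
	 * copied into IP6CB(skb)->frag_max_size after reassembly, e.g. so
	 * later code can re-fragment without exceeding the observed size.
	 */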
	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip6_frag_reasm(fq, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

insert_error:
	if (err == IPFRAG_DUP) {
		kfree_skb(skb);
		return -EINVAL;
	}
	err = -EINVAL;
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_REASM_OVERLAPS);
discard_fq:
	inet_frag_kill(&fq->q);
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb(skb);
	return err;
}
/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	unsigned int nhoff;
	void *reasm_data;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto out_oom;
	payload_len = ((skb->data - skb_network_header(skb)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
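
	/* Worked example (illustrative, not from the original source): if the
	 * head skb's data points 48 bytes past the network header (40-byte
	 * IPv6 header plus 8-byte Fragment header) and the queue collected
	 * fq->q.len = 2000 payload bytes, then
	 *   payload_len = 48 - 40 + 2000 - 8 = 2000
	 * i.e. the value written back into the IPv6 payload_len field once
	 * the Fragment header has been removed.
	 */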
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
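	/* Illustrative sketch (not from the original source) of the
	 * relocation done below, for a packet with no other extension
	 * headers:
	 *
	 *   before:  [ IPv6 hdr (40) ][ Frag hdr (8) ][ payload ... ]
	 *   after:   ......8 bytes....[ IPv6 hdr (40) ][ payload ... ]
	 *
	 * The headers preceding the payload are shifted forward by 8 bytes
	 * with memmove(), the previous header's nexthdr byte is patched to
	 * the value that followed the Fragment header, and the header
	 * offsets are moved accordingly, so ICV calculation sees a packet
	 * without the Fragment header.
	 */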
	nhoff = fq->nhoffset;
	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	if (skb_mac_header_was_set(skb))
		skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);

	inet_frag_reasm_finish(&fq->q, skb, reasm_data);

	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->nhoff = nhoff;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
	IP6CB(skb)->frag_max_size = fq->q.max_size;

	/* Yes, and fold redundant checksum back. 8) */
	skb_postpush_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	rcu_read_lock();
	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	inet_frag_kill(&fq->q);
	return -1;
}
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	int iif;

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);
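
	/* Illustrative note (not from the original source): htons(0xFFF9)
	 * masks the fragment offset (upper 13 bits) plus the M flag of
	 * frag_off, skipping the two reserved bits.  A Fragment header with
	 * offset == 0 and M == 0 is an "atomic fragment": the packet is not
	 * actually fragmented, so it is passed on below after accounting for
	 * the 8-byte header.
	 */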
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		__IP6_INC_STATS(net,
				ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	iif = skb->dev ? skb->dev->ifindex : 0;
	fq = fq_find(net, fhdr->identification, hdr, iif);
	if (fq) {
		u32 prob_offset = 0;
		int ret;

		spin_lock(&fq->q.lock);

		fq->iif = iif;
		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
				     &prob_offset);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q);
		if (prob_offset) {
			__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
					IPSTATS_MIB_INHDRERRORS);
			/* icmpv6_param_prob() calls kfree_skb(skb) */
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
		}
		return ret;
	}

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
			IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
static const struct inet6_protocol frag_protocol = {
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};
#ifdef CONFIG_SYSCTL

static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &init_net.ipv6.frags.low_thresh
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &init_net.ipv6.frags.high_thresh
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
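
/* Illustrative note (not from the original source): the entries above show
 * up per network namespace under /proc/sys/net/ipv6/, e.g.
 *
 *   sysctl -w net.ipv6.ip6frag_high_thresh=4194304
 *   sysctl -w net.ipv6.ip6frag_time=60
 *
 * high_thresh/low_thresh bound the memory consumed by incomplete fragment
 * queues, and ip6frag_time is the reassembly timeout in seconds (converted
 * to jiffies by proc_dointvec_jiffies).
 */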
/* secret interval has been deprecated */
static int ip6_frags_secret_interval_unused;
static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data	= &net->ipv6.frags.high_thresh;
		table[0].extra1	= &net->ipv6.frags.low_thresh;
		table[0].extra2	= &init_net.ipv6.frags.high_thresh;
		table[1].data	= &net->ipv6.frags.low_thresh;
		table[1].extra2	= &net->ipv6.frags.high_thresh;
		table[2].data	= &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl(net, "net/ipv6", table);
	if (!hdr)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;
static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
					     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static int ip6_frags_sysctl_register(void)
{
	return 0;
}

static void ip6_frags_sysctl_unregister(void)
{
}
#endif
static int __net_init ipv6_frags_init_net(struct net *net)
{
	int res;

	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
	net->ipv6.frags.f = &ip6_frags;

	res = inet_frags_init_net(&net->ipv6.frags);
	if (res < 0)
		return res;

	res = ip6_frags_ns_sysctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&net->ipv6.frags);
	return res;
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags);
}
static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

static const struct rhashtable_params ip6_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};
int __init ipv6_frag_init(void)
{
	int ret;

	ip6_frags.constructor = ip6frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.frags_cache_name = ip6_frag_cache_name;
	ip6_frags.rhash_params = ip6_rhash_params;
	ret = inet_frags_init(&ip6_frags);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto err_protocol;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
err_protocol:
	inet_frags_fini(&ip6_frags);
	goto out;
}

void ipv6_frag_exit(void)
{
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	inet_frags_fini(&ip6_frags);
}