// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/tcp.h>
#include <linux/udp.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ipv6_frag.h>
#include <net/inet_ecn.h>
static const char ip6_frag_cache_name[] = "ip6-frags";
static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}
static struct inet_frags ip6_frags;
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev);
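
/* Per-queue timer callback: if the reassembly timeout elapses before all
 * fragments arrive, ip6frag_expire_frag_queue() drops the partial queue
 * and, when the first fragment has been received, reports a "fragment
 * reassembly time exceeded" ICMPv6 error back to the sender.
 */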
static void ip6_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}
static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
{
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = IP6_DEFRAG_LOCAL_DELIVER,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
					     IPV6_ADDR_LINKLOCAL)))
		key.iif = 0;

	q = inet_frag_find(net->ipv6.fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}
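
/* Fragments are correlated purely by the lookup key above: the fragment
 * Identification, source and destination addresses, the defrag user and,
 * for link-local or multicast destinations, the incoming interface.
 * inet_frag_find() resolves the key through the inet_frags rhashtable and
 * creates a new queue the first time an Identification is seen.
 */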
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff,
			  u32 *prob_offset)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	int offset, end, fragsize;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	int err = -ENOENT;
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
		/* note that if prob_offset is set, the skb is freed elsewhere,
		 * we do not free it here.
		 */
		return -1;
	}
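
	/* A worked example (values chosen for illustration): a fragment whose
	 * frag_off field reads 0x00b9 has offset 0x00b8 = 184 bytes and the
	 * MF bit set.  If the fragment header directly follows the basic
	 * IPv6 header and payload_len is 1240, then (fhdr + 1) - (ipv6_hdr + 1)
	 * is 8, so end = 184 + (1240 - 8) = 1416, i.e. the first byte past
	 * this fragment's data within the original unfragmented payload.
	 */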
	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}
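
	/* The running skb->csum still includes the bytes from the network
	 * header through the fragment header; their contribution is
	 * subtracted here because those headers are pulled off before the
	 * fragment data is queued.  After reassembly, skb_postpush_rcsum()
	 * folds the rebuilt network header back in.
	 */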
	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto discard_fq;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			*prob_offset = offsetof(struct ipv6hdr, payload_len);
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto discard_fq;
			fq->q.len = end;
		}
	}
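
	/* Every fragment except the last must carry a multiple of 8 bytes,
	 * because the Fragment Offset field counts 8-octet units.  The
	 * Parameter Problem pointer is aimed at payload_len above since that
	 * is the field whose value makes the fragment impossible to align.
	 */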
	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data))
		goto discard_fq;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_fq;
	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler won't do silly aliasing games */
	barrier();

	prev_tail = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err)
		goto insert_error;
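
	/* inet_frag_queue_insert() places the fragment into the per-queue
	 * rbtree of byte intervals.  It rejects exact duplicates (IPFRAG_DUP)
	 * and fragments that overlap previously queued data; both cases are
	 * handled on the insert_error path below.
	 */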
	if (dev)
		fq->iif = dev->ifindex;
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	fragsize = -skb_network_offset(skb) + skb->len;
	if (fragsize > fq->q.max_size)
		fq->q.max_size = fragsize;
	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}
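
	/* Reassembly is attempted only once both INET_FRAG_FIRST_IN and
	 * INET_FRAG_LAST_IN are set and the bytes accumulated so far (meat)
	 * equal the total datagram length; otherwise the skb is absorbed
	 * into the queue and -EINPROGRESS is returned below.
	 */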
	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip6_frag_reasm(fq, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;
insert_error:
	if (err == IPFRAG_DUP) {
		kfree_skb(skb);
		return -EINVAL;
	}
	err = -EINVAL;
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_REASM_OVERLAPS);
discard_fq:
	inet_frag_kill(&fq->q);
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb(skb);
	return err;
}
/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = fq->q.fqdir->net;
	unsigned int nhoff;
	void *reasm_data;
	int payload_len;
	u8 ecn;
	inet_frag_kill(&fq->q);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto out_oom;
	payload_len = ((skb->data - skb_network_header(skb)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;
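
	/* payload_len is the Payload Length of the reassembled datagram: any
	 * extension headers in front of the (about to be removed) fragment
	 * header, plus the total reassembled data length fq->q.len, minus
	 * the 8-byte fragment header itself.  For instance, when the
	 * fragment header immediately follows the basic IPv6 header,
	 * skb->data - skb_network_header(skb) is 48, so
	 * payload_len = 48 - 40 + fq->q.len - 8 = fq->q.len.
	 */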
	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	if (skb_mac_header_was_set(skb))
		skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);
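
	/* The fragment header has now been removed in place: its Next Header
	 * value was copied back into the preceding header's nexthdr byte (at
	 * nhoff), and everything before the payload was shifted forward by
	 * 8 bytes so that IPsec can compute the ICV over a header chain that
	 * no longer contains the fragment header.
	 */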
	inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->nhoff = nhoff;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
	IP6CB(skb)->frag_max_size = fq->q.max_size;
	/* Yes, and fold redundant checksum back. 8) */
	skb_postpush_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));

	__IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMOKS);
	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	__IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMFAILS);
	inet_frag_kill(&fq->q);
	return -1;
}
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	u8 nexthdr;
	int iif;
	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		__IP6_INC_STATS(net,
				ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}
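
	/* htons(0xFFF9) covers the 13-bit Fragment Offset (0xFFF8) plus the
	 * M ("more fragments") flag (0x0001).  A fragment header with offset
	 * zero and M clear is an "atomic fragment": the packet is already
	 * complete, so it is accepted directly above without creating a
	 * reassembly queue.
	 */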
	/* RFC 8200, Section 4.5 Fragment Header:
	 * If the first fragment does not include all headers through an
	 * Upper-Layer header, then that fragment should be discarded and
	 * an ICMP Parameter Problem, Code 3, message should be sent to
	 * the source of the fragment, with the Pointer field set to zero.
	 */
	nexthdr = hdr->nexthdr;
	if (ipv6frag_thdr_truncated(skb, skb_transport_offset(skb), &nexthdr)) {
		__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
		return -1;
	}
	iif = skb->dev ? skb->dev->ifindex : 0;
	fq = fq_find(net, fhdr->identification, hdr, iif);
	if (fq) {
		u32 prob_offset = 0;
		int ret;

		spin_lock(&fq->q.lock);

		fq->iif = iif;
		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
				     &prob_offset);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q);
		if (prob_offset) {
			__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
					IPSTATS_MIB_INHDRERRORS);
			/* icmpv6_param_prob() calls kfree_skb(skb) */
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
		}
		return ret;
	}
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
fail_hdr:
	__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
			IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
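
/* Return convention, as consumed by the IPv6 extension-header dispatch
 * loop: a positive return value asks the input path to continue parsing
 * at the next header of the now-reassembled packet, while a negative
 * value means the skb was consumed (queued, reported via ICMPv6, or
 * dropped).
 */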
static const struct inet6_protocol frag_protocol = {
	.handler	= ipv6_frag_rcv,
	.flags		= INET6_PROTO_NOPOLICY,
};
#ifdef CONFIG_SYSCTL

static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ip6frag_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ip6frag_time",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
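
/* These per-namespace knobs map onto the fqdir limits wired up in
 * ip6_frags_ns_sysctl_register() below: ip6frag_high_thresh and
 * ip6frag_low_thresh bound the memory a namespace may spend on queued
 * fragments, and ip6frag_time is the reassembly timeout (stored in
 * jiffies, exposed in seconds via proc_dointvec_jiffies).
 */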
/* secret interval has been deprecated */
static int ip6_frags_secret_interval_unused;
static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;
	}
	table[0].data	= &net->ipv6.fqdir->high_thresh;
	table[0].extra1	= &net->ipv6.fqdir->low_thresh;
	table[1].data	= &net->ipv6.fqdir->low_thresh;
	table[1].extra2	= &net->ipv6.fqdir->high_thresh;
	table[2].data	= &net->ipv6.fqdir->timeout;

	hdr = register_net_sysctl(net, "net/ipv6", table);
	if (!hdr)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}
static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
					     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static int ip6_frags_sysctl_register(void)
{
	return 0;
}

static void ip6_frags_sysctl_unregister(void)
{
}
#endif
static int __net_init ipv6_frags_init_net(struct net *net)
{
	int res;

	res = fqdir_init(&net->ipv6.fqdir, &ip6_frags, net);
	if (res < 0)
		return res;

	net->ipv6.fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = ip6_frags_ns_sysctl_register(net);
	if (res < 0)
		fqdir_exit(net->ipv6.fqdir);
	return res;
}
static void __net_exit ipv6_frags_pre_exit_net(struct net *net)
{
	fqdir_pre_exit(net->ipv6.fqdir);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	fqdir_exit(net->ipv6.fqdir);
}
static struct pernet_operations ip6_frags_ops = {
	.init		= ipv6_frags_init_net,
	.pre_exit	= ipv6_frags_pre_exit_net,
	.exit		= ipv6_frags_exit_net,
};
static const struct rhashtable_params ip6_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};
int __init ipv6_frag_init(void)
{
	int ret;

	ip6_frags.constructor = ip6frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.frags_cache_name = ip6_frag_cache_name;
	ip6_frags.rhash_params = ip6_rhash_params;
	ret = inet_frags_init(&ip6_frags);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto err_protocol;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
err_protocol:
	inet_frags_fini(&ip6_frags);
	goto out;
}
void ipv6_frag_exit(void)
{
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	inet_frags_fini(&ip6_frags);
}