// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *	Horst von Brand	Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/tcp.h>
#include <linux/udp.h>

#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ipv6_frag.h>
#include <net/inet_ecn.h>

static const char ip6_frag_cache_name[] = "ip6-frags";

static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

static struct inet_frags ip6_frags;

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev);
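
/* Per-queue timer callback: the queue timed out before all fragments
 * arrived, so the shared helper drops it and, when the first fragment is
 * present, reports "fragment reassembly time exceeded" to the sender.
 */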
static void ip6_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}
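
/* Look up (or create) the reassembly queue keyed on the fragment
 * identification, source/destination addresses and defrag user; the
 * incoming interface only takes part in the key for multicast and
 * link-local destinations.
 */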
static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
{
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = IP6_DEFRAG_LOCAL_DELIVER,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
					     IPV6_ADDR_LINKLOCAL)))
		key.iif = 0;

	q = inet_frag_find(net->ipv6.fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}
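
/* Validate one fragment and insert it into the queue's fragment tree.
 * Returns -EINPROGRESS while more fragments are awaited, the result of
 * ip6_frag_reasm() once the datagram is complete, or -1 with *prob_offset
 * set when the caller must send an ICMPv6 parameter problem instead.
 */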
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff,
			  u32 *prob_offset)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	int offset, end, fragsize;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	int err = -ENOENT;
	SKB_DR(reason);
	u8 ecn;

	/* If reassembly is already done, @skb must be a duplicate frag. */
	if (fq->q.flags & INET_FRAG_COMPLETE) {
		SKB_DR_SET(reason, DUP_FRAG);
		goto err;
	}
	err = -EINVAL;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
		/* note that if prob_offset is set, the skb is freed elsewhere,
		 * we do not free it here.
		 */
		return -1;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto discard_fq;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			*prob_offset = offsetof(struct ipv6hdr, payload_len);
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto discard_fq;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto discard_fq;

	err = -ENOMEM;
	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto discard_fq;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_fq;

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler wont do silly aliasing games */
	barrier();

	prev_tail = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err)
		goto insert_error;

	if (dev)
		fq->iif = dev->ifindex;

	fq->q.stamp = skb->tstamp;
	fq->q.tstamp_type = skb->tstamp_type;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	fragsize = -skb_network_offset(skb) + skb->len;
	if (fragsize > fq->q.max_size)
		fq->q.max_size = fragsize;

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip6_frag_reasm(fq, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

insert_error:
	if (err == IPFRAG_DUP) {
		SKB_DR_SET(reason, DUP_FRAG);
		err = -EINVAL;
		goto err;
	}
	err = -EINVAL;
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_REASM_OVERLAPS);
discard_fq:
	inet_frag_kill(&fq->q);
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb_reason(skb, reason);
	return err;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
			  struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = fq->q.fqdir->net;
	unsigned int nhoff;
	void *reasm_data;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto out_oom;

	payload_len = -skb_network_offset(skb) -
		      sizeof(struct ipv6hdr) + fq->q.len -
		      sizeof(struct frag_hdr);
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	if (skb_mac_header_was_set(skb))
		skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);

	inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);

	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->nhoff = nhoff;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
	IP6CB(skb)->frag_max_size = fq->q.max_size;

	/* Yes, and fold redundant checksum back. 8) */
	skb_postpush_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));

	rcu_read_lock();
	__IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	__IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	inet_frag_kill(&fq->q);
	return -1;
}
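
/* Handler for IPPROTO_FRAGMENT: pass non-fragmented (atomic) fragments
 * straight through, otherwise find the matching reassembly queue and feed
 * the fragment to ip6_frag_queue() under the queue lock.
 */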
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	u8 nexthdr;
	int iif;

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		__IP6_INC_STATS(net,
				ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
					    sizeof(struct ipv6hdr);
		return 1;
	}

	/* RFC 8200, Section 4.5 Fragment Header:
	 * If the first fragment does not include all headers through an
	 * Upper-Layer header, then that fragment should be discarded and
	 * an ICMP Parameter Problem, Code 3, message should be sent to
	 * the source of the fragment, with the Pointer field set to zero.
	 */
	nexthdr = hdr->nexthdr;
	if (ipv6frag_thdr_truncated(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr), &nexthdr)) {
		__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
				IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
		return -1;
	}

	iif = skb->dev ? skb->dev->ifindex : 0;
	fq = fq_find(net, fhdr->identification, hdr, iif);
	if (fq) {
		u32 prob_offset = 0;
		int ret;

		spin_lock(&fq->q.lock);

		fq->iif = iif;
		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
				     &prob_offset);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q);
		if (prob_offset) {
			__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
					IPSTATS_MIB_INHDRERRORS);
			/* icmpv6_param_prob() calls kfree_skb(skb) */
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
		}
		return ret;
	}

	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
			IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol = {
	.handler	= ipv6_frag_rcv,
	.flags		= INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
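
/* Per-netns sysctls under net.ipv6: the reassembly memory thresholds and
 * the per-queue timeout. The data pointers are wired up to each netns
 * fqdir in ip6_frags_ns_sysctl_register().
 */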
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ip6frag_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ip6frag_time",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
};

/* secret interval has been deprecated */
static int ip6_frags_secret_interval_unused;
static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;
	}

	table[0].data	= &net->ipv6.fqdir->high_thresh;
	table[0].extra1	= &net->ipv6.fqdir->low_thresh;
	table[1].data	= &net->ipv6.fqdir->low_thresh;
	table[1].extra2	= &net->ipv6.fqdir->high_thresh;
	table[2].data	= &net->ipv6.fqdir->timeout;

	hdr = register_net_sysctl_sz(net, "net/ipv6", table,
				     ARRAY_SIZE(ip6_frags_ns_ctl_table));
	if (!hdr)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	const struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
					     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static int ip6_frags_sysctl_register(void)
{
	return 0;
}

static void ip6_frags_sysctl_unregister(void)
{
}
#endif
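
/* Per-netns setup: allocate the fqdir, seed the default thresholds and
 * timeout, then expose them through the sysctl table above.
 */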
static int __net_init ipv6_frags_init_net(struct net *net)
{
	int res;

	res = fqdir_init(&net->ipv6.fqdir, &ip6_frags, net);
	if (res < 0)
		return res;

	net->ipv6.fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = ip6_frags_ns_sysctl_register(net);
	if (res < 0)
		fqdir_exit(net->ipv6.fqdir);
	return res;
}

static void __net_exit ipv6_frags_pre_exit_net(struct net *net)
{
	fqdir_pre_exit(net->ipv6.fqdir);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	fqdir_exit(net->ipv6.fqdir);
}

static struct pernet_operations ip6_frags_ops = {
	.init		= ipv6_frags_init_net,
	.pre_exit	= ipv6_frags_pre_exit_net,
	.exit		= ipv6_frags_exit_net,
};

static const struct rhashtable_params ip6_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};
int __init ipv6_frag_init(void)
{
	int ret;

	ip6_frags.constructor = ip6frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.frags_cache_name = ip6_frag_cache_name;
	ip6_frags.rhash_params = ip6_rhash_params;
	ret = inet_frags_init(&ip6_frags);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto err_protocol;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
err_protocol:
	inet_frags_fini(&ip6_frags);
	goto out;
}

void ipv6_frag_exit(void)
{
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	inet_frags_fini(&ip6_frags);
}