// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>
/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8		ecn;		/* RFC3168 support */
	u16		max_df_size;	/* largest frag with DF set seen */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};
static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}
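/* The two ECN bits of the TOS byte select one of four bits (1, 2, 4 or 8).
 * These per-fragment bits are OR-ed into ipq->ecn below, so the accumulated
 * value records which ECN codepoints were seen across all fragments, and at
 * reassembly time it indexes ip_frag_ecn_table to pick the final codepoint
 * (or 0xff for an invalid mix, per RFC 3168).  For example, fragments seen
 * as ECT(0) (codepoint 2) and CE (codepoint 3) accumulate
 * qp->ecn == (1 << 2) | (1 << 3).
 */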
static struct inet_frags ip4_frags;
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev);
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct net *net = q->fqdir->net;

	const struct frag_v4_compare_key *key = a;

	q->key.v4 = *key;
	qp->ecn = 0;
	qp->peer = q->fqdir->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
		NULL;
}
static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}
/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q);
}
/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q);
}
static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}
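/* AF_PACKET and conntrack defragment packets that may not be addressed to
 * this host (sniffed or forwarded traffic), so an ICMP "fragment reassembly
 * time exceeded" from here could be emitted for a datagram we were never
 * the destination of; those users are therefore excluded, in combination
 * with the RTN_LOCAL check in ip_expire() below.
 */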
/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	const struct iphdr *iph;
	struct sk_buff *head = NULL;
	struct net *net;
	struct ipq *qp;
	int err;

	qp = container_of(frag, struct ipq, q);
	net = qp->q.fqdir->net;

	rcu_read_lock();

	if (qp->q.fqdir->dead)
		goto out_rcu_unlock;

	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&qp->q);
	if (!head)
		goto out;
	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;

	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
				   iph->tos, head->dev);
	if (err)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

	spin_unlock(&qp->q.lock);
	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	goto out_rcu_unlock;

out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	kfree_skb(head);
	ipq_put(qp);
}
/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(net->ipv4.fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}
/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.fqdir->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments_tail && (end - start) > max;

	if (rc)
		__IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS);

	return rc;
}
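/* Worked example of the distance heuristic: peer->rid counts every fragment
 * this peer has sent us.  If the previous fragment of this queue arrived as
 * rid 100 (start) and the current one as rid 180 (end), then 80 fragments
 * of other datagrams from the same source arrived in between; with the
 * default max_dist of 64 set in ipv4_frags_init_net(), the queue is judged
 * stale and gets reinitialized by ip_frag_reinit().
 */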
static int ip_frag_reinit(struct ipq *qp)
{
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
	sub_frag_mem_limit(qp->q.fqdir, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct net *net = qp->q.fqdir->net;
	int ihl, end, flags, offset;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	unsigned int fragsize;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto discard_qp;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto discard_qp;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto discard_qp;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto discard_qp;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_qp;

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler wont do silly aliasing games */
	barrier();

	prev_tail = qp->q.fragments_tail;
	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
	if (err)
		goto insert_error;

	if (dev)
		qp->iif = dev->ifindex;

	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.fqdir, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		if (err)
			inet_frag_kill(&qp->q);
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

insert_error:
	if (err == IPFRAG_DUP) {
		err = -EINVAL;
		goto err;
	}
	err = -EINVAL;
	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
	inet_frag_kill(&qp->q);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb(skb);
	return err;
}
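/* A worked example of the frag_off arithmetic in ip_frag_queue() above: a
 * header with frag_off == htons(0x2010) has IP_MF (0x2000) set in `flags`
 * and an offset field of 0x0010, so this fragment's payload begins at
 * 0x10 * 8 = 128 bytes into the original datagram; `end` is then 128 plus
 * the payload length.
 */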
static bool ip_frag_coalesce_ok(const struct ipq *qp)
{
	return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
}
/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = qp->q.fqdir->net;
	struct iphdr *iph;
	void *reasm_data;
	int len, err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}

	/* Make the one we just received the head. */
	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
	if (!reasm_data)
		goto out_nomem;

	len = ip_hdrlen(skb) + qp->q.len;
	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	inet_frag_reasm_finish(&qp->q, skb, reasm_data,
			       ip_frag_coalesce_ok(qp));

	skb->dev = dev;
	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(skb);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);
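/* Typical call site (a sketch modeled on ip_local_deliver()): the caller
 * passes ownership of the skb.  A return of 0 means a fully reassembled
 * datagram is now in the skb; any other value means the fragment was
 * queued (-EINPROGRESS), a duplicate, or dropped, and the skb must not be
 * touched again:
 *
 *	if (ip_is_fragment(ip_hdr(skb))) {
 *		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
 *			return 0;
 *	}
 */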
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
				kfree_skb(skb);
				return NULL;
			}
			if (pskb_trim_rcsum(skb, netoff + len)) {
				kfree_skb(skb);
				return NULL;
			}
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);
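/* ip_check_defrag() differs from ip_defrag() in that it is safe to call on
 * shared skbs and simply passes non-fragments through.  Packet taps such as
 * AF_PACKET fanout (PACKET_FANOUT_FLAG_DEFRAG) use it so that all fragments
 * of one datagram hash to the same socket.
 */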
#ifdef CONFIG_SYSCTL
static int dist_min;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ipfrag_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "ipfrag_time",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &dist_min,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
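/* These knobs appear under /proc/sys/net/ipv4/, e.g.
 *	sysctl -w net.ipv4.ipfrag_high_thresh=8388608
 * The .data pointers of the per-namespace table are filled in by
 * ip4_frags_ns_ctl_register() below, so each netns tunes its own fqdir
 * limits independently.
 */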
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;
	}
	table[0].data	= &net->ipv4.fqdir->high_thresh;
	table[0].extra1	= &net->ipv4.fqdir->low_thresh;
	table[1].data	= &net->ipv4.fqdir->low_thresh;
	table[1].extra2	= &net->ipv4.fqdir->high_thresh;
	table[2].data	= &net->ipv4.fqdir->timeout;
	table[3].data	= &net->ipv4.fqdir->max_dist;

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}
static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif
static int __net_init ipv4_frags_init_net(struct net *net)
{
	int res;

	res = fqdir_init(&net->ipv4.fqdir, &ip4_frags, net);
	if (res < 0)
		return res;

	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code, (tries to) account for
	 * the real memory usage, by measuring both the size of frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024;
	net->ipv4.fqdir->low_thresh  = 3 * 1024 * 1024;
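	/* Sanity-checking the figure above: a 64KB datagram arrives as
	 * about 44 fragments of 1500 bytes, each accounted at a truesize
	 * of 2944 bytes, so 44 * 2944 + sizeof(struct ipq) = 129736 bytes;
	 * the 1MB gap between high_thresh and low_thresh therefore holds
	 * roughly 8 such reassemblies (8 * 129736 ~= 1MB).
	 */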
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
	 * by TTL.
	 */
	net->ipv4.fqdir->timeout = IP_FRAG_TIME;

	net->ipv4.fqdir->max_dist = 64;

	res = ip4_frags_ns_ctl_register(net);
	if (res < 0)
		fqdir_exit(net->ipv4.fqdir);
	return res;
}
static void __net_exit ipv4_frags_pre_exit_net(struct net *net)
{
	fqdir_pre_exit(net->ipv4.fqdir);
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	fqdir_exit(net->ipv4.fqdir);
}
static struct pernet_operations ip4_frags_ops = {
	.init		= ipv4_frags_init_net,
	.pre_exit	= ipv4_frags_pre_exit_net,
	.exit		= ipv4_frags_exit_net,
};
static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}
static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}
static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v4_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}
static const struct rhashtable_params ip4_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct frag_v4_compare_key),
	.hashfn			= ip4_key_hashfn,
	.obj_hashfn		= ip4_obj_hashfn,
	.obj_cmpfn		= ip4_obj_cmpfn,
	.automatic_shrinking	= true,
};
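/* Two hash functions are needed because lookups are performed with a bare
 * struct frag_v4_compare_key while the table stores whole inet_frag_queue
 * objects with the key embedded at key_offset; both must hash the same
 * bytes so a key and its object land in the same bucket.  obj_cmpfn
 * follows memcmp() conventions: 0 means the queue matches the key.
 */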
void __init ipfrag_init(void)
{
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	ip4_frags.rhash_params = ip4_rhash_params;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
}