// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */
#define pr_fmt(fmt) "IPv4: " fmt
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>
/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";
/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	struct inet_skb_parm	h;
	struct sk_buff		*next_frag;
	int			frag_run_len;
};
#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
static void ip4_frag_init_run(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));

	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}
/* Append skb to the last "run". */
static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
					struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}
/* Create a new "run" with the skb. */
static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
{
	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	ip4_frag_init_run(skb);
	q->fragments_tail = skb;
	q->last_run_head = skb;
}
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8		ecn; /* RFC3168 support */
	u16		max_df_size; /* largest frag with DF set seen */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};
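/* Map the ECN bits of the IP TOS field to a single bit (1 << codepoint),
 * so qp->ecn can accumulate the set of ECN codepoints seen across all
 * fragments and ip_frag_ecn_table[] can validate it at reassembly time.
 */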
static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}
static struct inet_frags ip4_frags;
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev);
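/* Constructor invoked by the inet_frag core when a new reassembly queue
 * is created: record the lookup key and take an inet_peer reference when
 * ipfrag_max_dist is enabled.
 */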
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct frag_v4_compare_key *key = a;

	q->key.v4 = *key;
	qp->ecn = 0;
	qp->peer = q->net->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
		NULL;
}
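/* Destructor counterpart of ip4_frag_init(): release the inet_peer
 * reference, if one was taken.
 */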
static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}
/* Destruction primitives. */
static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q);
}
/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q);
}
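/* Reassembly contexts (AF_PACKET, conntrack defrag) for which a timeout
 * should not generate an ICMP "fragment reassembly time exceeded" unless
 * the datagram was addressed to this host; see the RTN_LOCAL check in
 * ip_expire().
 */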
static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}
/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	const struct iphdr *iph;
	struct sk_buff *head = NULL;
	struct net *net;
	struct ipq *qp;
	int err;

	qp = container_of(frag, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	if (qp->q.fragments) {
		head = qp->q.fragments;
		qp->q.fragments = head->next;
	} else {
		head = skb_rb_first(&qp->q.rb_fragments);
		if (!head)
			goto out;
		if (FRAG_CB(head)->next_frag)
			rb_replace_node(&head->rbnode,
					&FRAG_CB(head)->next_frag->rbnode,
					&qp->q.rb_fragments);
		else
			rb_erase(&head->rbnode, &qp->q.rb_fragments);
		memset(&head->rbnode, 0, sizeof(head->rbnode));
		barrier();
	}
	if (head == qp->q.fragments_tail)
		qp->q.fragments_tail = NULL;

	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;

	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
				   iph->tos, head->dev);
	if (err)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

	spin_unlock(&qp->q.lock);
	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	goto out_rcu_unlock;

out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	kfree_skb(head);
	ipq_put(qp);
}
/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}
/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;
	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments_tail && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}
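/* Reset a stale queue so reassembly can start over: rearm the timer,
 * purge all fragments collected so far and return the queue to its
 * initial state.  Fails if the timer could not be rearmed.
 */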
static int ip_frag_reinit(struct ipq *qp)
{
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct rb_node **rbn, *parent;
	struct sk_buff *skb1, *prev_tail;
	struct net_device *dev;
	unsigned int fragsize;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto discard_qp;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto discard_qp;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto discard_qp;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto discard_qp;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_qp;

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler wont do silly aliasing games */
	barrier();

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 *
	 * We do the same here for IPv4 (and increment an snmp counter).
	 */

	err = -EINVAL;
	/* Find out where to put this fragment.  */
	prev_tail = qp->q.fragments_tail;
	if (!prev_tail)
		ip4_frag_create_run(&qp->q, skb);  /* First fragment. */
	else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
			goto overlap;
		if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
			ip4_frag_append_to_last_run(&qp->q, skb);
		else
			ip4_frag_create_run(&qp->q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		rbn = &qp->q.rb_fragments.rb_node;
		do {
			parent = *rbn;
			skb1 = rb_to_skb(parent);
			if (end <= skb1->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= skb1->ip_defrag_offset +
					   FRAG_CB(skb1)->frag_run_len)
				rbn = &parent->rb_right;
			else /* Found an overlap with skb1. */
				goto overlap;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		ip4_frag_init_run(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
	}

	if (dev)
		qp->iif = dev->ifindex;
	skb->ip_defrag_offset = offset;

	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.net, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		if (err)
			inet_frag_kill(&qp->q);
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

overlap:
	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
	inet_frag_kill(&qp->q);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb(skb);
	return err;
}
/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
	struct sk_buff **nextp; /* To build frag_list. */
	struct rb_node *rbn;
	int len;
	int ihlen;
	int delta;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(prev_tail)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&qp->q.rb_fragments);
		if (qp->q.fragments_tail == skb)
			qp->q.fragments_tail = fp;
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&qp->q.rb_fragments);
		consume_skb(head);
		head = skb;
	}

	WARN_ON(head->ip_defrag_offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	delta = - head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(qp->q.net, delta);

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &qp->q.rb_fragments);
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			*nextp = fp;
			nextp = &fp->next;
			fp->prev = NULL;
			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
			fp->sk = NULL;
			head->data_len += fp->len;
			head->len += fp->len;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);
			head->truesize += fp->truesize;
			fp = FRAG_CB(fp)->next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &qp->q.rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);
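/* Defragmentation entry point for callers outside the normal receive
 * path (e.g. packet sockets).  Works on a private copy of the IP header,
 * only touches the skb when it really is a fragment, and returns NULL
 * once the fragment has been consumed by the reassembly machinery.
 */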
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
				kfree_skb(skb);
				return NULL;
			}
			if (pskb_trim_rcsum(skb, netoff + len)) {
				kfree_skb(skb);
				return NULL;
			}
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);
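/* Free every fragment in the rb-tree of runs, including the next_frag
 * chains hanging off each run head, and return the total truesize
 * released so the caller can adjust the frag memory accounting.
 */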
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb(skb);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);
#ifdef CONFIG_SYSCTL
static int dist_min;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &init_net.ipv4.frags.max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &dist_min,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
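/* Register the per-namespace sysctls.  Namespaces other than init_net get
 * a copy of the template table with the .data/.extra pointers redirected
 * to their own netns_frags instance.
 */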
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;
		table[3].data = &net->ipv4.frags.max_dist;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}
static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif
static int __net_init ipv4_frags_init_net(struct net *net)
{
	int res;

	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code, (tries to) account for
	 * the real memory usage, by measuring both the size of frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
	 * by TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	net->ipv4.frags.max_dist = 64;
	net->ipv4.frags.f = &ip4_frags;

	res = inet_frags_init_net(&net->ipv4.frags);
	if (res < 0)
		return res;
	res = ip4_frags_ns_ctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&net->ipv4.frags);
	return res;
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags);
}
static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};
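/* rhashtable glue: queues are hashed and compared purely on the
 * frag_v4_compare_key embedded in struct inet_frag_queue.
 */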
static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}
static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v4_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}
static const struct rhashtable_params ip4_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct frag_v4_compare_key),
	.hashfn			= ip4_key_hashfn,
	.obj_hashfn		= ip4_obj_hashfn,
	.obj_cmpfn		= ip4_obj_cmpfn,
	.automatic_shrinking	= true,
};
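/* Boot-time initialization: hook the IPv4 callbacks into the shared
 * inet_frag core, then register the global sysctls and the per-namespace
 * operations.
 */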
void __init ipfrag_init(void)
{
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	ip4_frags.rhash_params = ip4_rhash_params;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
}