/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#define INETFRAGS_EVICT_BUCKETS	128
#define INETFRAGS_EVICT_MAX	512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
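
/* Usage sketch for the table above (an assumption modelled on the ipv4
 * reassembly code; exact caller code varies by kernel version): each
 * fragment's ToS byte is mapped to one IPFRAG_ECN_* bit and ORed into the
 * queue's accumulated ECN state, which then indexes this table on reassembly:
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;		// invalid combination: drop frame
 *	iph->tos |= ecn;		// else OR 0 or INET_ECN_CE into tos
 */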
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}
static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on hb_dest lock below, if it's
				 * already locked it will be released soon since
				 * other caller cannot be waiting for hb lock
				 * that we've taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}
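
/* Note on the rebuild above: lookups sample f->rnd under
 * read_seqbegin(&f->rnd_seqlock) and retry if the write side (the rebuild)
 * ran in between, so a reader never hashes with a stale secret against the
 * relocated chains.  See get_frag_bucket_locked() below.
 */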
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire((unsigned long) fq);

	return evicted;
}
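
/* Eviction above is deliberately two-phase: candidates are first collected
 * onto a private "expired" list via the separate list_evictor node (the
 * queue stays hashed), and f->frag_expire() runs only after the chain lock
 * is dropped, since the expire callback takes the queue's own lock and
 * unlinks it from the hash.  A failed del_timer() means the timer is
 * already firing and will expire that queue itself.
 */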
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}
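
/* The work_pending() test below is only an optimization to avoid touching
 * the workqueue when an eviction run is already queued; schedule_work()
 * itself is idempotent for already-pending work, so a lost race here is
 * harmless.
 */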
static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);
void inet_frags_init_net(struct netns_frags *nf)
{
	init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);
void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    percpu_counter_sum(&nf->mem))
		goto evict_again;

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}
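
/* The restart loop above pairs with inet_frag_secret_rebuild(): if the
 * secret (and therefore the hash placement) changed between sampling the
 * seqlock and taking the chain lock, the bucket we locked may no longer be
 * the one the queue hashes to, so we unlock and recompute.
 */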
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);
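
/* Refcount scheme for inet_frag_kill() above: one reference is held by the
 * pending timer and one by the hash table.  Stopping the timer drops the
 * timer's reference; unlinking drops the table's.  The caller still holds
 * its own lookup reference, released separately via inet_frag_put().
 */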
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could have been created on other cpu before
	 * we acquired hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}
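
/* Note: the allocation above uses GFP_ATOMIC because new queues are created
 * from the packet receive path (softirq context), and the high_thresh check
 * makes that path kick the eviction worker rather than block or overshoot
 * the configured memory limit.
 */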
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);
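
/* Typical lookup pattern in a caller (a sketch modelled on ipv4's ip_find();
 * names such as "ipq" and the exact arguments vary by protocol and kernel
 * version):
 *
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct ipq, q);
 */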
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);