/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>		/* si_meminfo(), used in inet_initpeers() */
#include <linux/net.h>
#include <linux/workqueue.h>

#include <net/inetpeer.h>
#include <net/secure_seq.h>
/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains
 *  long-living information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the
 *  next outgoing IP packet.  This field is incremented with each packet as
 *  encoded in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, the identifier of IP packets is made
 *  unpredictable by this code only for packets subject (actually or
 *  potentially) to defragmentation.  I.e. DF packets smaller than the PMTU
 *  use a constant ID and do not use this code (see ip_select_ident() in
 *  include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the avl tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference counter goes to 0.
 *  Once that has happened, the node may be removed after a sufficient amount
 *  of time has passed since its last use.  The less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A
 *  huge number of long-living nodes in a single hash slot would significantly
 *  delay lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND reference count being 0.
 *  3.  Global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		ip_id_count: atomic value (no lock needed)
 */
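/*
 * A minimal sketch of the expected caller pattern (illustrative only, not
 * code taken from this file).  A caller looks the peer up with create=1,
 * uses the long-living state while it holds the reference, then drops the
 * reference so the entry becomes reclaimable once its TTL expires:
 *
 *	struct inet_peer *peer;
 *
 *	peer = inet_getpeer(base, &daddr, 1);
 *	if (peer) {
 *		... use peer->metrics, peer->rate_tokens, peer->rate_last ...
 *		inet_putpeer(peer);
 *	}
 */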
static struct kmem_cache *peer_cachep __read_mostly;

static LIST_HEAD(gc_list);
static const int gc_delay = 60 * HZ;
static struct delayed_work gc_work;
static DEFINE_SPINLOCK(gc_lock);
#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};
void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->root = peer_avl_empty_rcu;
	seqlock_init(&bp->lock);
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);
static atomic_t v4_seq = ATOMIC_INIT(0);
static atomic_t v6_seq = ATOMIC_INIT(0);

static atomic_t *inetpeer_seq_ptr(int family)
{
	return (family == AF_INET ? &v4_seq : &v6_seq);
}
static inline void flush_check(struct inet_peer_base *base, int family)
{
	atomic_t *fp = inetpeer_seq_ptr(family);

	if (unlikely(base->flush_seq != atomic_read(fp))) {
		inetpeer_invalidate_tree(base);
		base->flush_seq = atomic_read(fp);
	}
}
void inetpeer_invalidate_family(int family)
{
	atomic_t *fp = inetpeer_seq_ptr(family);

	atomic_inc(fp);
}
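/*
 * Note: bumping the per-family sequence does not touch any tree directly.
 * Each inet_peer_base remembers the sequence it last saw in base->flush_seq;
 * the next lookup going through flush_check() notices the mismatch, calls
 * inetpeer_invalidate_tree() for that base and re-syncs flush_seq, so the
 * flush is applied lazily, one base at a time.
 */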
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
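/*
 * Deferred work that frees the subtrees handed over by
 * inetpeer_invalidate_tree().  The detached tree is flattened onto a local
 * list: each node's children are appended to the list and the child links
 * are cut; nodes whose refcnt has already dropped to zero are freed right
 * away, the rest are put back on gc_list and the work is rescheduled until
 * every node has been released.
 */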
static void inetpeer_gc_worker(struct work_struct *work)
{
	struct inet_peer *p, *n, *c;
	LIST_HEAD(list);

	spin_lock_bh(&gc_lock);
	list_replace_init(&gc_list, &list);
	spin_unlock_bh(&gc_lock);

	if (list_empty(&list))
		return;

	list_for_each_entry_safe(p, n, &list, gc_list) {

		c = rcu_dereference_protected(p->avl_left, 1);
		if (c != peer_avl_empty) {
			list_add_tail(&c->gc_list, &list);
			p->avl_left = peer_avl_empty_rcu;
		}

		c = rcu_dereference_protected(p->avl_right, 1);
		if (c != peer_avl_empty) {
			list_add_tail(&c->gc_list, &list);
			p->avl_right = peer_avl_empty_rcu;
		}

		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);

		if (!atomic_read(&p->refcnt)) {
			list_del(&p->gc_list);
			kmem_cache_free(peer_cachep, p);
		}
	}

	if (list_empty(&list))
		return;

	spin_lock_bh(&gc_lock);
	list_splice(&list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}
/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
}
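/*
 * Total order on peer addresses, used as the AVL key: an IPv4 address is
 * compared as a single 32-bit word, an IPv6 address as four words (a6[0]
 * first).  Returns -1, 0 or 1; the order only has to be consistent, not
 * numerically meaningful, since it is used purely for tree placement.
 */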
static int addr_compare(const struct inetpeer_addr *a,
			const struct inetpeer_addr *b)
{
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->addr.a6[i] == b->addr.a6[i])
			continue;
		if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
			return -1;
		return 1;
	}

	return 0;
}
#define rcu_deref_locked(X, BASE)				\
	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_deref_locked(_base->root, _base);		\
	     u != peer_avl_empty; ) {				\
		int cmp = addr_compare(_daddr, &u->daddr);	\
		if (cmp == 0)					\
			break;					\
		if (cmp == -1)					\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, _base);		\
	}							\
	u;							\
})
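/*
 * The _stack argument records the address of every child slot followed from
 * the root down to where the search stopped (stackptr ends up pointing one
 * past the last saved slot).  link_to_pool() and unlink_from_pool() reuse
 * this recorded path so that peer_avl_rebalance() can walk back toward the
 * root, re-checking heights and rotating where needed.
 */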
/*
 * Called with rcu_read_lock()
 * Because we hold no lock against a writer, it's quite possible we fall
 * in an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links exceeds PEER_MAXDEPTH
 */
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
				    struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		int cmp = addr_compare(daddr, &u->daddr);
		if (cmp == 0) {
			/* Before taking a reference, check if this entry was
			 * deleted (refcnt=-1)
			 */
			if (!atomic_add_unless(&u->refcnt, 1, -1))
				u = NULL;
			return u;
		}
		if (cmp == -1)
			u = rcu_dereference(u->avl_left);
		else
			u = rcu_dereference(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}
/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_deref_locked(*v, base);			\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, base);			\
	}							\
	u;							\
})
/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
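/*
 * Each iteration rebalances one node on the recorded path: when the left
 * subtree is two levels taller (lh > rh + 1) the node is rotated right,
 * either as a single rotation (ll at least as tall as lr) or as a double
 * left-right rotation through lr; the rh > lh + 1 case is the mirror image.
 * The RH/LH figures in the comments below are the heights of the shorter
 * side before the rotation.
 */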
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
			       struct inet_peer __rcu ***stackend,
			       struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_deref_locked(*nodep, base);
		l = rcu_deref_locked(node->avl_left, base);
		r = rcu_deref_locked(node->avl_right, base);
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_deref_locked(l->avl_left, base);
			lr = rcu_deref_locked(l->avl_right, base);
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* ll: RH, lr: RH+1 */
				lrl = rcu_deref_locked(lr->avl_left, base);	/* lrl: RH or RH-1 */
				lrr = rcu_deref_locked(lr->avl_right, base);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_deref_locked(r->avl_right, base);
			rl = rcu_deref_locked(r->avl_left, base);
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* rr: LH, rl: LH+1 */
				rlr = rcu_deref_locked(rl->avl_right, base);	/* rlr: LH or LH-1 */
				rll = rcu_deref_locked(rl->avl_left, base);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}
/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)
static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}
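/*
 * Remove p from the tree: classic binary-search-tree deletion.  The node's
 * slot is located with lookup() (which also rebuilds the path stack); if p
 * has no left child it is simply replaced by its right child, otherwise the
 * rightmost node of its left subtree (found with lookup_rightempty()) takes
 * its place.  The path is then rebalanced and the node is freed after an
 * RCU grace period.
 */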
static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
	struct inet_peer __rcu ***stackptr, ***delp;

	if (lookup(&p->daddr, stack, base) != p)
		BUG();
	delp = stackptr - 1; /* *delp[0] == p */
	if (p->avl_left == peer_avl_empty_rcu) {
		*delp[0] = p->avl_right;
		--stackptr;
	} else {
		/* look for a node to insert instead of p */
		struct inet_peer *t;
		t = lookup_rightempty(p, base);
		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
		**--stackptr = t->avl_left;
		/* t is removed, t->daddr > x->daddr for any
		 * x in p->avl_left subtree.
		 * Put t in the old place of p. */
		RCU_INIT_POINTER(*delp[0], t);
		t->avl_left = p->avl_left;
		t->avl_right = p->avl_right;
		t->avl_height = p->avl_height;
		BUG_ON(delp[1] != &p->avl_left);
		delp[1] = &t->avl_left; /* was &p->avl_left */
	}
	peer_avl_rebalance(stack, stackptr, base);
	base->total--;
	call_rcu(&p->rcu, inetpeer_free_rcu);
}
/* perform garbage collection on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
			struct inet_peer __rcu ***stackptr)
{
	struct inet_peer *p, *gchead = NULL;
	__u32 delta, ttl;
	int cnt = 0;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
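	/* With the default sysctls (minttl = 120 s, maxttl = 600 s) the TTL
	 * scales linearly with pool occupancy: an empty pool keeps idle
	 * entries for the full 10 minutes, at half the threshold the TTL is
	 * roughly 600 - 480/2 = 360 seconds, and at or above the threshold
	 * it drops to 0 so every unreferenced entry on the path is
	 * reclaimable.
	 */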
	stackptr--; /* last stack slot is peer_avl_empty */
	while (stackptr > stack) {
		stackptr--;
		p = rcu_deref_locked(**stackptr, base);
		if (atomic_read(&p->refcnt) == 0) {
			smp_rmb();
			delta = (__u32)jiffies - p->dtime;
			if (delta >= ttl &&
			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
				p->gc_next = gchead;
				gchead = p;
			}
		}
	}
	while ((p = gchead) != NULL) {
		gchead = p->gc_next;
		cnt++;
		unlink_from_pool(p, base, stack);
	}
	return cnt;
}
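/*
 * Main lookup entry point.  A lockless lookup under RCU is tried first and
 * validated against the base seqlock; only if that fails (or a new entry may
 * have to be created) is the write side of the seqlock taken, the lookup
 * repeated, a garbage-collection pass run over the nodes stacked by that
 * lookup, and, when @create is set, a new node allocated and linked in.
 */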
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer *p;
	unsigned int sequence;
	int invalidated, gccnt = 0;

	flush_check(base, daddr->family);

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	sequence = read_seqbegin(&base->lock);
	p = lookup_rcu(daddr, base);
	invalidated = read_seqretry(&base->lock, sequence);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* retry an exact lookup, taking the lock first.
	 * At least, nodes should be hot in our cache.
	 */
	write_seqlock_bh(&base->lock);
relookup:
	p = lookup(daddr, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		write_sequnlock_bh(&base->lock);
		return p;
	}
	if (!gccnt) {
		gccnt = inet_peer_gc(base, stack, stackptr);
		if (gccnt && create)
			goto relookup;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->daddr = *daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		atomic_set(&p->ip_id_count,
				(daddr->family == AF_INET) ?
					secure_ip_id(daddr->addr.a4) :
					secure_ipv6_id(daddr->addr.a6));
		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
		p->rate_tokens = 0;
		/* 60*HZ is arbitrary, but chosen high enough so that the first
		 * calculation of tokens is at its maximum.
		 */
		p->rate_last = jiffies - 60*HZ;
		INIT_LIST_HEAD(&p->gc_list);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
	p->dtime = (__u32)jiffies;
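	/* Make the new dtime visible before the reference count drops:
	 * inet_peer_gc() checks refcnt first and then (after smp_rmb())
	 * reads dtime, so a gc pass that sees a zero refcnt is guaranteed
	 * to compute the idle time from this store rather than a stale one.
	 */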
	smp_mb__before_atomic_dec();
	atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);
/*
 *	Check transmit rate limitation for given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations. This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
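/*
 * Roughly how the ICMP code is expected to use this (a simplified sketch
 * based on net/ipv4/icmp.c, with error handling elided):
 *
 *	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
 *	rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
 *	if (peer)
 *		inet_putpeer(peer);
 *
 * With the default 1 second ratelimit and XRLIM_BURST_FACTOR of 6 this lets
 * a burst of up to 6 messages through, then about one message per second.
 */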
static void inetpeer_inval_rcu(struct rcu_head *head)
{
	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);

	spin_lock_bh(&gc_lock);
	list_add_tail(&p->gc_list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}
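/*
 * Detach the whole tree from @base while holding the seqlock write side.
 * The old root is handed to inetpeer_inval_rcu() after a grace period, which
 * queues it on gc_list for inetpeer_gc_worker() to dismantle; readers still
 * inside a lockless lookup keep following valid, not-yet-freed nodes in the
 * meantime.
 */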
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct inet_peer *root;

	write_seqlock_bh(&base->lock);

	root = rcu_deref_locked(base->root, base);
	if (root != peer_avl_empty) {
		base->root = peer_avl_empty_rcu;
		base->total = 0;
		call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
	}

	write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);