/*
 *  INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>
/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain
 *  long-living information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the
 *  next outgoing IP packet.  This field is incremented with each packet
 *  as encoded in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, the identifier of an IP packet is
 *  generated to be unpredictable using this code only for packets that are
 *  (actually or potentially) subject to defragmentation.  I.e. DF packets
 *  smaller than the PMTU use a constant ID and do not use this code (see
 *  ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the avl tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when the reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  The less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  amount of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock held
 *      AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "inet_peer_unused_lock".
 *  4.  The global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer fields modification:
 *	avl_left, avl_right, avl_parent, avl_height: pool lock
 *	unused: unused node list lock
 *	refcnt: atomically against modifications on other CPUs;
 *	   usually under some other lock to prevent node disappearing
 *	dtime: unused node list lock
 *	v4daddr: unchangeable
 *	ip_id_count: idlock
 */
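/*
 *  An illustrative sketch of a node's life cycle (simplified; the real
 *  call path runs through __ip_select_ident() in net/ipv4/route.c, as
 *  noted above):
 *
 *	peer = inet_getpeer(daddr, 1);	// takes a reference
 *	id = inet_getid(peer, more);	// per-destination IP ID
 *	inet_putpeer(peer);		// drops the reference
 *
 *  Once the last reference is dropped, the node lands on the unused list,
 *  from which the periodic garbage-collection timer may reclaim it.
 */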
/* Exported for the inet_getid inline function.  */
DEFINE_SPINLOCK(inet_peer_idlock);

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	.avl_left	= &peer_fake_node,
	.avl_right	= &peer_fake_node,
	.avl_height	= 0
};
#define peer_avl_empty (&peer_fake_node)
static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
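/* Why 40 suffices: an AVL tree of height h holds at least F(h+2)-1 nodes
 * (Fibonacci), so the height of an n-node tree is bounded by roughly
 * 1.44 * log2(n).  Inverting that bound, a depth-40 stack accommodates
 * on the order of 2^(40/1.44) ~= 2^27 nodes, matching the comment above.
 */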
static int peer_total;
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

static LIST_HEAD(unused_peers);
static DEFINE_SPINLOCK(inet_peer_unused_lock);

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
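	/* Worked example of the scaling above: starting from 65536 + 128 =
	 * 65664 entries, a machine with <= 8MB of RAM matches all three
	 * tests, so the threshold ends up ((65664 >> 1) >> 1) >> 2 = 4104
	 * entries.
	 */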
	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
			NULL);

	/* All the timers started at system startup tend
	 * to synchronize.  Perturb this one a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}
/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	list_del_init(&p->unused);
	spin_unlock_bh(&inet_peer_unused_lock);
}
/*
 * Called with local BH disabled and the pool lock held.
 * _stack is known to be NULL or not at compile time,
 * so the compiler will optimize the if (_stack) tests.
 */
#define lookup(_daddr, _stack) 					\
({								\
	struct inet_peer *u, **v;				\
	if (_stack != NULL) {					\
		stackptr = _stack;				\
		*stackptr++ = &peer_root;			\
	}							\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (_daddr == u->v4daddr)			\
			break;					\
		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		if (_stack != NULL)				\
			*stackptr++ = v;			\
		u = *v;						\
	}							\
	u;							\
})
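/* The macro expands in the caller's scope and, when _stack is non-NULL,
 * records every link it follows in the caller's "stackptr", so that
 * peer_avl_rebalance() can later walk back up the visited chain.  The
 * read-only fast path in inet_getpeer() passes NULL and skips that
 * bookkeeping; the write-side paths pass a real stack.
 */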
/* Called with local BH disabled and the pool write lock held. */
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})
/* Called with local BH disabled and the pool write lock held.
 * The variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.  */
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}
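/* For reference: the two inner branches above are the classical AVL
 * rotations.  When the taller child is heavy on its outer side, a single
 * rotation (promoting l or r) restores balance; when it is heavy on its
 * inner side (the lr/rl cases), a double rotation promotes the grandchild
 * instead.
 */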
/* Called with local BH disabled and the pool write lock held. */
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while(0)
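/* Like lookup(), this macro relies on "stack" and "stackptr" from the
 * enclosing scope: the caller must first run lookup() with a real stack
 * (see inet_getpeer() below), leaving stackptr pointing just past the
 * slot where the new node belongs.
 */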
/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent the node from suddenly disappearing.
	 * If the reference count is still 1 then the node is referenced only
	 * as `p' here and from the pool.  So under the exclusive pool lock
	 * it's safe to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr, stack) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			BUG_ON(*stackptr[-1] != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}
/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p = NULL;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	if (!list_empty(&unused_peers)) {
		__u32 delta;

		p = list_first_entry(&unused_peers, struct inet_peer, unused);
		delta = (__u32)jiffies - p->dtime;

		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}

		list_del_init(&p->unused);

		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}
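/* Note that with ttl == 0 the "delta < ttl" test can never be true (delta
 * is unsigned), so cleanup_once(0) unconditionally reclaims the least-
 * recently-used unused entry; this is how inet_getpeer() sheds load once
 * peer_total reaches the threshold.
 */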
/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr, NULL);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found.
		 * Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	atomic_set(&n->rid, 0);
	n->ip_id_count = secure_ip_id(daddr);
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr, stack);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	INIT_LIST_HEAD(&n->unused);
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one less-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from the unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}
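/* A minimal caller sketch (illustrative only; the usual in-tree caller is
 * rt_bind_peer() in net/ipv4/route.c):
 *
 *	struct inet_peer *peer = inet_getpeer(daddr, 1);
 *	if (peer) {
 *		... use peer->ip_id_count, peer->tcp_ts_stamp ...
 *		inet_putpeer(peer);
 *	}
 *
 * With create == 0 the function only probes the pool and may return NULL
 * even for addresses that are actively exchanging traffic.
 */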
/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl;

	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
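	/* The expression interpolates linearly between maxttl and minttl
	 * as the pool fills; dividing by HZ before multiplying by
	 * peer_total keeps the intermediate product small.  With the
	 * defaults, a pool at half the threshold yields
	 * ttl = 600s - (600s - 120s)/2 = 360 seconds.
	 */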
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}
	/* Trigger the timer after an inet_peer_gc_mintime ..
	 * inet_peer_gc_maxtime interval depending on the total number of
	 * entries (more entries, shorter interval). */
	if (peer_total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}
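/* The rescheduling above mirrors the ttl computation.  With the default
 * gc_mintime of 10s and gc_maxtime of 120s: an empty pool re-arms the
 * timer in ~120s, a pool at half the threshold in 120s - 110s/2 = 65s,
 * and a pool at or over the threshold every 10s.
 */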
void inet_putpeer(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (atomic_dec_and_test(&p->refcnt)) {
		list_add_tail(&p->unused, &unused_peers);
		p->dtime = (__u32)jiffies;
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}