include/net/inet_hashtables.h
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards.
 * New scheme, half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	rwlock_t	  lock;
	struct hlist_head chain;
};
/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct inet_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
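
/*
 * Illustration only, not part of the kernel API: the point of tb->fastreuse
 * is that a bind-time fast path can skip walking tb->owners entirely.  A
 * minimal sketch, assuming a hypothetical helper that has already looked up
 * the bind bucket for the requested port:
 *
 *	static int can_fast_reuse(const struct inet_bind_bucket *tb,
 *				  const struct sock *newsk)
 *	{
 *		return tb->fastreuse > 0 &&
 *		       newsk->sk_reuse &&
 *		       newsk->sk_state != TCP_LISTEN;
 *	}
 *
 * When this returns true the port may be shared without looking at any of
 * the current owners; otherwise the bind path has to do the full conflict
 * check against the sockets on tb->owners.
 */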
#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * First half of the table is for sockets not in TIME_WAIT, second half
	 * is for TIME_WAIT sockets only.
	 */
	struct inet_ehash_bucket	*ehash;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	int				bhash_size;
	unsigned int			ehash_size;

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;
	kmem_cache_t			*bind_bucket_cachep;
};
static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}
extern struct inet_bind_bucket *
		    inet_bind_bucket_create(kmem_cache_t *cachep,
					    struct inet_bind_hashbucket *head,
					    const unsigned short snum);
extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
				     struct inet_bind_bucket *tb);
static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);
}
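
/*
 * Note (editorial illustration): both inet_ehash_bucket() and inet_bhashfn()
 * assume the table size is a power of two, so masking with (size - 1) is the
 * same as a modulo.  Worked example with hypothetical numbers: if
 * bhash_size == 512, local port 8080 maps to bucket 8080 & 511 == 400,
 * i.e. 8080 % 512.
 */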
extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
	return num & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(inet_sk(sk)->num);
}
/* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table,
				       struct sock *sk, struct sock *child)
{
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}
static inline void inet_inherit_port(struct inet_hashinfo *table,
				     struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__inet_inherit_port(table, sk, child);
	local_bh_enable();
}
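
/*
 * Usage sketch (illustrative, with hypothetical names): when a listening
 * socket accepts a connection, the new child socket uses the same local
 * port, so it is simply attached to the parent's existing bind bucket
 * instead of allocating a new one:
 *
 *	void example_attach_child(struct inet_hashinfo *table,
 *				  struct sock *listener, struct sock *newsk)
 *	{
 *		inet_inherit_port(table, listener, newsk);
 *	}
 *
 * Callers that already run with local BH disabled can use the
 * __inet_inherit_port() variant directly.
 */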
extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);
/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* read_lock synchronizes with candidate writers */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}
static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
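
/*
 * How these pieces fit together: a sleepable reader calls inet_listen_lock(),
 * which bumps lhash_users while briefly holding lhash_lock for reading, then
 * drops the lock so the reader may sleep.  The writer side,
 * inet_listen_wlock() (implemented out of line), takes lhash_lock for writing
 * and, roughly speaking, waits on lhash_wait until lhash_users reaches zero
 * before proceeding.  A sketch of that expected behaviour, not a copy of the
 * implementation:
 *
 *	write_lock(&hashinfo->lhash_lock);
 *	while (atomic_read(&hashinfo->lhash_users))
 *		... drop the lock, sleep on lhash_wait, re-take the lock ...
 *
 * inet_listen_unlock() wakes any such waiting writer when the last sleepable
 * reader goes away.
 */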
static inline void __inet_hash(struct inet_hashinfo *hashinfo,
			       struct sock *sk, const int listen_possible)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));
	if (listen_possible && sk->sk_state == TCP_LISTEN) {
		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &hashinfo->lhash_lock;
		inet_listen_wlock(hashinfo);
	} else {
		struct inet_ehash_bucket *head;
		sk->sk_hash = inet_sk_ehashfn(sk);
		head = inet_ehash_bucket(hashinfo, sk->sk_hash);
		list = &head->chain;
		lock = &head->lock;
		write_lock(lock);
	}
	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
	if (listen_possible && sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(hashinfo, sk, 1);
		local_bh_enable();
	}
}
static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	rwlock_t *lock;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
static inline int inet_iif(const struct sk_buff *skb)
{
	return ((struct rtable *)skb->dst)->rt_iif;
}
extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
					   const u32 daddr,
					   const unsigned short hnum,
					   const int dif);

static inline struct sock *inet_lookup_listener(struct inet_hashinfo *hashinfo,
						u32 daddr, u16 dport, int dif)
{
	return __inet_lookup_listener(hashinfo, daddr, ntohs(dport), dif);
}
/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport) << 16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport) << 16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __u64 __name = (((__u64)(__saddr)) << 32) | ((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && \
	 ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && \
	 ((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
	 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && \
	 (inet_sk(__sk)->daddr == (__saddr)) && \
	 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && \
	 (inet_twsk(__sk)->tw_daddr == (__saddr)) && \
	 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
	 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
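
/*
 * Editorial note on why the 64-bit variants are safe: they rely on daddr and
 * rcv_saddr being adjacent 32-bit words in struct inet_sock, and dport/num
 * being adjacent 16-bit words (likewise tw_daddr/tw_rcv_saddr and
 * tw_dport/tw_num in the timewait sock), so one wide load compares both
 * fields at once.  Given a cookie built by INET_ADDR_COOKIE(acookie, __saddr,
 * __daddr), the 64-bit INET_MATCH() address/port tests unfold to the same
 * checks the 32-bit version spells out:
 *
 *	inet_sk(__sk)->daddr     == (__saddr) &&
 *	inet_sk(__sk)->rcv_saddr == (__daddr) &&
 *	inet_sk(__sk)->dport     == packet source port (network order) &&
 *	inet_sk(__sk)->num       == local port (host order)
 *
 * where the last two are the halves packed into __ports by
 * INET_COMBINED_PORTS().
 */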
/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *
	__inet_lookup_established(struct inet_hashinfo *hashinfo,
				  const u32 saddr, const u16 sport,
				  const u32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(&head->lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
		if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(&head->lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}
static inline struct sock *
	inet_lookup_established(struct inet_hashinfo *hashinfo,
				const u32 saddr, const u16 sport,
				const u32 daddr, const u16 dport,
				const int dif)
{
	return __inet_lookup_established(hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif);
}
static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
					 const u32 saddr, const u16 sport,
					 const u32 daddr, const u16 dport,
					 const int dif)
{
	u16 hnum = ntohs(dport);
	struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
						    hnum, dif);

	return sk ? : __inet_lookup_listener(hashinfo, daddr, hnum, dif);
}
static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
				       const u32 saddr, const u16 sport,
				       const u32 daddr, const u16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(hashinfo, saddr, sport, daddr, dport, dif);
	local_bh_enable();

	return sk;
}
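
/*
 * Usage sketch (illustrative only): a receive path that already runs in
 * softirq context can demultiplex an incoming IPv4 segment roughly like
 * this, where "hashinfo" is the transport protocol's table and iph/th point
 * at the parsed IP and TCP headers of the skb:
 *
 *	struct sock *sk = __inet_lookup(hashinfo,
 *					iph->saddr, th->source,
 *					iph->daddr, th->dest,
 *					inet_iif(skb));
 *	if (sk) {
 *		... deliver the segment to sk ...
 *		sock_put(sk);	(the lookup took a reference)
 *	}
 *
 * Process-context callers use inet_lookup(), which wraps the same call in
 * local_bh_disable()/local_bh_enable().
 */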
extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
			     struct sock *sk);
#endif /* _INET_HASHTABLES_H */