/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>
/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb != NULL) {
		tb->ib_net    = hold_net(net);
		tb->port      = snum;
		tb->fastreuse = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}
/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(tb->ib_net);
		kmem_cache_free(cachep, tb);
	}
}
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}
void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}

EXPORT_SYMBOL(inet_put_port);
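/*
 * __inet_put_port() takes head->lock with a plain spin_lock(), so it
 * relies on its caller having BHs disabled -- presumably because the
 * bind hash is also touched from softirq context.  The exported
 * wrapper above supplies the local_bh_disable()/enable() pair.
 */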
void __inet_inherit_port(struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

EXPORT_SYMBOL_GPL(__inet_inherit_port);
/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look, when several writers sleep and a reader wakes them up, all but one
 * immediately hit the write lock and grab all the CPUs. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (wake up each
 * exclusive lock release). It should be ifdefed really.
 */
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
	__acquires(hashinfo->lhash_lock)
{
	write_lock(&hashinfo->lhash_lock);

	if (atomic_read(&hashinfo->lhash_users)) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&hashinfo->lhash_wait,
						  &wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&hashinfo->lhash_users))
				break;
			write_unlock_bh(&hashinfo->lhash_lock);
			schedule();
			write_lock_bh(&hashinfo->lhash_lock);
		}

		finish_wait(&hashinfo->lhash_wait, &wait);
	}
}
/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
static struct sock *inet_lookup_listener_slow(struct net *net,
					      const struct hlist_head *head,
					      const __be32 daddr,
					      const unsigned short hnum,
					      const int dif)
{
	struct sock *result = NULL, *sk;
	const struct hlist_node *node;
	int hiscore = -1;

	sk_for_each(sk, node, head) {
		const struct inet_sock *inet = inet_sk(sk);

		if (net_eq(sock_net(sk), net) && inet->num == hnum &&
				!ipv6_only_sock(sk)) {
			const __be32 rcv_saddr = inet->rcv_saddr;
			int score = sk->sk_family == PF_INET ? 1 : 0;

			if (rcv_saddr) {
				if (rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 5)
				return sk;
			if (score > hiscore) {
				hiscore	= score;
				result	= sk;
			}
		}
	}
	return result;
}
/* Optimize the common listener case. */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk = NULL;
	const struct hlist_head *head;

	read_lock(&hashinfo->lhash_lock);
	head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
	if (!hlist_empty(head)) {
		const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

		if (inet->num == hnum && !sk->sk_node.next &&
		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
		    !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
			goto sherry_cache;
		sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&hashinfo->lhash_lock);
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
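/*
 * The fast path above covers the usual case: exactly one listener in
 * the chain (!sk->sk_node.next), not bound to a device, and either a
 * wildcard or an exact local address match.  Only when a chain holds
 * several listeners, or the head entry does not trivially match, does
 * the slow scoring walk run.
 */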
struct sock * __inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, hash, acookie,
			       saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, net, hash, acookie,
				  saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
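/*
 * Each ehash bucket carries two chains hashed by the same 4-tuple:
 * ->chain for fully established sockets and ->twchain for TIME_WAIT
 * ones, so a single bucket lock covers the whole lookup.  On a hit
 * the socket is returned with its reference count raised; the caller
 * is expected to drop it with sock_put() when done.
 */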
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->rcv_saddr;
	__be32 saddr = inet->daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
	unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;
	struct net *net = sock_net(sk);

	prefetch(head->chain.first);
	write_lock(lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &head->twchain) {
		tw = inet_twsk(sk2);

		if (INET_TW_MATCH(sk2, net, hash, acookie,
				  saddr, daddr, ports, dif)) {
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET_MATCH(sk2, net, hash, acookie,
			       saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise we will see
	 * a socket with a funny identity in the hash table. */
	inet->num = lport;
	inet->sport = htons(lport);
	sk->sk_hash = hash;
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}

	return 0;

not_unique:
	write_unlock(lock);
	return -EADDRNOTAVAIL;
}
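/*
 * The twp out-parameter decides who kills a recyclable TIME_WAIT
 * entry: when the caller passes a pointer, the time-wait socket is
 * handed back for the caller to deschedule once it has released the
 * bucket lock; with twp == NULL it is descheduled right here.
 */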
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
					  inet->dport);
}
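/*
 * secure_ipv4_port_ephemeral() hashes the connection's addresses and
 * destination port together with a boot-time secret, so the starting
 * point of the ephemeral port search differs per destination and
 * cannot be predicted by an off-path attacker.
 */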
void __inet_hash_nolisten(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;
	struct inet_ehash_bucket *head;

	BUG_TRAP(sk_unhashed(sk));

	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	write_lock(lock);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;

	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk);
		return;
	}

	BUG_TRAP(sk_unhashed(sk));
	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
	lock = &hashinfo->lhash_lock;

	inet_listen_wlock(hashinfo);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
	wake_up(&hashinfo->lhash_wait);
}
void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);
void inet_unhash(struct sock *sk)
{
	rwlock_t *lock;
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
EXPORT_SYMBOL_GPL(inet_unhash);
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		void (*hash)(struct sock *sk))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->ib_net == net && tb->port == port) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
						     net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(port);
			hash(sk);
		}
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
	tb  = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
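/*
 * Port search summary: starting from a keyed offset, each candidate
 * port's bind bucket is inspected.  Buckets created here get
 * fastreuse == -1, marking ports owned only by connect()ing sockets;
 * such a port may be shared again as long as check_established()
 * proves the full 4-tuple is still unique.  Ports whose bucket has
 * fastreuse >= 0 belong to bind()/listen() users and are skipped
 * outright.
 */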
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
				   __inet_check_established, __inet_hash_nolisten);
}

EXPORT_SYMBOL_GPL(inet_hash_connect);
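/*
 * Typical use (a sketch): a connect() implementation such as TCP's
 * picks its source port with something like
 *
 *	err = inet_hash_connect(&tcp_death_row, sk);
 *
 * and __inet_hash_connect() then either reuses the already-bound port
 * or runs the ephemeral port search with the protocol's
 * check_established/hash callbacks plugged in.
 */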