/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
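
/* Hash for the established table: mixes the 4-tuple with a boot-time
 * random secret and a per-netns salt.
 */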
static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}
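
/* Record the bound port in the socket and link the socket onto the
 * bind bucket's owner list; caller holds the bucket lock.
 */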
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (unlikely(!tb)) {
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
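
/* Score a listening socket against an incoming request: a mismatched
 * local address or bound device disqualifies it, while exact address,
 * device and CPU matches raise the score above wildcard binds.
 */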
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, bool exact_dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
	    !ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;

		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if || exact_dif) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
		if (sk->sk_incoming_cpu == raw_smp_processor_id())
			score++;
	}
	return score;
}

/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore = 0, matches = 0, reuseport = 0;
	bool exact_dif = inet_exact_dif_match(net, skb);
	struct sock *sk, *result = NULL;
	u32 phash = 0;

	sk_for_each_rcu(sk, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
		if (score > hiscore) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				result = reuseport_select_sock(sk, phash,
							       skb, doff);
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			hiscore = score;
		} else if (score == hiscore && reuseport) {
			matches++;
			if (reciprocal_scale(phash, matches) == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share a common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!atomic_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);
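
/* Look up an established socket by its 4-tuple. Called under
 * rcu_read_lock(); on success a reference is taken on the socket.
 */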
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see in the
	 * hash table a socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}
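
/* Per-destination perturbation of the ephemeral port search offset,
 * derived from the connection's addresses and destination port.
 */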
static u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* insert a socket into ehash, and possibly remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket)
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	struct inet_ehash_bucket *head;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	}
	if (ret)
		__sk_nulls_add_node_rcu(sk, list);
	spin_unlock(lock);
	return ret;
}
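
/* Insert a non-listening socket into ehash; if the insert fails the
 * socket is marked dead and destroyed, so the caller must not touch
 * it afterwards.
 */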
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
{
	bool ok = inet_ehash_insert(sk, osk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		percpu_counter_inc(sk->sk_prot->orphan_count);
		sk->sk_state = TCP_CLOSE;
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
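
/* Join the reuseport group of a compatible listener already on this
 * chain (same family, device, bind bucket and owner uid), or allocate
 * a fresh group if none exists yet.
 */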
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each_rcu(sk2, &ilb->head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2);
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}
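
/* Hash the socket: non-listeners go straight to the established table,
 * listeners onto their listening hash chain, joining a reuseport group
 * first when requested.
 */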
int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		inet_ehash_nolisten(sk, osk);
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb);
		if (err)
			goto unlock;
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
	else
		hlist_add_head_rcu(&sk->sk_node, &ilb->head);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		err = __inet_hash(sk, NULL);
		local_bh_enable();
	}

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);
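
/* Remove the socket from its listening or established hash chain and
 * drop the protocol's inuse count accordingly.
 */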
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	bool listener = false;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
		listener = true;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	}
	spin_lock_bh(lock);
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);
	if (listener)
		done = __sk_del_node_init(sk);
	else
		done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);
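
/* Pick an ephemeral source port and insert the socket into ehash.
 * Starting from a per-destination offset into the local port range,
 * ports of one parity are scanned first, then the other; e.g. with
 * range [32768, 61000[ and offset 7, the even pass tries 32774,
 * 32776, ... before odd ports are considered. inet_csk_get_port()
 * scans the opposite parity first, which reduces collisions between
 * connect() and bind() port selection.
 */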
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_timewait_sock *tw = NULL;
	struct inet_bind_hashbucket *head;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	int ret, i, low, high;
	static u32 hint;

	if (port) {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		tb = inet_csk(sk)->icsk_bind_hash;
		spin_lock_bh(&head->lock);
		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
			inet_ehash_nolisten(sk, NULL);
			spin_unlock_bh(&head->lock);
			return 0;
		}
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	inet_get_local_port_range(net, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = (hint + port_offset) % remaining;
	/* In first pass we try ports of @low parity.
	 * inet_csk_get_port() does the opposite choice.
	 */
	offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), net) && tb->port == port) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->owners));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset++;
	if ((offset & 1) && remaining > 1)
		goto other_parity_scan;

	return -EADDRNOTAVAIL;

ok:
	hint += i + 2;

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, port);
	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);
	spin_unlock(&head->lock);
	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u32 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
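
/* Initialize the listening hash buckets; the established hash itself
 * is sized and allocated separately by the protocol.
 */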
void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_HEAD(&h->listening_hash[i].head);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
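
/* Allocate the spinlock array protecting the established hash chains,
 * scaled with the number of possible CPUs and capped at one lock per
 * bucket; nblocks stays a power of two so a mask can select the lock.
 */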
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
						      GFP_KERNEL | __GFP_NOWARN);
		if (!hashinfo->ehash_locks)
			hashinfo->ehash_locks = vmalloc(nblocks * locksz);

		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);