/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/sock_reuseport.h>

static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		tb->num_owners = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

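/* Tie @sk to bind bucket @tb: record the local port, add the socket to the
 * bucket's owner list, and cache the bucket in icsk_bind_hash so that
 * __inet_put_port() can release it quickly later.
 */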
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	tb->num_owners++;
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	tb->num_owners--;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

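/* Propagate the listener's bind bucket to a child socket at accept time.
 * The child normally shares the parent's icsk_bind_hash; with tproxy
 * redirection the ports can differ, in which case a matching bucket is
 * looked up or created (see the NOTE below).
 */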
int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (unlikely(!tb)) {
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

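/* Score a candidate listener against an incoming packet: a socket in the
 * wrong netns, bound to another port, or IPv6-only scores -1 and can never
 * match.  Otherwise an AF_INET socket starts at 2 and an AF_INET6 one at 1;
 * an exactly matching bound address adds 4 (a mismatch disqualifies), a
 * matching bound device likewise adds 4, and CPU affinity adds one more.
 */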
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
	    !ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;

		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
		if (sk->sk_incoming_cpu == raw_smp_processor_id())
			score++;
	}
	return score;
}

/*
 * Don't inline this cruft.  There are some nice properties to exploit here:
 * the BSD API does not allow a listening socket to specify the remote port
 * or the remote address for the connection, so always assume both are
 * wildcarded during the search since they can never be otherwise.
 */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore, matches = 0, reuseport = 0;
	bool select_ok = true;
	u32 phash = 0;

	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				if (select_ok) {
					struct sock *sk2;

					sk2 = reuseport_select_sock(sk, phash,
								    skb, doff);
					if (sk2) {
						result = sk2;
						goto found;
					}
				}
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			matches++;
			if (reciprocal_scale(phash, matches) == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
found:
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
						dif) < hiscore)) {
			sock_put(result);
			select_ok = false;
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!atomic_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

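/* skb destructor used by the early-demux path: drops the reference that
 * was taken on skb->sk when the socket was attached to the skb.
 */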
void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

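/* Lockless (RCU) lookup in the established hash.  On a hit the refcount is
 * taken with atomic_inc_not_zero() and the keys are then re-checked, since
 * the slot may have been recycled for another flow in the meantime.
 */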
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

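/* Check whether the chosen source port makes @sk's 4-tuple unique in the
 * established hash, i.e. that the connect will not duplicate an existing
 * flow.  A conflicting TIME_WAIT socket may be recycled when twsk_unique()
 * allows it; on success the socket is hashed with its new identity.
 */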
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now.  Otherwise we will see in the
	 * hash table a socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

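/* Per-flow starting offset for the ephemeral port search, derived from the
 * secure sequence machinery so that connects to different destinations
 * spread across the local port range.
 */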
static u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	struct inet_ehash_bucket *head;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	}
	if (ret)
		__sk_nulls_add_node_rcu(sk, list);
	spin_unlock(lock);
	return ret;
}

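/* Like inet_ehash_insert(), but also accounts the socket as in use; if the
 * insert fails because @osk was already gone, the new socket is orphaned
 * and destroyed instead of being hashed.
 */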
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
{
	bool ok = inet_ehash_insert(sk, osk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		percpu_counter_inc(sk->sk_prot->orphan_count);
		sk->sk_state = TCP_CLOSE;
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

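/* Join an existing SO_REUSEPORT group on this listening chain if a
 * compatible socket (same family, device, owning uid and local address)
 * is found, otherwise start a new group for @sk.
 */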
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb,
				   int (*saddr_same)(const struct sock *sk1,
						     const struct sock *sk2,
						     bool match_wildcard))
{
	struct sock *sk2;
	struct hlist_nulls_node *node;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    saddr_same(sk, sk2, false))
			return reuseport_add_sock(sk, sk2);
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

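/* Hash a socket: non-listeners go straight into ehash, listeners are added
 * to the listening hash under its bucket lock, joining a reuseport group
 * first when SO_REUSEPORT is set.
 */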
int __inet_hash(struct sock *sk, struct sock *osk,
		int (*saddr_same)(const struct sock *sk1,
				  const struct sock *sk2,
				  bool match_wildcard))
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		inet_ehash_nolisten(sk, osk);
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb, saddr_same);
		if (err)
			goto unlock;
	}
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		err = __inet_hash(sk, NULL, ipv4_rcv_saddr_equal);
		local_bh_enable();
	}

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

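/* Remove a socket from whichever hash it lives in (listening or
 * established), detaching it from its reuseport group first, and adjust
 * the protocol inuse count if it was actually hashed.
 */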
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);

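/* Bind @sk to the port it already holds, or search the local port range for
 * a free one.  The first pass scans ports of the same parity as the range
 * start (inet_csk_get_port() prefers the opposite parity, halving collisions
 * between connect() and bind() users); each candidate is accepted only once
 * check_established() has proven the resulting 4-tuple unique.
 */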
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_timewait_sock *tw = NULL;
	struct inet_bind_hashbucket *head;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	int ret, i, low, high;
	static u32 hint;

	if (port) {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		tb = inet_csk(sk)->icsk_bind_hash;
		spin_lock_bh(&head->lock);
		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
			inet_ehash_nolisten(sk, NULL);
			spin_unlock_bh(&head->lock);
			return 0;
		}
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	inet_get_local_port_range(net, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = (hint + port_offset) % remaining;
	/* In first pass we try ports of @low parity.
	 * inet_csk_get_port() does the opposite choice.
	 */
	offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), net) && tb->port == port) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->owners));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset++;
	if ((offset & 1) && remaining > 1)
		goto other_parity_scan;

	return -EADDRNOTAVAIL;

ok:
	hint += i + 2;

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, port);
	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);
	spin_unlock(&head->lock);
	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u32 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);

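/* Size the ehash lock array: scale with the number of possible CPUs but cap
 * at one lock per hash bucket, and round up to a power of two so that
 * ehash_locks_mask works as a cheap modulus in inet_ehash_lockp().
 */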
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
						      GFP_KERNEL | __GFP_NOWARN);
		if (!hashinfo->ehash_locks)
			hashinfo->ehash_locks = vmalloc(nblocks * locksz);

		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);