/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
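/* Worked example of the sizing above (my numbers, not from the original
 * source): with the default locks_mul of 32 on a 4-CPU machine,
 * size = roundup_pow_of_two(4 * 32) = 128 locks; for a 64-bucket table the
 * cap then reduces this to 64 >> 1 = 32 locks, honouring the
 * 0.5-locks-per-bucket limit.
 */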
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}
static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		rhashtable_rehash_chain(ht, old_hash);
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = ht->elasticity;
	pprev = &tbl->buckets[hash];
	rht_for_each(head, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
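/* A minimal usage sketch for the walk API declared in <linux/rhashtable.h>
 * (struct test_obj, my_ht and the enclosing context are hypothetical, not
 * part of this file).  It shows the required enter/start/next/stop/exit
 * protocol, including retrying on the -EAGAIN rewind that a concurrent
 * resize can cause; a -EAGAIN from rhashtable_walk_start itself may be
 * ignored, as the iterator has simply rewound and remains usable:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	do {
 *		rhashtable_walk_start(&iter);
 *
 *		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *			if (IS_ERR(obj))
 *				break;
 *			pr_info("key %d\n", obj->key);
 *		}
 *
 *		rhashtable_walk_stop(&iter);
 *	} while (PTR_ERR(obj) == -EAGAIN);
 *	rhashtable_walk_exit(&iter);
 */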
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}
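/* Worked example of the sizing above (numbers are mine): nelem_hint = 100
 * gives roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256
 * buckets, keeping the initial table below 75% utilisation; with no hint
 * the table starts at HASH_DEFAULT_SIZE (64), subject to min_size.
 */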
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
->insecure_elasticity
)
897 if (params
->locks_mul
)
898 ht
->p
.locks_mul
= roundup_pow_of_two(params
->locks_mul
);
900 ht
->p
.locks_mul
= BUCKET_LOCKS_PER_CPU
;
902 ht
->key_len
= ht
->p
.key_len
;
903 if (!params
->hashfn
) {
904 ht
->p
.hashfn
= jhash
;
906 if (!(ht
->key_len
& (sizeof(u32
) - 1))) {
907 ht
->key_len
/= sizeof(u32
);
908 ht
->p
.hashfn
= rhashtable_jhash2
;
912 tbl
= bucket_table_alloc(ht
, size
, GFP_KERNEL
);
916 atomic_set(&ht
->nelems
, 0);
918 RCU_INIT_POINTER(ht
->tbl
, tbl
);
920 INIT_WORK(&ht
->run_work
, rht_deferred_worker
);
924 EXPORT_SYMBOL_GPL(rhashtable_init
);
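/* A minimal end-to-end sketch (struct test_obj, my_ht, my_objs and the
 * error handling are hypothetical; rhashtable_insert_fast and
 * rhashtable_lookup_fast are the fast-path helpers from
 * <linux/rhashtable.h>, which take the params struct by value):
 *
 *	static struct rhashtable my_ht;
 *
 *	static const struct rhashtable_params my_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset  = offsetof(struct test_obj, key),
 *		.key_len     = sizeof(int),
 *	};
 *
 *	int example(void)
 *	{
 *		struct test_obj *obj;
 *		int key = 1, err;
 *
 *		err = rhashtable_init(&my_ht, &my_params);
 *		if (err)
 *			return err;
 *
 *		err = rhashtable_insert_fast(&my_ht, &my_objs[0].node,
 *					     my_params);
 *		if (err)
 *			return err;
 *
 *		obj = rhashtable_lookup_fast(&my_ht, &key, my_params);
 *		return obj ? 0 : -ENOENT;
 *	}
 */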
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
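/* A short sketch of a @free_fn callback (struct test_obj and my_ht are
 * hypothetical); @arg is passed through unchanged, so it can carry a
 * context pointer or simply be NULL:
 *
 *	static void free_test_obj(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *
 *		kfree(obj);
 *	}
 *
 *	rhashtable_free_and_destroy(&my_ht, free_test_obj, NULL);
 */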
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);