/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}
static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err == -EAGAIN)
		schedule_work(&ht->run_work);
}
static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}
int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL) {
		/* Schedule async resize/rehash to try allocation
		 * in non-atomic context.
		 */
		schedule_work(&ht->run_work);
		return -ENOMEM;
	}

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:
	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
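
/* Usage sketch for the walker API above. This is illustrative only and not
 * part of this file's exported interface: "struct test_obj" and its "key"
 * and "node" members are hypothetical, and error handling is abbreviated.
 * A resize during the walk makes rhashtable_walk_next() return
 * ERR_PTR(-EAGAIN) with the iterator rewound, so the loop simply continues.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("visited key %d\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */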
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
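
/* Usage sketch tying the configuration examples above to the inline fast-path
 * helpers declared in <linux/rhashtable.h> (rhashtable_insert_fast,
 * rhashtable_lookup_fast, rhashtable_remove_fast). "struct test_obj" and
 * "test_rht_params" are hypothetical names used only for illustration, and
 * error handling is abbreviated. Lookups must run under rcu_read_lock().
 *
 *	static const struct rhashtable_params test_rht_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj, *found;
 *	int key = 42;
 *
 *	rhashtable_init(&ht, &test_rht_params);
 *
 *	rhashtable_insert_fast(&ht, &obj->node, test_rht_params);
 *
 *	rcu_read_lock();
 *	found = rhashtable_lookup_fast(&ht, &key, test_rht_params);
 *	rcu_read_unlock();
 *
 *	rhashtable_remove_fast(&ht, &obj->node, test_rht_params);
 */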
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
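
/* Teardown sketch for rhashtable_free_and_destroy() (illustrative): a
 * hypothetical free_fn that kfree()s each element; the arg pointer is unused
 * here. Objects may still be reachable by RCU readers, so a real callback
 * would typically defer the actual freeing, e.g. via kfree_rcu().
 *
 *	static void test_obj_free(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, test_obj_free, NULL);
 */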
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);