// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head *bucket;
};
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}
static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}
static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}
static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}
static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
	int err;

	if (!bkt)
		return 0;
	rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt);

	return err;
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
		    new_tbl) != NULL)
		return -EEXIST;

	return 0;
}
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct rhash_lock_head **bkt,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve.
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head **bkt;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'.
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is API initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
					     unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;
	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
					   unsigned int hash)
{
	static struct rhash_lock_head *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);