/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

struct rhash_head {
	struct rhash_head __rcu		*next;
};

struct rhlist_head {
	struct rhash_head		rhead;
	struct rhlist_head __rcu	*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		insecure_max_entries;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};

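/*
 * Example: a minimal parameter block for a table of objects keyed by an
 * int. This is an illustrative sketch modelled on lib/test_rhashtable.c;
 * "struct test_obj" and "test_params" are hypothetical names, not part of
 * this API:
 *
 *	struct test_obj {
 *		int			value;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len	= sizeof(int),
 *		.key_offset	= offsetof(struct test_obj, value),
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.automatic_shrinking = true,
 *	};
 */
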
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @rhlist: True if this is an rhltable
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	bool				rhlist;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};

/**
 * struct rhltable - Hash table with duplicate objects in a list
 * @ht: Underlying rhashtable
 */
struct rhltable {
	struct rhashtable ht;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator
 * @ht: Table to iterate through
 * @p: Current pointer
 * @list: Current hash list pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhlist_head *list;
	struct rhashtable_walker walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}

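/*
 * Worked example for the watermarks above, assuming tbl->size == 1024 and
 * no max_size cap: rht_grow_above_75() fires once nelems exceeds 768
 * (1024 / 4 * 3), rht_grow_above_100() once nelems exceeds 1024, and
 * rht_shrink_below_30() once nelems drops below 307 (1024 * 3 / 10,
 * integer division) while the table is still above min_size.
 */
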
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This allows the locking to be simplified: locking the bucket in both
 * tables during resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt,
		  const struct rhashtable_params *params);

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

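/*
 * Example walk over a whole table (an illustrative sketch; "struct
 * test_obj", its "value" member and "my_ht" are hypothetical). A walk
 * tolerates concurrent mutations but may then see duplicates or miss
 * entries; ERR_PTR(-EAGAIN) from rhashtable_walk_next() only signals that
 * a resize happened and iteration may continue:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("value %d\n", obj->value);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */
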
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash);		 \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	 \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash),   \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		return he;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

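/*
 * Example lookup (illustrative; "my_ht", "test_params" and "struct
 * test_obj" are hypothetical). The object may be freed as soon as the
 * RCU read lock is dropped, so it must not be used past rcu_read_unlock()
 * without other guarantees:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key, test_params);
 *	if (obj)
 *		pr_info("found %d\n", obj->value);
 *	rcu_read_unlock();
 */
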
/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}

/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}

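/*
 * Example of walking all duplicates for one key (illustrative; "my_hlt",
 * "test_params" and "struct test_obj" with an rhlist_head member "list"
 * are hypothetical):
 *
 *	struct rhlist_head *h, *pos;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	h = rhltable_lookup(&my_hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, h, list)
 *		pr_info("duplicate with value %d\n", obj->value);
 *	rcu_read_unlock();
 */
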
/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in hashes if there is a
 * clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = ht->elasticity;
	pprev = &tbl->buckets[hash];
	rht_for_each(head, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

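/*
 * Example insertion with error handling (illustrative; names are
 * hypothetical). -EEXIST indicates a duplicate key; other errors such as
 * -ENOMEM can be returned when the table cannot be grown:
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->value = 42;
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 *	return err;
 */
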
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

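/*
 * Example of consuming the tri-state result (illustrative; names are
 * hypothetical):
 *
 *	struct test_obj *old;
 *
 *	old = rhashtable_lookup_get_insert_key(&my_ht, &key, &obj->node,
 *					       test_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	/* insertion failed */
 *	if (old)
 *		return -EEXIST;		/* key held by "old", obj not added */
 *	return 0;			/* obj inserted */
 */
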
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below the 30% watermark.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}

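/*
 * Example removal paired with RCU-deferred freeing (illustrative; names
 * are hypothetical, and "rcu" is a struct rcu_head member of the object).
 * The object cannot be freed immediately because concurrent RCU readers
 * may still be traversing it:
 *
 *	if (rhashtable_remove_fast(&my_ht, &obj->node, test_params) == 0)
 *		kfree_rcu(obj, rcu);
 */
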
/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below the 30% watermark.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/* Obsolete function, do not use in new code. */
static inline int rhashtable_walk_init(struct rhashtable *ht,
				       struct rhashtable_iter *iter, gfp_t gfp)
{
	rhashtable_walk_enter(ht, iter);
	return 0;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

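/*
 * Example teardown callback (illustrative; assumes every element was
 * allocated with kmalloc and may simply be freed):
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhltable_free_and_destroy(&my_hlt, free_obj, NULL);
 */
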
static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */