/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
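
/* A worked sizing example, assuming 4 possible CPUs and the default
 * locks_mul of BUCKET_LOCKS_PER_CPU (128): alloc_bucket_locks() starts
 * from roundup_pow_of_two(4 * 128) = 512 locks, then clamps to half the
 * bucket count, so a 256-bucket table ends up with
 * min(512, 256 >> 1) = 128 bucket locks.
 */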
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}
static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
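
/* A worked shrink example, assuming 100 live elements and a min_size below
 * the result: size = roundup_pow_of_two(100 * 3 / 2) =
 * roundup_pow_of_two(150) = 256, so a table currently at 1024 buckets
 * would be rehashed down to 256.
 */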
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}
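
/* With the default elasticity of 16 (set in rhashtable_init() below when
 * insecure_elasticity is not requested), walking a bucket chain that
 * already holds 16 entries returns true here, making
 * rhashtable_insert_slow() fail with -EAGAIN so that its caller can fall
 * back to a rehash.
 */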
int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL) {
		/* Schedule async resize/rehash to retry the allocation in a
		 * non-atomic context.
		 */
		schedule_work(&ht->run_work);
		return -ENOMEM;
	}

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:
	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
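
/* A minimal walk sketch, assuming a table "ht" holding the struct test_obj
 * entries from the rhashtable_init() examples below. -EAGAIN from
 * rhashtable_walk_start() or rhashtable_walk_next() only means the table
 * was resized and the iterator was rewound, so the walk can continue:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */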
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
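
/* An end-to-end sketch based on Configuration Example 1 above (a
 * hypothetical caller; error handling abbreviated):
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key = 1;
 *
 *	if (rhashtable_init(&ht, &params))
 *		return -EINVAL;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	rhashtable_insert_fast(&ht, &obj->node, params);
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 *	rcu_read_unlock();
 */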
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any async resize that may be in progress. If defined, invokes
 * free_fn for each element to release its resources. Please note that
 * RCU protected readers may still be accessing the elements. Releasing
 * of resources must occur in a compatible manner. Then frees the bucket
 * array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
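
/* A matching teardown sketch for the example above; free_obj is a
 * hypothetical callback supplied by the caller:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */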
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);