// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head __rcu *bucket;
};
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static inline union nested_table *nested_table_top(
	const struct bucket_table *tbl)
{
	/* The top-level bucket entry does not need RCU protection
	 * because it's set at the same time as tbl->nest.
	 */
	return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
}
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_protected(ntbl->table, 1);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}
static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = nested_table_top(tbl);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}
static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}
static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}
static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}
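
/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte pointers):
 * shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9, so one nested
 * page holds 1 << 9 = 512 entries.  For nbuckets = 1 << 16 this gives
 * tbl->nest = (16 - 1) % 9 + 1 = 7: the top level consumes 7 bits of the
 * hash and each deeper level consumes 9 bits.
 */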
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head __rcu **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;
	unsigned long flags;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
				SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}
static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
	unsigned long flags;
	int err;

	if (!bkt)
		return 0;
	flags = rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt, flags);

	return err;
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
		    new_tbl) != NULL)
		return -EEXIST;

	return 0;
}
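
/*
 * For context, a minimal reader-side sketch (an illustration of the scheme
 * described above, not code from this file; search_bucket() is a
 * hypothetical helper): lookups that race with a rehash chase future_tbl
 * until the chain ends, so an entry is found in whichever table currently
 * holds it.
 *
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 *	do {
 *		obj = search_bucket(tbl, key);
 *		if (obj)
 *			return obj;
 *		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 *	} while (tbl);
 *	return NULL;
 */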
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head __rcu **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}
static struct bucket_table *rhashtable_insert_one(
	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
	void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head __rcu **bkt;
	unsigned long flags;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			flags = rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt, flags);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
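
/*
 * Callers do not normally invoke rhashtable_insert_slow() directly: the
 * inline fast paths in <linux/rhashtable.h> fall back to it when a chain
 * grows too long or the table needs to grow.  A hedged caller-side sketch
 * (obj and test_params are assumptions, not definitions from this file):
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	if (err)
 *		goto drop;	// e.g. -EBUSY, -E2BIG or -ENOMEM under pressure
 */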
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
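
/*
 * A minimal walk sketch under the rules documented above (illustrative
 * only; struct test_obj is an assumption, not a type from this file):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	// -EAGAIN: resize, iterator rewound
 *		// use obj
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */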
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position. Note that we take
 * the RCU lock in all cases including when we return an error. So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk. Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}
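
/*
 * Worked example (illustrative): with nelem_hint = 100 and the default
 * min_size, 100 * 4 / 3 = 133 and roundup_pow_of_two(133) = 256, so the
 * initial table gets 256 buckets and starts below the 75% growth threshold.
 */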
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is API initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
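
/*
 * End-to-end lifecycle sketch (illustrative only; test_obj and params
 * follow the examples in the comment above and are not defined here):
 *
 *	struct rhashtable ht;
 *	struct test_obj obj = { .key = 42 };
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *	err = rhashtable_insert_fast(&ht, &obj.node, params);
 *	...
 *	rhashtable_remove_fast(&ht, &obj.node, params);
 *	rhashtable_destroy(&ht);
 */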
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	static struct rhash_lock_head __rcu *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);