// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U

union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
	int err;

	if (!bkt)
		return 0;
	rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
		    new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct rhash_lock_head **bkt,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head **bkt;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
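
/*
 * Usage sketch (illustrative, not part of this file): callers normally
 * insert through the inline fast path in <linux/rhashtable.h>, which
 * falls back to rhashtable_insert_slow() when a rehash is in progress.
 * The test_obj/test_params names below are assumptions for the example.
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *
 *	static int add_obj(struct rhashtable *ht, struct test_obj *obj)
 *	{
 *		return rhashtable_insert_fast(ht, &obj->node, test_params);
 *	}
 */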

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position. Note that we take
 * the RCU lock in all cases including when we return an error. So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
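
/*
 * Usage sketch (illustrative, not part of this file): callers that care
 * about resize events can use the _check variant and observe the
 * -EAGAIN rewind explicitly instead of silently continuing.
 *
 *	rhashtable_walk_enter(ht, &iter);
 *	if (rhashtable_walk_start_check(&iter) == -EAGAIN)
 *		pr_debug("table resized; walk restarted from slot 0\n");
 */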

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk. Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
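
/*
 * Usage sketch (illustrative, not part of this file): a typical walk
 * brackets one or more start/stop sections with enter/exit, and treats
 * ERR_PTR(-EAGAIN) from rhashtable_walk_next() as "a resize rewound the
 * iteration". The test_obj type is assumed from the insertion example
 * above.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	// -EAGAIN: walk was rewound
 *		pr_info("key %d\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */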

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
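
/*
 * Usage sketch (illustrative, not part of this file): bringing up a
 * table with the fixed-length-key params from the kernel-doc example
 * above, then looking up an entry. The returned object is only
 * guaranteed to stay alive while the RCU read lock is held.
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int err, key = 42;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (err)
 *		return err;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, test_params);
 *	if (obj)
 *		pr_info("found key %d\n", obj->key);
 *	rcu_read_unlock();
 */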

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
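
/*
 * Usage sketch (illustrative, not part of this file): an rhltable
 * chains objects that share a key through a struct rhlist_head instead
 * of rejecting the duplicate insert. The dup_obj/dup_params names are
 * assumptions for the example.
 *
 *	struct dup_obj {
 *		int			key;
 *		struct rhlist_head	list;
 *	};
 *
 *	err = rhltable_init(&hlt, &dup_params);
 *	...
 *	err = rhltable_insert(&hlt, &obj->list, dup_params);
 */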

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
6b6f302c
TG
1123void rhashtable_free_and_destroy(struct rhashtable *ht,
1124 void (*free_fn)(void *ptr, void *arg),
1125 void *arg)
7e1e7763 1126{
0026129c 1127 struct bucket_table *tbl, *next_tbl;
6b6f302c 1128 unsigned int i;
97defe1e 1129
4c4b52d9 1130 cancel_work_sync(&ht->run_work);
97defe1e 1131
57699a40 1132 mutex_lock(&ht->mutex);
6b6f302c 1133 tbl = rht_dereference(ht->tbl, ht);
0026129c 1134restart:
6b6f302c
TG
1135 if (free_fn) {
1136 for (i = 0; i < tbl->size; i++) {
1137 struct rhash_head *pos, *next;
1138
ae6da1f5 1139 cond_resched();
adc6a3ab 1140 for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
6b6f302c
TG
1141 next = !rht_is_a_nulls(pos) ?
1142 rht_dereference(pos->next, ht) : NULL;
1143 !rht_is_a_nulls(pos);
1144 pos = next,
1145 next = !rht_is_a_nulls(pos) ?
1146 rht_dereference(pos->next, ht) : NULL)
ca26893f 1147 rhashtable_free_one(ht, pos, free_fn, arg);
6b6f302c
TG
1148 }
1149 }
1150
0026129c 1151 next_tbl = rht_dereference(tbl->future_tbl, ht);
6b6f302c 1152 bucket_table_free(tbl);
0026129c
TY
1153 if (next_tbl) {
1154 tbl = next_tbl;
1155 goto restart;
1156 }
97defe1e 1157 mutex_unlock(&ht->mutex);
7e1e7763 1158}
6b6f302c
TG
1159EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
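
/*
 * Usage sketch (illustrative, not part of this file): a minimal free_fn
 * that frees each element; the void *arg cookie is unused here and
 * passed as NULL.
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */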

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
					     unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
					   unsigned int hash)
{
	static struct rhash_lock_head *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);