// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U

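/*
 * When the flat bucket array for a table cannot be allocated (e.g. a
 * GFP_ATOMIC resize fails to get a large contiguous area), the buckets are
 * instead kept in a small tree of page-sized arrays: the upper levels hold
 * pointers to further levels and the leaves hold the bucket heads.  Such
 * tables have a non-zero tbl->nest, which is the number of low hash bits
 * consumed by the top level.  See nested_bucket_table_alloc() and
 * rht_bucket_nested() below.
 */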
union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static inline union nested_table *nested_table_top(
	const struct bucket_table *tbl)
{
	/* The top-level bucket entry does not need RCU protection
	 * because it's set at the same time as tbl->nest.
	 */
	return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
}

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_protected(ntbl->table, 1);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = nested_table_top(tbl);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = alloc_hooks_tag(ht->alloc_tag,
			kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = alloc_hooks_tag(ht->alloc_tag,
			kmalloc_noprof(size, gfp|__GFP_ZERO));
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = alloc_hooks_tag(ht->alloc_tag,
			kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
					     gfp|__GFP_ZERO, NUMA_NO_NODE));

	size = nbuckets;

	if (tbl == NULL && !gfpflags_allow_blocking(gfp)) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

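/* Move a single entry from the @old_hash chain in the old table to the
 * bucket it hashes to in the newest table.  The entry moved is the one at
 * the tail of the old chain, so repeated calls drain the chain from the
 * back.  Returns -ENOENT once the chain is empty, or -EAGAIN if the new
 * table is nested and the move cannot be performed here.
 */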
static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head __rcu **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;
	unsigned long flags;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
				SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
	unsigned long flags;
	int err;

	if (!bkt)
		return 0;
	flags = rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt, flags);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
		    new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

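/* Search the bucket for an entry matching @key.  For a plain rhashtable the
 * matching object is returned; for an rhltable, @obj is chained onto the
 * matching object's list and NULL is returned.  Without a match the result
 * is ERR_PTR(-ENOENT), or ERR_PTR(-EAGAIN) if RHT_ELASTICITY entries were
 * scanned without finding one, signalling that a rehash is needed.
 */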
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head __rcu **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(
	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
	void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head __rcu **bkt;
	unsigned long flags;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			bool inserted;

			flags = rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			inserted = data && !new_tbl;
			if (inserted)
				atomic_inc(&ht->nelems);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt, flags);

			if (inserted && rht_grow_above_75(ht, tbl))
				schedule_work(&ht->run_work);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptible context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk at the current iterator position. Note that we take
 * the RCU lock in all cases including when we return an error. So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter: Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter: Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk. Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

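/*
 * Typical walker usage (an illustrative sketch only; "my_ht", "my_obj" and
 * handle() are placeholders for the caller's table, object type and per
 * object work, and error handling is abbreviated):
 *
 *	struct rhashtable_iter iter;
 *	struct my_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	do {
 *		rhashtable_walk_start(&iter);
 *		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
 *			handle(obj);
 *		rhashtable_walk_stop(&iter);
 *	} while (obj == ERR_PTR(-EAGAIN));
 *	rhashtable_walk_exit(&iter);
 *
 * As noted above, a walk that races with a resize may see duplicates or
 * miss objects, so handle() must tolerate both.
 */
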
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void			*my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init_noprof(struct rhashtable *ht,
			   const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	alloc_tag_record(ht->alloc_tag);

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is API initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init_noprof);

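/*
 * Putting the pieces together (an illustrative sketch using "struct test_obj"
 * and the fixed length key "params" from Example 1 above; "my_ht" is a
 * placeholder and error handling is abbreviated):
 *
 *	static struct rhashtable my_ht;
 *	struct test_obj *obj, *found;
 *	int key = 1;
 *
 *	rhashtable_init(&my_ht, &params);
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	rhashtable_insert_fast(&my_ht, &obj->node, params);
 *
 *	rcu_read_lock();
 *	found = rhashtable_lookup_fast(&my_ht, &key, params);
 *	rcu_read_unlock();
 *
 *	rhashtable_remove_fast(&my_ht, &obj->node, params);
 *	rhashtable_free_and_destroy(&my_ht, NULL, NULL);
 */
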
/**
 * rhltable_init - initialize a new hash list table
 * @hlt: hash list table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init_noprof(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init_noprof);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	static struct rhash_lock_head __rcu *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);