/* SPDX-License-Identifier: LGPL-2.1+ */
/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt
***/

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "extract-word.h"
#include "fileio.h"
#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif
/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 *
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
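/* Worked example: with INV_KEEP_FREE = 5 the maximum load factor is
 * 1 - 1/5 = 0.8, so a table that is to hold 100 entries is sized to at
 * least 100 + 100/(5 - 1) = 125 buckets; see resize_buckets() below. */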
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN         (UINT_MAX - 3)
#define IDX_PUT                 (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP                 (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END           (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST               (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL                 UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
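/* Example: an entry whose key hashes to bucket 2 but ended up in bucket 4
 * after linear probing has DIB 2. Raw bytes 0x00..0xfc store the DIB
 * directly; anything larger is stored as DIB_RAW_OVERFLOW and recomputed
 * from the hash when needed (see bucket_calculate_dib() below). */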
#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
struct _packed_ indirect_storage {
        void *storage;                    /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while (hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
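/* Worked example: on 64bit a set_entry is a single 8-byte key pointer, so
 * DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4, matching the
 * "4 set_entries + 4 DIB bytes + 3 unused" accounting above. */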
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated, n_allocated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
__attribute__((destructor)) static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */
        if (!is_main_thread())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}
static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static inline void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}
static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}
static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
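/* Example: with n_buckets(h) == 8, an entry whose optimal bucket ("from")
 * is 6 but that is stored in bucket 1 ("idx") wrapped around the end of
 * the table; its distance is 8 + 1 - 6 = 3 probes, not a negative value. */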
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
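/* Backward shift in practice: say buckets 3..6 hold entries with DIBs
 * (0, 1, 1, 2) and the entry in bucket 3 is removed. The scan stops at
 * bucket 7, which is free or has DIB 0. The entries in buckets 4..6 are
 * shifted back into 3..5 with DIBs (0, 0, 1), and bucket 6 is marked free.
 * No tombstones are left behind (cf. Goossaert 2013 above). */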
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
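/* Usage sketch: the public HASHMAP_FOREACH macros in hashmap.h are built
 * on this same iterator machinery, roughly:
 *
 *         Iterator i;
 *         void *v;
 *         const void *k;
 *
 *         for (i = ITERATOR_FIRST; internal_hashmap_iterate(HASHMAP_BASE(h), &i, &v, &k); )
 *                 ...use k and v...
 *
 * Removing the entry currently being iterated is allowed; any other put or
 * removal mid-iteration is not (see the asserts in hashmap_iterate_entry()
 * above). */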
IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}
Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*)        hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*)            hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}
void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}
static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
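/* Walk-through of the loop above: suppose the new entry's optimal bucket
 * is 3 and buckets 3..5 hold A (DIB 0), B (DIB 1) and C (DIB 0). Probing
 * starts at 3 with distance 0: A's DIB 0 is not < 0, so move on; at 4,
 * B's DIB 1 is not < 1; at 5, C's DIB 0 < distance 2, so the "wealthier"
 * C is evicted into IDX_PUT and the scan continues placing C. This is
 * what evens out displacements instead of penalizing late arrivals. */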
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
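/* Sizing example on 64bit: growing a plain Hashmap to hold 100 entries
 * gives new_n_buckets = 100 + 100/4 = 125. A plain_hashmap_entry is 16
 * bytes plus 1 DIB byte, so 125 * 17 = 2125 bytes, rounded up to the next
 * power of two: new_shift = 12, i.e. 4096 bytes of storage, which holds
 * 4096 / 17 = 240 buckets. */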
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
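/* Return-value contract, relied upon by callers: 1 means the entry was
 * newly inserted, 0 means the exact same key/value pair was already
 * present, -EEXIST means the key is taken by a different value:
 *
 *         r = hashmap_put(h, key, value);
 *         if (r == -EEXIST)
 *                 ... key present with a different value ...
 *         else if (r < 0)
 *                 return r;   (-ENOMEM)
 */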
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}
void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}
int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}
HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}
void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}
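/* E.g. after inserting "a" and then "b" into an OrderedHashmap,
 * ordered_hashmap_next(h, "a") returns the value stored for "b"; it
 * returns NULL for the most recently inserted key ("b") and for keys
 * that are not present at all. */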
int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;

        assert(s);
        assert(p);

        if (set_contains(s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(s, c);
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}
int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
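/* E.g. for a Set created with string_hash_ops,
 * set_put_strsplit(s, "a b c", WHITESPACE, 0) allocates and inserts the
 * three words "a", "b" and "c" as separate entries; each word is handed
 * to set_consume() above, which frees it on duplicate or error. */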
/* expand the cachemem if needed, return true if newly (re)activated. */
static int cachemem_maintain(CacheMem *mem, unsigned size) {
        bool r = false;

        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                r = true;
                mem->active = true;
        }

        return r;
}
int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        unsigned size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
IteratedCache *iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
                free(cache);
        }

        return NULL;
}