/* SPDX-License-Identifier: LGPL-2.1-or-later */

#if HAVE_VALGRIND_VALGRIND_H
# include <valgrind/valgrind.h>
#endif

#include "alloc-util.h"
#include "logarithm.h"
#include "memory-util.h"
#include "missing_syscall.h"
#include "process-util.h"
#include "random-util.h"
#include "siphash24.h"
#include "string-util.h"

#if ENABLE_DEBUG_HASHMAP
# include "list.h"
#endif
/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 *
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
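/* A worked check of the relation above (illustrative addition, not from the
 * original sources): with INV_KEEP_FREE == 5 the maximum load factor is
 * 1 - 1/5 = 0.8, so 100 entries need 100 + 100/(5-1) = 125 buckets
 * (100/125 = 0.8). */
assert_cc(100U + 100U / (INV_KEEP_FREE - 1) == 125U);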
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};
/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
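/* Illustrative sanity check (an addition, not from the original source): the
 * three special raw values occupy the very top of the uint8_t range, so any
 * real displacement of 0..252 is stored verbatim and anything larger is
 * collapsed to DIB_RAW_OVERFLOW and recomputed on demand. */
assert_cc(DIB_RAW_OVERFLOW < DIB_RAW_REHASH);
assert_cc(DIB_RAW_REHASH < DIB_RAW_FREE);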
#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
struct _packed_ indirect_storage {
        void *storage;                    /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while (hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
struct direct_storage {
        /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};
#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
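/* Worked example (illustrative addition): on 64-bit, sizeof(struct direct_storage)
 * is 39 and a set_entry is a single pointer (8 bytes), so
 * DIRECT_BUCKETS(struct set_entry) == 39 / (8 + 1) == 4, matching the
 * "4 set_entries + 4 DIB bytes" breakdown in the comment above. */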
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect;  /* if  has_indirect */
                struct direct_storage direct;      /* if !has_indirect */
        };

        enum HashmapType type:2;      /* HASHMAP_TYPE_* */
        bool has_indirect:1;          /* whether indirect storage is used */
        unsigned n_direct_entries:3;  /* Number of entries in direct storage.
                                       * Only valid if !has_indirect. */
        bool from_pool:1;             /* whether was allocated from mempool */
        bool dirty:1;                 /* whether dirtied since last iterated_cache_get() */
        bool cached:1;                /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};
/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};
typedef struct CacheMem {
        const void **ptr;
        size_t n_populated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};
DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};
static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
void hashmap_trim_pools(void) {
        int r;

        /* The pool is only allocated by the main thread, but the memory can be passed to other
         * threads. Let's clean up if we are the main thread and no other threads are live. */

        /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
         * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
        if (getpid() != gettid())
                return (void) log_debug("Not cleaning up memory pools, not in main thread.");

        r = get_process_threads(0);
        if (r < 0)
                return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
        if (r != 1)
                return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");

        mempool_trim(&hashmap_pool);
        mempool_trim(&ordered_hashmap_pool);
}
#if HAVE_VALGRIND_VALGRIND_H
_destructor_ static void cleanup_pools(void) {
        /* Be nice to valgrind */
        if (RUNNING_ON_VALGRIND)
                hashmap_trim_pools();
}
#endif
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
static void* storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t* hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}
static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
        return CAST_ALIGN_PTR(
                        struct hashmap_base_entry,
                        (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}
static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached();
}
static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
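/* Worked example (illustrative addition): with n_buckets(h) == 8, an entry
 * whose optimal bucket is 6 but which ended up at index 1 has wrapped around
 * the end of the table, so its distance is 8 + 1 - 6 = 3 probes. */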
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}
static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached();
        }
}
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
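/* Backward shift deletion in pictures (illustrative addition, in the spirit of
 * the Goossaert reference above). Suppose we remove the entry at index 2 and
 * the DIBs look like this:
 *
 *     idx:  1  2  3  4  5
 *     DIB:  0  0  1  1  0    <- index 5 is the stop bucket (DIB == 0)
 *
 * The loop above moves the entries at 3 and 4 one step back, decrementing
 * their DIBs, and frees the last vacated bucket:
 *
 *     idx:  1  2  3  4  5
 *     DIB:  0  0  0  -  0    <- '-' is now DIB_RAW_FREE
 *
 * No tombstones are ever left behind. */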
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
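/* Illustrative note (an addition): the next_key check above is what makes it
 * safe to remove the *current* entry from inside an iteration loop. Worked
 * example: if the entry we planned to visit next sat at bucket 7 and removing
 * the current entry backward-shifted it to bucket 6, the key stored at 7 no
 * longer matches i->next_key, so the iterator steps back one bucket before
 * continuing. */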
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static void shared_hash_key_initialize(void) {
        random_bytes(shared_hash_key, sizeof(shared_hash_key));
}
static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];

        bool use_pool = mempool_enabled && mempool_enabled(); /* mempool_enabled is a weak symbol */

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ?: &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        static pthread_once_t once = PTHREAD_ONCE_INIT;
        assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 1;
}
int _hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int _ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int _set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

int _hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return hashmap_put(*h, key, value);
}

int _ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _ordered_hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return ordered_hashmap_put(*h, key, value);
}
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}

HashmapBase* _hashmap_free(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
        if (h) {
                _hashmap_clear(h, default_free_key, default_free_value);
                hashmap_free_no_clear(h);
        }

        return NULL;
}
void _hashmap_clear(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
        free_func_t free_key, free_value;

        if (!h)
                return;

        free_key = h->hash_ops->free_key ?: default_free_key;
        free_value = h->hash_ops->free_value ?: default_free_value;

        if (free_key || free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        v = _hashmap_first_key_and_value(h, true, &k);

                        if (free_key)
                                free_key(k);

                        if (free_value)
                                free_value(v);
                }
        }

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);
/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
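/* Robin Hood insertion in pictures (illustrative addition): suppose the entry
 * being placed has probed 2 buckets (distance == 2) and the resident of the
 * current bucket has DIB 0. The resident is "wealthier" (0 < 2), so the new
 * entry takes the bucket with DIB 2 and the resident is picked up and carried
 * onward, with distance reset to its old DIB (0), to be re-placed further
 * along the probe sequence. This keeps displacements evenly distributed
 * across entries. */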
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
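        /* Worked example (illustrative addition): growing to hold
         * new_n_entries == 100 yields new_n_buckets == 100 + 100/4 == 125,
         * i.e. exactly the 0.8 maximum load factor encoded in INV_KEEP_FREE. */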
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert_se(n_rehashed == n_entries(h));

        return 1;
}
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
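/* Illustrative note (an addition): the scan can give up as soon as it meets an
 * entry with dib < distance. Robin Hood insertion keeps probe sequences
 * ordered by displacement, so if the key being looked up were present, it
 * would have displaced that "wealthier" entry at insertion time. This bounds
 * unsuccessful lookups without needing tombstones. */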
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
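/* Hedged usage sketch (an addition, not part of the original file), assuming
 * string_hash_ops from hash-funcs.h:
 *
 *     _cleanup_hashmap_free_ Hashmap *m = hashmap_new(&string_hash_ops);
 *     r = hashmap_put(m, "foo", ptr);    // 1: newly inserted
 *     r = hashmap_put(m, "foo", ptr);    // 0: same key and value already there
 *     r = hashmap_put(m, "foo", other);  // -EEXIST: key taken by another value
 */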
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int _set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return set_put(*s, key);
}

int _set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _set_ensure_put(s, hash_ops, key HASHMAP_DEBUG_PASS_ARGS);
        if (r <= 0) {
                if (hash_ops && hash_ops->free_key)
                        hash_ops->free_key(key);
                else
                        free(key);
        }

        return r;
}
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}
void* _hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool _hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}
void* _hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}
*h
, const void *old_key
, const void *new_key
, void *value
) {
1446 struct swap_entries swap
;
1447 struct plain_hashmap_entry
*e
;
1448 unsigned old_hash
, new_hash
, idx
;
1453 old_hash
= bucket_hash(h
, old_key
);
1454 idx
= bucket_scan(h
, old_hash
, old_key
);
1458 new_hash
= bucket_hash(h
, new_key
);
1459 if (bucket_scan(h
, new_hash
, new_key
) != IDX_NIL
)
1462 remove_entry(h
, idx
);
1464 e
= &bucket_at_swap(&swap
, IDX_PUT
)->p
;
1467 assert_se(hashmap_put_boldly(h
, new_hash
, &swap
, false) == 1);
1472 int set_remove_and_put(Set
*s
, const void *old_key
, const void *new_key
) {
1473 struct swap_entries swap
;
1474 struct hashmap_base_entry
*e
;
1475 unsigned old_hash
, new_hash
, idx
;
1480 old_hash
= bucket_hash(s
, old_key
);
1481 idx
= bucket_scan(s
, old_hash
, old_key
);
1485 new_hash
= bucket_hash(s
, new_key
);
1486 if (bucket_scan(s
, new_hash
, new_key
) != IDX_NIL
)
1489 remove_entry(s
, idx
);
1491 e
= &bucket_at_swap(&swap
, IDX_PUT
)->p
.b
;
1493 assert_se(hashmap_put_boldly(s
, new_hash
, &swap
, false) == 1);
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        if (entry_value(h, e) != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}
static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        struct hashmap_base_entry *e;
        void *key, *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        e = bucket_at(h, idx);
        key = (void*) e->key;
        data = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = key;

        return data;
}
unsigned _hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned _hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}
int _hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;
        int r;

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;
        int r;

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int _hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}
* _hashmap_copy(HashmapBase
*h HASHMAP_DEBUG_PARAMS
) {
1745 copy
= hashmap_base_new(h
->hash_ops
, h
->type HASHMAP_DEBUG_PASS_ARGS
);
1750 case HASHMAP_TYPE_PLAIN
:
1751 case HASHMAP_TYPE_ORDERED
:
1752 r
= hashmap_merge((Hashmap
*)copy
, (Hashmap
*)h
);
1754 case HASHMAP_TYPE_SET
:
1755 r
= set_merge((Set
*)copy
, (Set
*)h
);
1758 assert_not_reached();
1762 return _hashmap_free(copy
, NULL
, NULL
);
1767 char** _hashmap_get_strv(HashmapBase
*h
) {
1773 return new0(char*, 1);
1775 sv
= new(char*, n_entries(h
)+1);
1780 HASHMAP_FOREACH_IDX(idx
, h
, i
)
1781 sv
[n
++] = entry_value(h
, bucket_at(h
, idx
));
void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}
int _hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        _cleanup_free_ char *kdup = NULL, *vdup = NULL;

        kdup = strdup(k);
        if (!kdup)
                return -ENOMEM;

        if (v) {
                vdup = strdup(v);
                if (!vdup)
                        return -ENOMEM;
        }

        r = hashmap_put(*h, kdup, vdup);
        if (r < 0) {
                if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
                        return 0;

                return r;
        }

        /* 0 with non-null vdup would mean vdup is already in the hashmap, which cannot be */
        assert(vdup == NULL || r > 0);
        if (r > 0)
                kdup = vdup = NULL;

        return r;
}
int _set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n HASHMAP_DEBUG_PARAMS) {
        char *c;
        int r;

        r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        if (n == SIZE_MAX) {
                if (set_contains(*s, (char*) p))
                        return 0;

                c = strdup(p);
        } else
                c = strndup(p, n);
        if (!c)
                return -ENOMEM;

        return set_consume(*s, c);
}

int _set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l HASHMAP_DEBUG_PARAMS) {
        int n = 0, r;

        STRV_FOREACH(i, l) {
                r = _set_put_strndup_full(s, hash_ops, *i, SIZE_MAX HASHMAP_DEBUG_PASS_ARGS);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = ASSERT_PTR(v);
        int r;

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
/* expand the cachemem if needed, return true if newly (re)activated. */
static int cachemem_maintain(CacheMem *mem, size_t size) {
        if (!GREEDY_REALLOC(mem->ptr, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}
int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        size_t size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned idx, i = 0;
                Iterator iter;

                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}

IteratedCache* iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
        }

        return mfree(cache);
}
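/* Hedged usage sketch (an addition, not part of the original file): the cache
 * trades a little memory for repeated flat access to keys/values, assuming the
 * hashmap_iterated_cache_new() wrapper from hashmap.h:
 *
 *     IteratedCache *c = hashmap_iterated_cache_new(m);
 *     const void **keys, **values;
 *     unsigned n;
 *     r = iterated_cache_get(c, &keys, &values, &n);
 *     // keys[0..n-1] / values[0..n-1] stay valid and are refreshed cheaply
 *     // on the next call unless the hashmap was dirtied in the meantime.
 */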
int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
        _cleanup_free_ char *str = NULL;
        size_t separator_len, len = 0;
        const char *value;
        bool first;

        assert(ret);

        if (set_isempty(s)) {
                *ret = NULL;
                return 0;
        }

        separator_len = strlen_ptr(separator);

        if (separator_len == 0)
                wrap_with_separator = false;

        first = !wrap_with_separator;

        SET_FOREACH(value, s) {
                size_t l = strlen_ptr(value);

                if (l == 0)
                        continue;

                if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
                        return -ENOMEM;

                if (separator_len > 0 && !first) {
                        memcpy(str + len, separator, separator_len);
                        len += separator_len;
                }

                memcpy(str + len, value, l);
                len += l;

                first = false;
        }

        if (wrap_with_separator) {
                memcpy(str + len, separator, separator_len);
                len += separator_len;
        }

        str[len] = 0;

        *ret = TAKE_PTR(str);
        return 0;
}
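/* Hedged usage sketch (an addition): for a set containing "a" and "b",
 *
 *     r = set_strjoin(s, ",", false, &joined);
 *
 * yields "a,b" (in the set's internal order, which is unspecified), while
 * wrap_with_separator == true would yield ",a,b,". */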
bool set_equal(Set *a, Set *b) {
        void *p;

        /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same
         * entries */

        if (a == b)
                return true;

        if (set_isempty(a) && set_isempty(b))
                return true;

        if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
                                         * already */
                return false;

        SET_FOREACH(p, a)
                if (!set_contains(b, p))
                        return false;

        /* If we have the same hashops, then we don't need to check things backwards given we compared the
         * size and that all of a is in b. */
        if (a->b.hash_ops == b->b.hash_ops)
                return true;

        SET_FOREACH(p, b)
                if (!set_contains(a, p))
                        return false;

        return true;
}

static bool set_fnmatch_one(Set *patterns, const char *needle) {
        const char *p;

        /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as non-matching pattern */

        SET_FOREACH(p, patterns)
                if (fnmatch(p, needle, 0) == 0)
                        return true;

        return false;
}

bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
        if (set_fnmatch_one(exclude_patterns, needle))
                return false;

        if (set_isempty(include_patterns))
                return true;

        return set_fnmatch_one(include_patterns, needle);
}