/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#include "alloc-util.h"
#include "fileio.h"
#include "hashmap.h"
#include "macro.h"
#include "memory-util.h"
#include "mempool.h"
#include "missing_syscall.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */

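/*
 * Illustrative sketch of one Robin Hood insertion with linear probing
 * (hypothetical buckets and keys, not measured data):
 *
 *         bucket:   [0]   [1]   [2]   [3]
 *         entry:     A     B     C    free
 *         DIB:       0     1     1
 *
 * Inserting D, which hashes to bucket 1: at bucket 1 D's distance is 0 and
 * B's DIB is 1, so B is "poorer" and keeps its spot; at bucket 2 D's distance
 * is 1, equal to C's DIB, so D probes on; bucket 3 is free, so D lands there
 * with DIB 2. Had C's DIB been 0 instead ("wealthier" than D), D would have
 * evicted C and continued the probe with C as the displaced entry, which is
 * what evens out the displacements.
 */
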
/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX         /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE ((dib_raw_t)0xffU)     /* a free bucket */
#define DIB_RAW_INIT ((char)DIB_RAW_FREE)   /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                   /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;              /* number of stored entries */
        unsigned n_buckets;              /* number of buckets */

        unsigned idx_lowest_entry;       /* Index below which all buckets are free.
                                            Makes "while(hashmap_steal_first())" loops
                                            O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops; /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated, n_allocated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

#if VALGRIND
_destructor_ static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */
        /* We build our own is_main_thread() here, which doesn't use C11
         * TLS based caching of the result. That's because valgrind apparently
         * doesn't like malloc() (which C11 TLS internally uses) to be called
         * from a GCC destructor. */
        if (getpid() != gettid())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

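/* Worked example for bucket_distance() (hypothetical numbers): with
 * n_buckets(h) == 8, an entry whose optimal bucket ("from") is 6 but which
 * now sits at idx == 1 has wrapped around the end of the array, so its
 * distance is 8 + 1 - 6 = 3 buckets from its initial bucket. */
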
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)

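/* Illustrative run of the backward shift above (hypothetical buckets):
 * removing B from [A(DIB 0)][B(DIB 0)][C(DIB 1)][D(DIB 1)][free] finds the
 * free bucket as the stop bucket, then moves C and D one step back, each with
 * its DIB reduced by one, yielding [A(0)][C(0)][D(0)][free][free]. No
 * tombstones are ever left behind. */
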
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

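/* Illustrative use of HASHMAP_FOREACH_IDX (hypothetical helper, not part of
 * the API of this file): count entries by visiting every occupied bucket.
 *
 *         static unsigned count_entries(HashmapBase *h) {
 *                 Iterator i;
 *                 unsigned idx, n = 0;
 *
 *                 HASHMAP_FOREACH_IDX(idx, h, i)
 *                         n++;
 *                 return n;
 *         }
 */
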
IteratedCache *_hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool up;

        up = mempool_enabled();

        h = up ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = up;
        h->hash_ops = hash_ops ?: &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 1;
}

int _hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int _ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int _set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

int _ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _ordered_hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return ordered_hashmap_put(*h, key, value);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}

HashmapBase *_hashmap_free(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
        if (h) {
                _hashmap_clear(h, default_free_key, default_free_value);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

void _hashmap_clear(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
        free_func_t free_key, free_value;

        if (!h)
                return;

        free_key = h->hash_ops->free_key ?: default_free_key;
        free_value = h->hash_ops->free_value ?: default_free_value;

        if (free_key || free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        v = _hashmap_first_key_and_value(h, true, &k);

                        if (free_key)
                                free_key(k);

                        if (free_value)
                                free_value(v);
                }
        }

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
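        /* e.g. with INV_KEEP_FREE == 5 (max load factor 0.8) and n == 8
         * entries this requests m = 8 + 8/4 = 10 buckets. (Illustrative
         * numbers only.) */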
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

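/* Illustrative caller pattern for hashmap_put()/hashmap_get() (hypothetical
 * snippet; assumes the public helpers from hashmap.h and string keys):
 *
 *         Hashmap *m = hashmap_new(&string_hash_ops);
 *         if (!m)
 *                 return -ENOMEM;
 *         r = hashmap_put(m, "foo", v);   // -EEXIST if "foo" maps to another value
 *         if (r < 0)
 *                 return r;
 *         assert_se(hashmap_get(m, "foo") == v);
 */
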
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int _set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return set_put(*s, key);
}

int _set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _set_ensure_put(s, hash_ops, key HASHMAP_DEBUG_PASS_ARGS);
        if (r <= 0) {
                if (hash_ops && hash_ops->free_key)
                        hash_ops->free_key(key);
                else
                        free(key);
        }

        return r;
}

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}

void *_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool _hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *_hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        if (entry_value(h, e) != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *_hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        struct hashmap_base_entry *e;
        void *key, *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        e = bucket_at(h, idx);
        key = (void*) e->key;
        data = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = key;

        return data;
}

unsigned _hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned _hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}

int _hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int _hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *_hashmap_copy(HashmapBase *h HASHMAP_DEBUG_PARAMS) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_PASS_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0)
                return _hashmap_free(copy, NULL, NULL);

        return copy;
}

char **_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

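/* Illustrative ownership contract of set_consume() (hypothetical snippet):
 * the value is freed both on error and when an equal entry already exists,
 * so the caller must not touch it afterwards.
 *
 *         char *s = strdup("foo");
 *         if (!s)
 *                 return -ENOMEM;
 *         r = set_consume(set, s);   // on r <= 0, s has already been freed
 *         if (r < 0)
 *                 return r;
 */
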
int _hashmap_put_strdup(Hashmap **h, const char *k, const char *v HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _hashmap_ensure_allocated(h, &string_hash_ops_free_free HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        _cleanup_free_ char *kdup = NULL, *vdup = NULL;

        kdup = strdup(k);
        if (!kdup)
                return -ENOMEM;

        if (v) {
                vdup = strdup(v);
                if (!vdup)
                        return -ENOMEM;
        }

        r = hashmap_put(*h, kdup, vdup);
        if (r < 0) {
                if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
                        return 0;
                return r;
        }

        /* 0 with non-null vdup would mean vdup is already in the hashmap, which cannot be */
        assert(vdup == NULL || r > 0);
        if (r > 0)
                kdup = vdup = NULL;

        return r;
}

int _set_put_strdup(Set **s, const char *p HASHMAP_DEBUG_PARAMS) {
        char *c;
        int r;

        assert(s);
        assert(p);

        r = _set_ensure_allocated(s, &string_hash_ops_free HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        if (set_contains(*s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(*s, c);
}

int _set_put_strdupv(Set **s, char **l HASHMAP_DEBUG_PARAMS) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = _set_put_strdup(s, *i HASHMAP_DEBUG_PASS_ARGS);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}

/* expand the cachemem if needed, return true if newly (re)activated. */
static int cachemem_maintain(CacheMem *mem, unsigned size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}

int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        unsigned size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}

IteratedCache *iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
        }

        return mfree(cache);
}