/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "fileio.h"
#include "hashmap.h"
#include "macro.h"
#include "memory-util.h"
#include "mempool.h"
#include "missing.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
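
/*
 * Illustrative example (not from the papers above): with linear probing,
 * suppose keys A and B both hash to bucket 0 and key C hashes to bucket 1.
 * Inserting A, B, C in that order yields:
 *
 *         bucket:  0  1  2
 *         entry:   A  B  C
 *         DIB:     0  1  1
 *
 * B was displaced once by A, and C once by B. A Robin Hood swap happens
 * whenever an inserted entry's running probe distance exceeds the DIB of
 * the occupant it meets (the occupant is "wealthier"), which is what keeps
 * these displacements evenly spread across entries.
 */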

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU) /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries; /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                   /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;              /* number of stored entries */
        unsigned n_buckets;              /* number of buckets */

        unsigned idx_lowest_entry;       /* Index below which all buckets are free.
                                            Makes "while(hashmap_steal_first())" loops
                                            O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
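
/* A worked instance of the macro above (values follow from the comment in
 * struct direct_storage, assuming 64bit pointers and HASH_KEY_SIZE == 16):
 * sizeof(struct direct_storage) == 39, and a set_entry plus its DIB byte
 * occupies 8 + 1 == 9 bytes, so DIRECT_BUCKETS(struct set_entry) evaluates
 * to 39 / 9 == 4, consistent with the assertion above. */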

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops; /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated, n_allocated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

#if VALGRIND
_destructor_ static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */

        /* We build our own is_main_thread() here, which doesn't use C11
         * TLS based caching of the result. That's because valgrind apparently
         * doesn't like malloc() (which C11 TLS internally uses) to be called
         * from a GCC destructor. */
        if (getpid() != gettid())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level limit. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
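
/*
 * Usage sketch (illustrative; the macros live in hashmap.h and set.h):
 * callers normally iterate via HASHMAP_FOREACH()/SET_FOREACH(), which
 * wrap internal_hashmap_iterate()/set_iterate() above, e.g.:
 *
 *         Iterator i;
 *         void *v;
 *         HASHMAP_FOREACH(v, h, i)
 *                 process(v);   // process() is a placeholder
 *
 * Removing the entry currently being iterated is allowed; the
 * backward-shift detection above keeps such iterations consistent.
 */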

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool up;

        up = mempool_enabled();

        h = up ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = up;
        h->hash_ops = hash_ops ?: &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
        if (h) {
                internal_hashmap_clear(h, default_free_key, default_free_value);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
        free_func_t free_key, free_value;

        if (!h)
                return;

        free_key = h->hash_ops->free_key ?: default_free_key;
        free_value = h->hash_ops->free_value ?: default_free_value;

        if (free_key || free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (internal_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        v = internal_hashmap_first_key_and_value(h, true, &k);

                        if (free_key)
                                free_key(k);

                        if (free_value)
                                free_value(v);
                }
        }

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: that the key does not exist yet in the hashmap,
 *                     and that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
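        /* Illustrative arithmetic: with INV_KEEP_FREE == 5 and
         * new_n_entries == 8 this yields 8 + 8/4 == 10 buckets, i.e. a
         * load factor of 8/10 == 0.8, matching the formula above. */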
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
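
/*
 * Usage sketch (illustrative; string_hash_ops is defined in hash-funcs.h,
 * some_ptr is a placeholder):
 *
 *         Hashmap *h = hashmap_new(&string_hash_ops);
 *         int r = hashmap_put(h, "foo", some_ptr);
 *         if (r == -EEXIST)
 *                 ...;  // "foo" already maps to a different value
 *         assert(hashmap_get(h, "foo") == some_ptr);
 *
 * Note that putting the same key/value pair twice returns 0, not -EEXIST.
 */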

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
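
/*
 * Illustrative contrast with hashmap_put(): since a set stores no separate
 * value that could conflict, set_put() returns 0 when the key is already
 * present and 1 when it was newly added:
 *
 *         Set *s = set_new(&string_hash_ops);
 *         assert(set_put(s, "foo") == 1);
 *         assert(set_put(s, "foo") == 0);
 *
 * set_consume() further below relies on this, freeing its argument whenever
 * it was not newly stored (including on error).
 */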

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}
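
/*
 * Summary of the three update entry points above: hashmap_put() refuses to
 * overwrite an existing mapping (-EEXIST, or 0 if the exact same value is
 * already stored), hashmap_replace() overwrites the value and also updates
 * the stored key pointer, and hashmap_update() overwrites the value only,
 * failing with -ENOENT if the key is absent.
 */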

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *internal_hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        if (entry_value(h, e) != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        struct hashmap_base_entry *e;
        void *key, *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        e = bucket_at(h, idx);
        key = (void*) e->key;
        data = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = key;

        return data;
}

unsigned internal_hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy, false, false);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;

        assert(s);
        assert(p);

        if (set_contains(s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(s, c);
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
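
/*
 * Usage sketch (illustrative): set_put_strsplit(s, "a b c", WHITESPACE, 0)
 * stores newly allocated copies of "a", "b" and "c" via set_consume(),
 * returning 0 once extract_first_word() exhausts the input.
 */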

/* Expand the cachemem if needed. Returns true if newly (re)activated,
 * false if it was already active, or -ENOMEM on allocation failure. */
static int cachemem_maintain(CacheMem *mem, unsigned size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}

int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        unsigned size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
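
/*
 * Usage sketch (illustrative): once internal_hashmap_iterated_cache_new()
 * has attached a cache to a hashmap, repeated lookups of the key/value
 * arrays are cheap while the hashmap stays unmodified:
 *
 *         const void **keys;
 *         unsigned n;
 *         r = iterated_cache_get(cache, &keys, NULL, &n);
 *
 * Only the dirty flag (plus cachemem (re)activation) decides whether the
 * arrays are re-synced from the hashmap.
 */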

IteratedCache *iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
        }

        return mfree(cache);
}