/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "hashmap.h"
#include "fileio.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */
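
/*
 * A worked example of the Robin Hood rule (illustrative sketch, not from the
 * sources above): suppose bucket 2 holds an entry sitting in its optimal
 * bucket (DIB 0) and we insert a key whose optimal bucket is 1. The probe
 * visits bucket 1 (occupied; resident DIB 0 vs. our displacement 0: no swap),
 * then bucket 2, where our displacement 1 exceeds the resident's DIB 0; the
 * two are swapped and the evicted entry continues probing with DIB 1.
 * "Rich" entries (low DIB) yield to "poor" ones, which keeps displacements
 * clustered around the mean instead of letting unlucky keys probe far.
 */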

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
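
/*
 * Usage sketch for the swap slots (illustrative): callers stage the entry to
 * be inserted in the virtual IDX_PUT slot and let the probe loop move it
 * into a real bucket, e.g.:
 *
 *     struct swap_entries swap;
 *     bucket_at_swap(&swap, IDX_PUT)->p.b.key = key;
 *     hashmap_put_robin_hood(h, idx, &swap);
 *
 * bucket_at_virtual() below resolves both real indexes and these out-of-band
 * slots through one interface, so the swap code needs no special cases.
 */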

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
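
/*
 * Encoding example (illustrative): a DIB of 0..0xfc is stored verbatim in a
 * single dib_raw_t byte. A larger DIB, say 300, is not representable, so the
 * bucket stores DIB_RAW_OVERFLOW and bucket_calculate_dib() below recomputes
 * the true distance from the key's hash on demand. DIB_FREE (UINT_MAX) is
 * the in-memory counterpart of the raw byte DIB_RAW_FREE.
 */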

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                    /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while (hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
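
/* Worked example (64bit, illustrative): sizeof(struct direct_storage) is 39,
 * a set_entry is 8 bytes and a dib_raw_t is 1 byte, so
 * DIRECT_BUCKETS(struct set_entry) == 39 / (8 + 1) == 4,
 * matching the "4 set_entries + 4 DIB bytes + 3 unused bytes" note above. */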

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect;  /* if  has_indirect */
                struct direct_storage direct;      /* if !has_indirect */
        };

        enum HashmapType type:2;      /* HASHMAP_TYPE_* */
        bool has_indirect:1;          /* whether indirect storage is used */
        unsigned n_direct_entries:3;  /* Number of entries in direct storage.
                                       * Only valid if !has_indirect. */
        bool from_pool:1;             /* whether was allocated from mempool */
        bool dirty:1;                 /* whether dirtied since last iterated_cache_get() */
        bool cached:1;                /* whether this hashmap is being cached */
        HASHMAP_DEBUG_FIELDS          /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated, n_allocated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
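
/*
 * Minimal usage sketch (illustrative; uses the public wrappers from
 * hashmap.h rather than the internal_* symbols defined below):
 *
 *     Hashmap *m = hashmap_new(&string_hash_ops);
 *     if (!m)
 *             return -ENOMEM;
 *     int r = hashmap_put(m, "answer", INT_TO_PTR(42));  // 1: newly inserted
 *     void *v = hashmap_get(m, "answer");                // INT_TO_PTR(42)
 *     m = hashmap_free(m);                               // returns NULL
 *
 * Sets store only keys; OrderedHashmaps additionally remember insertion
 * order for iteration.
 */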

#if VALGRIND
__attribute__((destructor)) static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */
        if (!is_main_thread())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static inline void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}
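
/* Example (illustrative): with n_buckets(h) == 8, an entry stored in bucket 1
 * whose key hashes to bucket 6 has DIB == bucket_distance(h, 1, 6)
 * == 8 + 1 - 6 == 3, i.e. the probe sequence wrapped around the end of the
 * bucket array. */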

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*) e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
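
/*
 * Backward shift example (illustrative): removing the entry at bucket 3 in
 *
 *     bucket:  3  4  5  6
 *     DIB:     0  1  1  0    <- bucket 6 is the stop bucket (DIB == 0)
 *
 * shifts the entries of buckets 4 and 5 one slot to the left, decrementing
 * their DIBs to 0, and marks bucket 5 free. No tombstones are left behind,
 * which is what keeps probe sequences short after many removals (see the
 * Goossaert reference at the top of this file).
 */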

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
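
/*
 * External consumers normally iterate via the HASHMAP_FOREACH macro from
 * hashmap.h; a minimal sketch (illustrative):
 *
 *     Iterator i;
 *     void *v;
 *     HASHMAP_FOREACH(v, m, i)
 *             log_debug("value: %p", v);
 *
 * Removing the *current* entry inside the loop is allowed; the next_key
 * bookkeeping above compensates for the resulting backward shift.
 */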

IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**) h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**) h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**) s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*) e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: that the key does not exist yet in the hashmap,
 *                     and that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*) ((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
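
/*
 * Sizing example (illustrative): growing a plain hashmap (16-byte entries
 * plus one DIB byte on 64bit) to hold 100 entries first computes
 * new_n_buckets = 100 + 100/4 = 125, then rounds the byte size
 * 125 * 17 = 2125 up to the next power of two, 4096, which yields
 * 4096 / 17 = 240 buckets and thus an effective load factor well below
 * the 0.8 maximum implied by INV_KEEP_FREE.
 */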

/*
 * Finds an entry with a matching key.
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
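
/*
 * Termination example (illustrative): when scanning for a key whose optimal
 * bucket is 4, reaching bucket 6 with probe distance 2 while the resident
 * entry there has DIB 1 proves the key is absent: Robin Hood insertion
 * would have displaced that "richer" resident, so the scan may stop without
 * ever reaching a free bucket.
 */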

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
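
/*
 * Return-value sketch for the put family (illustrative):
 *
 *     int r = hashmap_put(h, k, v);
 *     //  1       -> newly inserted
 *     //  0       -> key already present with the same value
 *     // -EEXIST  -> key present with a different value
 *
 * set_put() differs in that re-adding an existing key is not an error and
 * simply returns 0.
 */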

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1514
89439d4f
MS
1515void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1516 struct plain_hashmap_entry *e;
1517 unsigned hash, idx;
3158713e
LP
1518
1519 if (!h)
1520 return NULL;
1521
a3b6fafe 1522 hash = bucket_hash(h, key);
89439d4f
MS
1523 idx = bucket_scan(h, hash, key);
1524 if (idx == IDX_NIL)
3158713e
LP
1525 return NULL;
1526
89439d4f 1527 e = plain_bucket_at(h, idx);
3158713e
LP
1528 if (e->value != value)
1529 return NULL;
1530
89439d4f 1531 remove_entry(h, idx);
3158713e
LP
1532
1533 return value;
1534}
1535
89439d4f
MS
1536static unsigned find_first_entry(HashmapBase *h) {
1537 Iterator i = ITERATOR_FIRST;
60918275 1538
89439d4f
MS
1539 if (!h || !n_entries(h))
1540 return IDX_NIL;
60918275 1541
89439d4f 1542 return hashmap_iterate_entry(h, &i);
60918275
LP
1543}
1544
89439d4f
MS
1545void *internal_hashmap_first(HashmapBase *h) {
1546 unsigned idx;
60918275 1547
89439d4f
MS
1548 idx = find_first_entry(h);
1549 if (idx == IDX_NIL)
60918275
LP
1550 return NULL;
1551
89439d4f 1552 return entry_value(h, bucket_at(h, idx));
60918275
LP
1553}
1554
89439d4f
MS
1555void *internal_hashmap_first_key(HashmapBase *h) {
1556 struct hashmap_base_entry *e;
1557 unsigned idx;
2e4a6ff4 1558
89439d4f
MS
1559 idx = find_first_entry(h);
1560 if (idx == IDX_NIL)
2e4a6ff4
LP
1561 return NULL;
1562
89439d4f
MS
1563 e = bucket_at(h, idx);
1564 return (void*) e->key;
2e4a6ff4
LP
1565}
1566
89439d4f
MS
1567void *internal_hashmap_steal_first(HashmapBase *h) {
1568 struct hashmap_base_entry *e;
60918275 1569 void *data;
89439d4f 1570 unsigned idx;
60918275 1571
89439d4f
MS
1572 idx = find_first_entry(h);
1573 if (idx == IDX_NIL)
60918275
LP
1574 return NULL;
1575
89439d4f
MS
1576 e = bucket_at(h, idx);
1577 data = entry_value(h, e);
1578 remove_entry(h, idx);
60918275
LP
1579
1580 return data;
1581}
1582
89439d4f
MS
1583void *internal_hashmap_steal_first_key(HashmapBase *h) {
1584 struct hashmap_base_entry *e;
22be093f 1585 void *key;
89439d4f 1586 unsigned idx;
22be093f 1587
89439d4f
MS
1588 idx = find_first_entry(h);
1589 if (idx == IDX_NIL)
22be093f
LP
1590 return NULL;
1591
89439d4f
MS
1592 e = bucket_at(h, idx);
1593 key = (void*) e->key;
1594 remove_entry(h, idx);
22be093f
LP
1595
1596 return key;
1597}
1598
89439d4f 1599unsigned internal_hashmap_size(HashmapBase *h) {
60918275
LP
1600
1601 if (!h)
1602 return 0;
1603
89439d4f 1604 return n_entries(h);
60918275
LP
1605}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*) copy, (Hashmap*) h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*) copy, (Set*) h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;

        assert(s);
        assert(p);

        if (set_contains(s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(s, c);
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
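
/*
 * Usage sketch (illustrative): splitting a whitespace-separated string into
 * a set of freshly allocated words:
 *
 *     _cleanup_set_free_free_ Set *s = set_new(&string_hash_ops);
 *     if (!s)
 *             return -ENOMEM;
 *     int r = set_put_strsplit(s, "foo bar baz", WHITESPACE, 0);
 *
 * Each extracted word is consumed by the set, i.e. freed on duplicates, so
 * the caller never owns the individual strings.
 */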

/* Expand the cachemem if needed; returns true if newly (re)activated,
 * false if already active, and a negative errno on allocation failure. */
static int cachemem_maintain(CacheMem *mem, unsigned size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}

int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        unsigned size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
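
/*
 * Usage sketch (illustrative, assuming the hashmap.h wrapper around
 * internal_hashmap_iterated_cache_new()): an IteratedCache amortizes
 * repeated enumeration of a rarely-changing hashmap:
 *
 *     IteratedCache *c = hashmap_iterated_cache_new(m);
 *     const void **keys;
 *     unsigned n;
 *     int r = iterated_cache_get(c, &keys, NULL, &n);
 *
 * As long as the hashmap is not dirtied in between, subsequent calls reuse
 * the cached arrays without re-iterating the table.
 */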

IteratedCache *iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
                free(cache);
        }

        return NULL;
}