/* SPDX-License-Identifier: LGPL-2.1+ */
/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt
***/

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "hashmap.h"
#include "fileio.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
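
/* A small worked illustration of the Robin Hood rule used below (not from
 * the references above): an entry probing at distance 3 from its initial
 * bucket that meets a resident entry whose own DIB is only 1 is "poorer",
 * so it evicts the resident, takes the bucket, and the evicted entry
 * continues probing from the next bucket. See the dib < distance swap in
 * hashmap_put_robin_hood(). This keeps displacements clustered around the
 * mean instead of letting a few entries probe very far. */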

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX         /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                    /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while(hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
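
/* E.g. on 64-bit, given the 39-byte direct_storage above: a set_entry is a
 * single pointer, so DIRECT_BUCKETS(struct set_entry) == 39 / (8 + 1) == 4,
 * while an ordered_hashmap_entry is 24 bytes, giving 39 / (24 + 1) == 1. */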

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect;  /* if has_indirect */
                struct direct_storage direct;      /* if !has_indirect */
        };

        enum HashmapType type:2;      /* HASHMAP_TYPE_* */
        bool has_indirect:1;          /* whether indirect storage is used */
        unsigned n_direct_entries:3;  /* Number of entries in direct storage.
                                       * Only valid if !has_indirect. */
        bool from_pool:1;             /* whether was allocated from mempool */
        bool dirty:1;                 /* whether dirtied since last iterated_cache_get() */
        bool cached:1;                /* whether this hashmap is being cached */
        HASHMAP_DEBUG_FIELDS          /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated, n_allocated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

#ifdef VALGRIND
__attribute__((destructor)) static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */
        if (!is_main_thread())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static inline void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a fill
         * level threshold. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
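
/* E.g. with 8 buckets, an entry whose initial bucket is 6 but which ended up
 * stored at index 1 has wrapped around the end of the table:
 * bucket_distance() == 8 + 1 - 6 == 3. */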

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
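        /* E.g. if the two buckets following the removed one hold entries with
         * DIBs 1 and 2 and the bucket after them has DIB 0, "right" stopped at
         * the DIB-0 bucket; the two entries move back one slot each and end up
         * with DIBs 0 and 1. No tombstones are ever needed. */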
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
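
/* Code outside this file normally iterates via the FOREACH macros in the
 * headers, which wrap internal_hashmap_iterate()/set_iterate(). A sketch,
 * assuming the three-argument HASHMAP_FOREACH from hashmap.h
 * (process_value() is a placeholder):
 *
 *         Iterator i;
 *         void *v;
 *
 *         HASHMAP_FOREACH(v, h, i)
 *                 process_value(v);
 *
 * Removing the entry currently being iterated is safe; any other concurrent
 * modification is not (see the backward-shift comments above). */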

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}
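
/* Note: the cache stores a back-pointer to its hashmap and is invalidated
 * lazily via the dirty flag (see iterated_cache_get() below), so the hashmap
 * must outlive any cache created for it. */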

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);

        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap,
 *                     and that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
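        /* E.g. with INV_KEEP_FREE == 5, 100 entries ask for 100 + 100/4 == 125
         * buckets, i.e. a maximum load factor of 0.8. */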
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

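/* Return-value sketch for hashmap_put(), assuming a map created with the
 * usual hashmap_new(&string_hash_ops) from the headers:
 *
 *         r = hashmap_put(h, "foo", v);    r == 1: newly stored
 *         r = hashmap_put(h, "foo", v);    r == 0: same key, same value
 *         r = hashmap_put(h, "foo", w);    r == -EEXIST: key present with a
 *                                          different value
 */
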
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

unsigned internal_hashmap_size(HashmapBase *h) {

        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}
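
/* Note: set_consume() takes ownership of "value": whenever it does not store
 * it (r <= 0, i.e. already present or error), it frees it, so the caller must
 * not touch the pointer afterwards. set_put_strdup() and set_put_strsplit()
 * below use this allocate-then-consume pattern. */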

int set_put_strdup(Set *s, const char *p) {
        char *c;

        assert(s);
        assert(p);

        if (set_contains(s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(s, c);
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}

/* Expand the cachemem if needed; returns true if newly (re)activated,
 * false if it was already active, and -ENOMEM on allocation failure. */
static int cachemem_maintain(CacheMem *mem, unsigned size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}

int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        unsigned size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}

IteratedCache *iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
                free(cache);
        }

        return NULL;
}