/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "env-util.h"
#include "fileio.h"
#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
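
/*
 * Illustrative sketch (not part of this file): with linear probing, an
 * entry's DIB ("distance from initial bucket") is how far it sits from the
 * bucket its hash points at. E.g. in an 8-bucket table where keys A and B
 * both hash to bucket 2 and C hashes to bucket 3:
 *
 *     bucket: 0 1 2 3 4 5 6 7
 *     entry:      A B C
 *     DIB:        0 1 1
 *
 * B was displaced once by A; C, whose optimal bucket 3 is taken by B, is
 * displaced once as well. Robin Hood insertion swaps a new entry into a
 * bucket whenever the incumbent has a smaller DIB, which keeps the spread
 * of displacements (and thus worst-case probe lengths) small.
 */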
68
69/*
70 * XXX Ideas for improvement:
71 * For unordered hashmaps, randomize iteration order, similarly to Perl:
72 * http://blog.booking.com/hardening-perls-hash-function.html
73 */
74
75/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
76 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
77#define INV_KEEP_FREE 5U
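
/* Worked example (illustrative): with INV_KEEP_FREE == 5, a table asked to
 * hold 1000 entries is sized to at least 1000 + 1000/4 = 1250 buckets, i.e.
 * 250 buckets (one fifth) stay free and the load factor never exceeds 0.8. */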

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                    /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while (hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
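
/* Worked example (illustrative): on 64bit, per the 39-byte figure in the
 * comment above, DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4 and
 * DIRECT_BUCKETS(struct plain_hashmap_entry) = 39 / (16 + 1) = 2, i.e. a Set
 * can hold up to 4 entries and a plain Hashmap up to 2 without allocating. */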

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect;  /* if  has_indirect */
                struct direct_storage direct;      /* if !has_indirect */
        };

        enum HashmapType type:2;      /* HASHMAP_TYPE_* */
        bool has_indirect:1;          /* whether indirect storage is used */
        unsigned n_direct_entries:3;  /* Number of entries in direct storage.
                                       * Only valid if !has_indirect. */
        bool from_pool:1;             /* whether was allocated from mempool */
        bool dirty:1;                 /* whether dirtied since last iterated_cache_get() */
        bool cached:1;                /* whether this hashmap is being cached */
        HASHMAP_DEBUG_FIELDS          /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated, n_allocated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

#if VALGRIND
__attribute__((destructor)) static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */
        if (!is_main_thread())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static inline void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
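
/*
 * Illustrative sketch (not part of this file): backward shift deletion.
 * Removing A from the run below (DIBs in parentheses) shifts B and C one
 * bucket to the left and decrements their DIB; the stop bucket is the first
 * one that is free or has DIB == 0 and is itself left alone:
 *
 *     before:  ... A(0) B(1) C(1) D(0) ...
 *     after:   ... B(0) C(0) free D(0) ...
 *
 * Unlike tombstones, this keeps lookups as cheap as if the removed entry
 * had never been inserted.
 */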

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
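
/*
 * Illustrative usage (not part of this file): internal_hashmap_iterate() is
 * roughly what the HASHMAP_FOREACH()/SET_FOREACH() macros from the headers
 * expand to. A hand-written loop would look like this:
 *
 *     Iterator i = ITERATOR_FIRST;
 *     void *v;
 *     const void *k;
 *
 *     while (internal_hashmap_iterate(HASHMAP_BASE(h), &i, &v, &k))
 *             printf("%s -> %p\n", (const char*) k, v);
 *
 * Removing the entry currently returned is allowed; any other concurrent
 * modification of the table is not (and is caught under ENABLE_DEBUG_HASHMAP).
 */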

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static bool use_pool(void) {
        static int b = -1;

        if (!is_main_thread())
                return false;

        if (b < 0)
                b = getenv_bool("SYSTEMD_MEMPOOL") != 0;

        return b;
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool up;

        up = use_pool();

        h = up ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = up;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
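
/*
 * Illustrative usage (not part of this file): callers do not invoke the
 * internal_* constructors directly; the hashmap_new()/ordered_hashmap_new()/
 * set_new() macros from the headers pass the debug parameters through:
 *
 *     Hashmap *m = hashmap_new(&string_hash_ops);
 *     if (!m)
 *             return -ENOMEM;
 *     ...
 *     m = hashmap_free(m);   // frees the table, returns NULL
 */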

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
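
/*
 * Worked example (illustrative, assuming the 64bit sizes quoted above):
 * growing a Set past its 4 direct buckets. With new_n_entries = 5,
 * new_n_buckets = 5 + 5/4 = 6; each bucket needs 8 + 1 = 9 bytes, so at
 * least 54 bytes of storage are required, and
 * new_shift = log2u_round_up(MAX(54, 78)) = 7. Hence 128 bytes are
 * allocated, which actually yields 128 / 9 = 14 buckets.
 */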

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
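
/*
 * Illustrative usage (not part of this file): hashmap_put() refuses to
 * overwrite (-EEXIST) but returns 0 if the exact same pair is already
 * present; hashmap_replace() below overwrites instead:
 *
 *     Hashmap *m = hashmap_new(&string_hash_ops);
 *     int r = hashmap_put(m, "foo", some_ptr);   // 1 on fresh insert
 *     r = hashmap_put(m, "foo", other_ptr);      // -EEXIST
 *     void *v = hashmap_get(m, "foo");           // -> some_ptr
 *     m = hashmap_free(m);
 */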

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

unsigned internal_hashmap_size(HashmapBase *h) {

        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;

        assert(s);
        assert(p);

        if (set_contains(s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(s, c);
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
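
/*
 * Illustrative usage (not part of this file): the strdup-based helpers copy
 * their input, so the set owns its strings and set_free_free() releases them:
 *
 *     Set *s = set_new(&string_hash_ops);
 *     set_put_strdup(s, "foo");
 *     set_put_strsplit(s, "a b c", WHITESPACE, 0);   // adds "a", "b", "c"
 *     s = set_free_free(s);
 */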

/* expand the cachemem if needed, return true if newly (re)activated. */
static int cachemem_maintain(CacheMem *mem, unsigned size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}

int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        unsigned size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
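
/*
 * Illustrative usage (not part of this file): an IteratedCache amortizes
 * repeated full iterations over a rarely changing hashmap by snapshotting
 * keys/values into flat arrays; the arrays are only rebuilt after the map
 * has been dirtied by a put/remove:
 *
 *     IteratedCache *c = hashmap_iterated_cache_new(m);
 *     const void **values;
 *     unsigned n;
 *
 *     if (iterated_cache_get(c, NULL, &values, &n) >= 0)
 *             for (unsigned j = 0; j < n; j++)
 *                     use(values[j]);
 *
 *     c = iterated_cache_free(c);
 */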

IteratedCache *iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
                free(cache);
        }

        return NULL;
}