/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

#include "util.h"
#include "hashmap.h"
#include "set.h"
#include "macro.h"
#include "siphash24.h"
#include "strv.h"
#include "mempool.h"
#include "random-util.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
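
/* Worked illustration of the scheme described above: with linear probing, an
 * entry's DIB (distance from initial bucket) says how far it sits from the
 * bucket its hash points at. Robin Hood insertion walks forward from the hash
 * bucket and at each step compares its current displacement with the resident
 * entry's DIB; when the resident is "wealthier" (smaller DIB), the two are
 * swapped and probing continues with the displaced entry. E.g. inserting X
 * whose hash points at bucket 2 into
 *
 *     bucket:  2     3     4     5
 *     entry:   A(0)  B(1)  C(0)  (free)
 *
 * passes A (DIB 0 vs. X's 0) and B (1 vs. 1), but evicts C (0 < 2), so X lands
 * in bucket 4 with DIB 2 and C continues probing from bucket 5 with DIB 1.
 * This is what hashmap_put_robin_hood() below implements; it keeps the
 * variance of DIBs small, which keeps probe sequences short. */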

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
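
/* Worked example: INV_KEEP_FREE == 5 corresponds to a maximum load factor of
 * 1 - 1/5 = 0.8. resize_buckets() below sizes the table as
 * m = n + n/(INV_KEEP_FREE - 1), so for n = 100 entries it allocates
 * 100 + 100/4 = 125 buckets, of which 25 (one fifth) remain free. */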

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        char    *storage;                  /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while(hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
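
/* Worked example of the arithmetic above, assuming 64-bit pointers and
 * HASH_KEY_SIZE == 16: sizeof(struct indirect_storage) is 39 bytes, so
 * DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4 and
 * DIRECT_BUCKETS(struct ordered_hashmap_entry) = 39 / (24 + 1) = 1,
 * which satisfies both assertions: at least one direct bucket, and fewer
 * than 2^3 of them. */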

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

void string_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, strlen(p) + 1, state);
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
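
/* Typical usage, via the type-safe wrappers declared in hashmap.h (a sketch,
 * not part of this file):
 *
 *     Hashmap *h = hashmap_new(&string_hash_ops);
 *     r = hashmap_put(h, "foo", some_object);   // -EEXIST if "foo" is already
 *                                               // mapped to a different value
 *     void *o = hashmap_get(h, "foo");
 *     h = hashmap_free(h);
 *
 * string_hash_ops hashes and compares the pointed-to string, so distinct
 * pointers to equal strings are the same key. */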

void trivial_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(&p, sizeof(p), state);
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};
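
/* trivial_hash_ops hashes the pointer value itself, so keys are compared by
 * identity: two different pointers to equal data are different keys. It is
 * also the fallback used by hashmap_base_new() below when NULL hash_ops are
 * passed, which is what e.g. set_new(NULL) or storing small integers cast
 * with UINT_TO_PTR() rely on. */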

void uint64_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(uint64_t), state);
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};

#if SIZEOF_DEV_T != 8
void devt_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(dev_t), state);
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
338
89439d4f
MS
339static unsigned n_buckets(HashmapBase *h) {
340 return h->has_indirect ? h->indirect.n_buckets
341 : hashmap_type_info[h->type].n_direct_buckets;
342}
343
344static unsigned n_entries(HashmapBase *h) {
345 return h->has_indirect ? h->indirect.n_entries
346 : h->n_direct_entries;
347}
348
349static void n_entries_inc(HashmapBase *h) {
350 if (h->has_indirect)
351 h->indirect.n_entries++;
352 else
353 h->n_direct_entries++;
354}
355
356static void n_entries_dec(HashmapBase *h) {
357 if (h->has_indirect)
358 h->indirect.n_entries--;
359 else
360 h->n_direct_entries--;
361}
362
363static char *storage_ptr(HashmapBase *h) {
364 return h->has_indirect ? h->indirect.storage
365 : h->direct.storage;
366}
367
368static uint8_t *hash_key(HashmapBase *h) {
369 return h->has_indirect ? h->indirect.hash_key
370 : shared_hash_key;
371}
372
373static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
b826ab58 374 struct siphash state;
0cb3c286 375 uint64_t hash;
b826ab58 376
0cb3c286 377 siphash24_init(&state, hash_key(h));
b826ab58
TG
378
379 h->hash_ops->hash(p, &state);
380
0cb3c286
TG
381 siphash24_finalize((uint8_t*)&hash, &state);
382
383 return (unsigned) (hash % n_buckets(h));
9bf3b535 384}
89439d4f 385#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
9bf3b535
LP
386
387static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
388 static uint8_t current[HASH_KEY_SIZE];
389 static bool current_initialized = false;
390
391 /* Returns a hash function key to use. In order to keep things
392 * fast we will not generate a new key each time we allocate a
393 * new hash table. Instead, we'll just reuse the most recently
394 * generated one, except if we never generated one or when we
395 * are rehashing an entire hash table because we reached a
396 * fill level */
397
398 if (!current_initialized || !reuse_is_ok) {
399 random_bytes(current, sizeof(current));
400 current_initialized = true;
401 }
402
403 memcpy(hash_key, current, sizeof(current));
a3b6fafe
LP
404}
405
89439d4f
MS
406static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
407 return (struct hashmap_base_entry*)
408 (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
409}
410
411static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
412 return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
413}
414
415static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
416 return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
417}
39c2a6f1 418
89439d4f
MS
419static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
420 return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
421}
39c2a6f1 422
89439d4f
MS
423static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
424 return &swap->e[idx - _IDX_SWAP_BEGIN];
425}
39c2a6f1 426
89439d4f
MS
427/* Returns a pointer to the bucket at index idx.
428 * Understands real indexes and swap indexes, hence "_virtual". */
429static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
430 unsigned idx) {
431 if (idx < _IDX_SWAP_BEGIN)
432 return bucket_at(h, idx);
433
434 if (idx < _IDX_SWAP_END)
435 return &bucket_at_swap(swap, idx)->p.b;
436
437 assert_not_reached("Invalid index");
438}
439
440static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
441 return (dib_raw_t*)
442 (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
443}
444
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
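
/* Example: with n_buckets(h) == 8, an entry whose initial bucket is 6 but
 * which ended up at index 1 has wrapped around the end of the table, so its
 * distance is 8 + 1 - 6 = 3. */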
449
450static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
451 unsigned initial_bucket;
452
453 if (raw_dib == DIB_RAW_FREE)
454 return DIB_FREE;
455
456 if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
457 return raw_dib;
458
459 /*
460 * Having an overflow DIB value is very unlikely. The hash function
461 * would have to be bad. For example, in a table of size 2^24 filled
462 * to load factor 0.9 the maximum observed DIB is only about 60.
463 * In theory (assuming I used Maxima correctly), for an infinite size
464 * hash table with load factor 0.8 the probability of a given entry
465 * having DIB > 40 is 1.9e-8.
466 * This returns the correct DIB value by recomputing the hash value in
467 * the unlikely case. XXX Hitting this case could be a hint to rehash.
468 */
469 initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
470 return bucket_distance(h, idx, initial_bucket);
471}
472
473static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
474 dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
475}
476
477static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
478 dib_raw_t *dibs;
479
480 dibs = dib_raw_ptr(h);
481
482 for ( ; idx < n_buckets(h); idx++)
483 if (dibs[idx] != DIB_RAW_FREE)
484 return idx;
485
486 return IDX_NIL;
487}
488
489static void bucket_mark_free(HashmapBase *h, unsigned idx) {
eccaf899 490 memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
89439d4f
MS
491 bucket_set_dib(h, idx, DIB_FREE);
492}
493
494static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
495 unsigned from, unsigned to) {
496 struct hashmap_base_entry *e_from, *e_to;
497
498 assert(from != to);
39c2a6f1 499
89439d4f
MS
500 e_from = bucket_at_virtual(h, swap, from);
501 e_to = bucket_at_virtual(h, swap, to);
502
503 memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);
504
505 if (h->type == HASHMAP_TYPE_ORDERED) {
506 OrderedHashmap *lh = (OrderedHashmap*) h;
507 struct ordered_hashmap_entry *le, *le_to;
508
509 le_to = (struct ordered_hashmap_entry*) e_to;
510
511 if (le_to->iterate_next != IDX_NIL) {
512 le = (struct ordered_hashmap_entry*)
513 bucket_at_virtual(h, swap, le_to->iterate_next);
514 le->iterate_previous = to;
515 }
516
517 if (le_to->iterate_previous != IDX_NIL) {
518 le = (struct ordered_hashmap_entry*)
519 bucket_at_virtual(h, swap, le_to->iterate_previous);
520 le->iterate_next = to;
521 }
522
523 if (lh->iterate_list_head == from)
524 lh->iterate_list_head = to;
525 if (lh->iterate_list_tail == from)
526 lh->iterate_list_tail = to;
39c2a6f1 527 }
89439d4f 528}
60918275 529
89439d4f
MS
530static unsigned next_idx(HashmapBase *h, unsigned idx) {
531 return (idx + 1U) % n_buckets(h);
532}
60918275 533
89439d4f
MS
534static unsigned prev_idx(HashmapBase *h, unsigned idx) {
535 return (n_buckets(h) + idx - 1U) % n_buckets(h);
536}
60918275 537
89439d4f
MS
538static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
539 switch (h->type) {
45fa9e29 540
89439d4f
MS
541 case HASHMAP_TYPE_PLAIN:
542 case HASHMAP_TYPE_ORDERED:
543 return ((struct plain_hashmap_entry*)e)->value;
39c2a6f1 544
89439d4f
MS
545 case HASHMAP_TYPE_SET:
546 return (void*) e->key;
a3b6fafe 547
89439d4f
MS
548 default:
549 assert_not_reached("Unknown hashmap type");
550 }
60918275
LP
551}
552
89439d4f
MS
553static void base_remove_entry(HashmapBase *h, unsigned idx) {
554 unsigned left, right, prev, dib;
555 dib_raw_t raw_dib, *dibs;
45fa9e29 556
89439d4f
MS
557 dibs = dib_raw_ptr(h);
558 assert(dibs[idx] != DIB_RAW_FREE);
034c6ed7 559
fc86aa0e 560#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
561 h->debug.rem_count++;
562 h->debug.last_rem_idx = idx;
563#endif
034c6ed7 564
89439d4f
MS
565 left = idx;
566 /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
567 for (right = next_idx(h, left); ; right = next_idx(h, right)) {
568 raw_dib = dibs[right];
569 if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
570 break;
571
572 /* The buckets are not supposed to be all occupied and with DIB > 0.
573 * That would mean we could make everyone better off by shifting them
574 * backward. This scenario is impossible. */
575 assert(left != right);
576 }
034c6ed7 577
89439d4f
MS
578 if (h->type == HASHMAP_TYPE_ORDERED) {
579 OrderedHashmap *lh = (OrderedHashmap*) h;
580 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
581
582 if (le->iterate_next != IDX_NIL)
583 ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
584 else
585 lh->iterate_list_tail = le->iterate_previous;
586
587 if (le->iterate_previous != IDX_NIL)
588 ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
589 else
590 lh->iterate_list_head = le->iterate_next;
591 }
592
        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
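
/* Backward shift deletion, illustrated: removing A from
 *
 *     bucket:  2     3     4     5
 *     entry:   A(0)  B(1)  C(1)  (free)
 *
 * finds the stop bucket at index 5 (the first free or DIB == 0 bucket), then
 * shifts B and C one step to the left while decrementing their DIBs:
 *
 *     bucket:  2     3     4     5
 *     entry:   B(0)  C(0)  (free) (free)
 *
 * No tombstones are needed; a later lookup (base_bucket_scan() below) can
 * stop as soon as it hits a free bucket or an entry that is closer to its
 * initial bucket than the current probe distance. */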
89439d4f
MS
605#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
606
607static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
608 struct ordered_hashmap_entry *e;
609 unsigned idx;
034c6ed7 610
101d8e63 611 assert(h);
89439d4f
MS
612 assert(i);
613
614 if (i->idx == IDX_NIL)
615 goto at_end;
616
617 if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
618 goto at_end;
619
620 if (i->idx == IDX_FIRST) {
621 idx = h->iterate_list_head;
622 e = ordered_bucket_at(h, idx);
101d8e63 623 } else {
89439d4f
MS
624 idx = i->idx;
625 e = ordered_bucket_at(h, idx);
626 /*
627 * We allow removing the current entry while iterating, but removal may cause
628 * a backward shift. The next entry may thus move one bucket to the left.
629 * To detect when it happens, we remember the key pointer of the entry we were
630 * going to iterate next. If it does not match, there was a backward shift.
631 */
632 if (e->p.b.key != i->next_key) {
633 idx = prev_idx(HASHMAP_BASE(h), idx);
634 e = ordered_bucket_at(h, idx);
635 }
636 assert(e->p.b.key == i->next_key);
101d8e63 637 }
101d8e63 638
fc86aa0e 639#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
640 i->prev_idx = idx;
641#endif
642
643 if (e->iterate_next != IDX_NIL) {
644 struct ordered_hashmap_entry *n;
645 i->idx = e->iterate_next;
646 n = ordered_bucket_at(h, i->idx);
647 i->next_key = n->p.b.key;
648 } else
649 i->idx = IDX_NIL;
650
651 return idx;
652
653at_end:
654 i->idx = IDX_NIL;
655 return IDX_NIL;
101d8e63
LP
656}
657
89439d4f
MS
658static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
659 unsigned idx;
660
60918275 661 assert(h);
89439d4f 662 assert(i);
60918275 663
89439d4f
MS
664 if (i->idx == IDX_NIL)
665 goto at_end;
60918275 666
89439d4f
MS
667 if (i->idx == IDX_FIRST) {
668 /* fast forward to the first occupied bucket */
669 if (h->has_indirect) {
670 i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
671 h->indirect.idx_lowest_entry = i->idx;
672 } else
673 i->idx = skip_free_buckets(h, 0);
674
675 if (i->idx == IDX_NIL)
676 goto at_end;
677 } else {
678 struct hashmap_base_entry *e;
679
680 assert(i->idx > 0);
60918275 681
89439d4f
MS
682 e = bucket_at(h, i->idx);
683 /*
684 * We allow removing the current entry while iterating, but removal may cause
685 * a backward shift. The next entry may thus move one bucket to the left.
686 * To detect when it happens, we remember the key pointer of the entry we were
687 * going to iterate next. If it does not match, there was a backward shift.
688 */
689 if (e->key != i->next_key)
690 e = bucket_at(h, --i->idx);
60918275 691
89439d4f
MS
692 assert(e->key == i->next_key);
693 }
694
695 idx = i->idx;
fc86aa0e 696#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
697 i->prev_idx = idx;
698#endif
699
700 i->idx = skip_free_buckets(h, i->idx + 1);
701 if (i->idx != IDX_NIL)
702 i->next_key = bucket_at(h, i->idx)->key;
101d8e63 703 else
89439d4f
MS
704 i->idx = IDX_NIL;
705
706 return idx;
60918275 707
89439d4f
MS
708at_end:
709 i->idx = IDX_NIL;
710 return IDX_NIL;
60918275
LP
711}
712
89439d4f
MS
713static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
714 if (!h) {
715 i->idx = IDX_NIL;
716 return IDX_NIL;
717 }
101d8e63 718
fc86aa0e 719#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
720 if (i->idx == IDX_FIRST) {
721 i->put_count = h->debug.put_count;
722 i->rem_count = h->debug.rem_count;
723 } else {
724 /* While iterating, must not add any new entries */
725 assert(i->put_count == h->debug.put_count);
726 /* ... or remove entries other than the current one */
727 assert(i->rem_count == h->debug.rem_count ||
728 (i->rem_count == h->debug.rem_count - 1 &&
729 i->prev_idx == h->debug.last_rem_idx));
730 /* Reset our removals counter */
731 i->rem_count = h->debug.rem_count;
732 }
733#endif
101d8e63 734
89439d4f
MS
735 return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
736 : hashmap_iterate_in_internal_order(h, i);
737}
39c2a6f1 738
8927b1da 739bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
89439d4f
MS
740 struct hashmap_base_entry *e;
741 void *data;
742 unsigned idx;
743
744 idx = hashmap_iterate_entry(h, i);
745 if (idx == IDX_NIL) {
8927b1da
DH
746 if (value)
747 *value = NULL;
89439d4f
MS
748 if (key)
749 *key = NULL;
750
8927b1da 751 return false;
89439d4f
MS
752 }
753
754 e = bucket_at(h, idx);
755 data = entry_value(h, e);
8927b1da
DH
756 if (value)
757 *value = data;
89439d4f
MS
758 if (key)
759 *key = e->key;
760
8927b1da 761 return true;
101d8e63
LP
762}
763
8927b1da
DH
764bool set_iterate(Set *s, Iterator *i, void **value) {
765 return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
89439d4f 766}
60918275 767
89439d4f
MS
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
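
/* The public iteration macros in hashmap.h (HASHMAP_FOREACH, HASHMAP_FOREACH_KEY,
 * SET_FOREACH) are built on top of internal_hashmap_iterate() above. A typical
 * caller looks like this (a sketch, not part of this file):
 *
 *     Iterator i;
 *     void *v;
 *
 *     HASHMAP_FOREACH(v, h, i)
 *             do_something(v);
 *
 * Removing the entry currently being visited is allowed while iterating;
 * adding entries, or removing any other entry, is not (and is caught by the
 * ENABLE_DEBUG_HASHMAP checks in hashmap_iterate_entry() above). */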
772
773static void reset_direct_storage(HashmapBase *h) {
774 const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
775 void *p;
776
777 assert(!h->has_indirect);
778
779 p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
780 memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
781}
782
3ef11dcf 783static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
89439d4f
MS
784 HashmapBase *h;
785 const struct hashmap_type_info *hi = &hashmap_type_info[type];
786 bool use_pool;
787
788 use_pool = is_main_thread();
789
790 h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
67f3c402 791
60918275 792 if (!h)
89439d4f
MS
793 return NULL;
794
795 h->type = type;
796 h->from_pool = use_pool;
797 h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
798
799 if (type == HASHMAP_TYPE_ORDERED) {
800 OrderedHashmap *lh = (OrderedHashmap*)h;
801 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
802 }
803
804 reset_direct_storage(h);
60918275 805
89439d4f
MS
        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }
810
fc86aa0e 811#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
812 h->debug.func = func;
813 h->debug.file = file;
814 h->debug.line = line;
4f1b3061
TG
815 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
816 LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
817 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
89439d4f
MS
818#endif
819
820 return h;
821}
60918275 822
89439d4f 823Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 824 return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
825}
826
827OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 828 return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
829}
830
831Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 832 return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
833}
834
835static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
3ef11dcf 836 enum HashmapType type HASHMAP_DEBUG_PARAMS) {
89439d4f
MS
837 HashmapBase *q;
838
839 assert(h);
840
841 if (*h)
842 return 0;
843
3ef11dcf 844 q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
845 if (!q)
846 return -ENOMEM;
847
848 *h = q;
849 return 0;
850}
851
852int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 853 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
854}
855
856int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 857 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
858}
859
860int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 861 return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
862}
863
864static void hashmap_free_no_clear(HashmapBase *h) {
865 assert(!h->has_indirect);
866 assert(!h->n_direct_entries);
867
fc86aa0e 868#ifdef ENABLE_DEBUG_HASHMAP
4f1b3061 869 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
89439d4f 870 LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
4f1b3061 871 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
89439d4f 872#endif
45fa9e29 873
39c2a6f1 874 if (h->from_pool)
89439d4f 875 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
39c2a6f1
LP
876 else
877 free(h);
60918275
LP
878}
879
cfe561a4 880HashmapBase *internal_hashmap_free(HashmapBase *h) {
89439d4f
MS
881
882 /* Free the hashmap, but nothing in it */
883
cfe561a4
DH
884 if (h) {
885 internal_hashmap_clear(h);
886 hashmap_free_no_clear(h);
887 }
89439d4f 888
cfe561a4 889 return NULL;
89439d4f
MS
890}
891
cfe561a4 892HashmapBase *internal_hashmap_free_free(HashmapBase *h) {
67f3c402
LP
893
894 /* Free the hashmap and all data objects in it, but not the
895 * keys */
896
cfe561a4
DH
897 if (h) {
898 internal_hashmap_clear_free(h);
899 hashmap_free_no_clear(h);
900 }
61b1477c 901
cfe561a4 902 return NULL;
449ddb2d
LP
903}
904
cfe561a4 905Hashmap *hashmap_free_free_free(Hashmap *h) {
fabe5c0e
LP
906
907 /* Free the hashmap and all data and key objects in it */
908
cfe561a4
DH
909 if (h) {
910 hashmap_clear_free_free(h);
911 hashmap_free_no_clear(HASHMAP_BASE(h));
912 }
fabe5c0e 913
cfe561a4 914 return NULL;
fabe5c0e
LP
915}
916
89439d4f 917void internal_hashmap_clear(HashmapBase *h) {
11dd41ce
LP
918 if (!h)
919 return;
920
89439d4f
MS
921 if (h->has_indirect) {
922 free(h->indirect.storage);
923 h->has_indirect = false;
924 }
925
926 h->n_direct_entries = 0;
927 reset_direct_storage(h);
928
929 if (h->type == HASHMAP_TYPE_ORDERED) {
930 OrderedHashmap *lh = (OrderedHashmap*) h;
931 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
932 }
11dd41ce
LP
933}
934
89439d4f
MS
935void internal_hashmap_clear_free(HashmapBase *h) {
936 unsigned idx;
9946996c 937
61b1477c
LP
938 if (!h)
939 return;
9946996c 940
89439d4f
MS
941 for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
942 idx = skip_free_buckets(h, idx + 1))
943 free(entry_value(h, bucket_at(h, idx)));
944
945 internal_hashmap_clear(h);
9946996c
LP
946}
947
fabe5c0e 948void hashmap_clear_free_free(Hashmap *h) {
89439d4f
MS
949 unsigned idx;
950
fabe5c0e
LP
951 if (!h)
952 return;
953
89439d4f
MS
954 for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
955 idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
956 struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
957 free((void*)e->b.key);
958 free(e->value);
fabe5c0e 959 }
89439d4f
MS
960
961 internal_hashmap_clear(HASHMAP_BASE(h));
fabe5c0e
LP
962}
963
89439d4f
MS
964static int resize_buckets(HashmapBase *h, unsigned entries_add);
965
966/*
967 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
968 * Performs Robin Hood swaps as it goes. The entry to put must be placed
969 * by the caller into swap slot IDX_PUT.
970 * If used for in-place resizing, may leave a displaced entry in swap slot
971 * IDX_PUT. Caller must rehash it next.
972 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
973 * false otherwise.
974 */
975static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
976 struct swap_entries *swap) {
977 dib_raw_t raw_dib, *dibs;
978 unsigned dib, distance;
979
fc86aa0e 980#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
981 h->debug.put_count++;
982#endif
983
984 dibs = dib_raw_ptr(h);
985
986 for (distance = 0; ; distance++) {
987 raw_dib = dibs[idx];
988 if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
989 if (raw_dib == DIB_RAW_REHASH)
990 bucket_move_entry(h, swap, idx, IDX_TMP);
991
992 if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
993 h->indirect.idx_lowest_entry = idx;
60918275 994
89439d4f
MS
995 bucket_set_dib(h, idx, distance);
996 bucket_move_entry(h, swap, IDX_PUT, idx);
997 if (raw_dib == DIB_RAW_REHASH) {
998 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
999 return true;
1000 }
60918275 1001
89439d4f
MS
1002 return false;
1003 }
1004
1005 dib = bucket_calculate_dib(h, idx, raw_dib);
1006
1007 if (dib < distance) {
1008 /* Found a wealthier entry. Go Robin Hood! */
89439d4f
MS
1009 bucket_set_dib(h, idx, distance);
1010
1011 /* swap the entries */
1012 bucket_move_entry(h, swap, idx, IDX_TMP);
1013 bucket_move_entry(h, swap, IDX_PUT, idx);
1014 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
1015
1016 distance = dib;
1017 }
1018
1019 idx = next_idx(h, idx);
1020 }
60918275
LP
1021}
1022
89439d4f
MS
1023/*
1024 * Puts an entry into a hashmap, boldly - no check whether key already exists.
1025 * The caller must place the entry (only its key and value, not link indexes)
1026 * in swap slot IDX_PUT.
1027 * Caller must ensure: the key does not exist yet in the hashmap.
1028 * that resize is not needed if !may_resize.
1029 * Returns: 1 if entry was put successfully.
1030 * -ENOMEM if may_resize==true and resize failed with -ENOMEM.
1031 * Cannot return -ENOMEM if !may_resize.
1032 */
1033static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
1034 struct swap_entries *swap, bool may_resize) {
1035 struct ordered_hashmap_entry *new_entry;
1036 int r;
1037
1038 assert(idx < n_buckets(h));
1039
1040 new_entry = bucket_at_swap(swap, IDX_PUT);
1041
1042 if (may_resize) {
1043 r = resize_buckets(h, 1);
1044 if (r < 0)
1045 return r;
1046 if (r > 0)
1047 idx = bucket_hash(h, new_entry->p.b.key);
1048 }
1049 assert(n_entries(h) < n_buckets(h));
1050
1051 if (h->type == HASHMAP_TYPE_ORDERED) {
1052 OrderedHashmap *lh = (OrderedHashmap*) h;
1053
1054 new_entry->iterate_next = IDX_NIL;
1055 new_entry->iterate_previous = lh->iterate_list_tail;
1056
1057 if (lh->iterate_list_tail != IDX_NIL) {
1058 struct ordered_hashmap_entry *old_tail;
1059
1060 old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
1061 assert(old_tail->iterate_next == IDX_NIL);
1062 old_tail->iterate_next = IDX_PUT;
1063 }
1064
1065 lh->iterate_list_tail = IDX_PUT;
1066 if (lh->iterate_list_head == IDX_NIL)
1067 lh->iterate_list_head = IDX_PUT;
1068 }
1069
1070 assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
1071
1072 n_entries_inc(h);
fc86aa0e 1073#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
1074 h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1075#endif
1076
1077 return 1;
1078}
1079#define hashmap_put_boldly(h, idx, swap, may_resize) \
1080 hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1081
1082/*
1083 * Returns 0 if resize is not needed.
f131770b 1084 * 1 if successfully resized.
89439d4f
MS
1085 * -ENOMEM on allocation failure.
1086 */
1087static int resize_buckets(HashmapBase *h, unsigned entries_add) {
1088 struct swap_entries swap;
1089 char *new_storage;
1090 dib_raw_t *old_dibs, *new_dibs;
1091 const struct hashmap_type_info *hi;
1092 unsigned idx, optimal_idx;
1093 unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
1094 uint8_t new_shift;
1095 bool rehash_next;
45fa9e29
LP
1096
1097 assert(h);
1098
89439d4f
MS
1099 hi = &hashmap_type_info[h->type];
1100 new_n_entries = n_entries(h) + entries_add;
e4c691b5
MS
1101
1102 /* overflow? */
89439d4f 1103 if (_unlikely_(new_n_entries < entries_add))
e4c691b5
MS
1104 return -ENOMEM;
1105
89439d4f
MS
1106 /* For direct storage we allow 100% load, because it's tiny. */
1107 if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
9700d698 1108 return 0;
45fa9e29 1109
89439d4f
MS
1110 /*
1111 * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1112 * From it follows: m = n + n/(INV_KEEP_FREE - 1)
1113 */
1114 new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
1115 /* overflow? */
1116 if (_unlikely_(new_n_buckets < new_n_entries))
9700d698 1117 return -ENOMEM;
45fa9e29 1118
89439d4f
MS
1119 if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
1120 return -ENOMEM;
a3b6fafe 1121
89439d4f 1122 old_n_buckets = n_buckets(h);
45fa9e29 1123
89439d4f
MS
1124 if (_likely_(new_n_buckets <= old_n_buckets))
1125 return 0;
45fa9e29 1126
89439d4f
MS
1127 new_shift = log2u_round_up(MAX(
1128 new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1129 2 * sizeof(struct direct_storage)));
45fa9e29 1130
89439d4f
MS
1131 /* Realloc storage (buckets and DIB array). */
1132 new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1133 1U << new_shift);
1134 if (!new_storage)
1135 return -ENOMEM;
45fa9e29 1136
89439d4f
MS
1137 /* Must upgrade direct to indirect storage. */
1138 if (!h->has_indirect) {
1139 memcpy(new_storage, h->direct.storage,
1140 old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1141 h->indirect.n_entries = h->n_direct_entries;
1142 h->indirect.idx_lowest_entry = 0;
1143 h->n_direct_entries = 0;
1144 }
45fa9e29 1145
89439d4f
MS
1146 /* Get a new hash key. If we've just upgraded to indirect storage,
1147 * allow reusing a previously generated key. It's still a different key
1148 * from the shared one that we used for direct storage. */
1149 get_hash_key(h->indirect.hash_key, !h->has_indirect);
1150
1151 h->has_indirect = true;
1152 h->indirect.storage = new_storage;
1153 h->indirect.n_buckets = (1U << new_shift) /
1154 (hi->entry_size + sizeof(dib_raw_t));
1155
1156 old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
1157 new_dibs = dib_raw_ptr(h);
1158
1159 /*
1160 * Move the DIB array to the new place, replacing valid DIB values with
1161 * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1162 * Note: Overlap is not possible, because we have at least doubled the
1163 * number of buckets and dib_raw_t is smaller than any entry type.
1164 */
1165 for (idx = 0; idx < old_n_buckets; idx++) {
1166 assert(old_dibs[idx] != DIB_RAW_REHASH);
1167 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
1168 : DIB_RAW_REHASH;
45fa9e29
LP
1169 }
1170
89439d4f 1171 /* Zero the area of newly added entries (including the old DIB area) */
eccaf899 1172 memzero(bucket_at(h, old_n_buckets),
89439d4f 1173 (n_buckets(h) - old_n_buckets) * hi->entry_size);
45fa9e29 1174
89439d4f
MS
1175 /* The upper half of the new DIB array needs initialization */
1176 memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
1177 (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
9bf3b535 1178
89439d4f
MS
1179 /* Rehash entries that need it */
1180 n_rehashed = 0;
1181 for (idx = 0; idx < old_n_buckets; idx++) {
1182 if (new_dibs[idx] != DIB_RAW_REHASH)
1183 continue;
45fa9e29 1184
89439d4f 1185 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
45fa9e29 1186
89439d4f
MS
1187 /*
1188 * Not much to do if by luck the entry hashes to its current
1189 * location. Just set its DIB.
1190 */
1191 if (optimal_idx == idx) {
1192 new_dibs[idx] = 0;
1193 n_rehashed++;
1194 continue;
1195 }
1196
1197 new_dibs[idx] = DIB_RAW_FREE;
1198 bucket_move_entry(h, &swap, idx, IDX_PUT);
1199 /* bucket_move_entry does not clear the source */
eccaf899 1200 memzero(bucket_at(h, idx), hi->entry_size);
89439d4f
MS
1201
1202 do {
1203 /*
1204 * Find the new bucket for the current entry. This may make
1205 * another entry homeless and load it into IDX_PUT.
1206 */
1207 rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
1208 n_rehashed++;
1209
1210 /* Did the current entry displace another one? */
1211 if (rehash_next)
1212 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
1213 } while (rehash_next);
1214 }
60918275 1215
89439d4f 1216 assert(n_rehashed == n_entries(h));
60918275 1217
89439d4f
MS
1218 return 1;
1219}
45fa9e29 1220
89439d4f
MS
1221/*
1222 * Finds an entry with a matching key
1223 * Returns: index of the found entry, or IDX_NIL if not found.
1224 */
1225static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1226 struct hashmap_base_entry *e;
1227 unsigned dib, distance;
1228 dib_raw_t *dibs = dib_raw_ptr(h);
39c2a6f1 1229
89439d4f 1230 assert(idx < n_buckets(h));
60918275 1231
89439d4f
MS
1232 for (distance = 0; ; distance++) {
1233 if (dibs[idx] == DIB_RAW_FREE)
1234 return IDX_NIL;
60918275 1235
89439d4f 1236 dib = bucket_calculate_dib(h, idx, dibs[idx]);
60918275 1237
89439d4f
MS
1238 if (dib < distance)
1239 return IDX_NIL;
1240 if (dib == distance) {
1241 e = bucket_at(h, idx);
1242 if (h->hash_ops->compare(e->key, key) == 0)
1243 return idx;
1244 }
1245
1246 idx = next_idx(h, idx);
1247 }
60918275 1248}
89439d4f 1249#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
60918275 1250
923041cb 1251int hashmap_put(Hashmap *h, const void *key, void *value) {
89439d4f
MS
1252 struct swap_entries swap;
1253 struct plain_hashmap_entry *e;
1254 unsigned hash, idx;
923041cb
MS
1255
1256 assert(h);
1257
1258 hash = bucket_hash(h, key);
89439d4f
MS
1259 idx = bucket_scan(h, hash, key);
1260 if (idx != IDX_NIL) {
1261 e = plain_bucket_at(h, idx);
923041cb
MS
1262 if (e->value == value)
1263 return 0;
1264 return -EEXIST;
1265 }
1266
89439d4f
MS
1267 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1268 e->b.key = key;
1269 e->value = value;
1270 return hashmap_put_boldly(h, hash, &swap, true);
1271}
1272
1273int set_put(Set *s, const void *key) {
1274 struct swap_entries swap;
1275 struct hashmap_base_entry *e;
1276 unsigned hash, idx;
1277
1278 assert(s);
1279
1280 hash = bucket_hash(s, key);
1281 idx = bucket_scan(s, hash, key);
1282 if (idx != IDX_NIL)
1283 return 0;
1284
1285 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1286 e->key = key;
1287 return hashmap_put_boldly(s, hash, &swap, true);
923041cb
MS
1288}
1289
3158713e 1290int hashmap_replace(Hashmap *h, const void *key, void *value) {
89439d4f
MS
1291 struct swap_entries swap;
1292 struct plain_hashmap_entry *e;
1293 unsigned hash, idx;
3158713e
LP
1294
1295 assert(h);
1296
a3b6fafe 1297 hash = bucket_hash(h, key);
89439d4f
MS
1298 idx = bucket_scan(h, hash, key);
1299 if (idx != IDX_NIL) {
1300 e = plain_bucket_at(h, idx);
fc86aa0e 1301#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
1302 /* Although the key is equal, the key pointer may have changed,
1303 * and this would break our assumption for iterating. So count
1304 * this operation as incompatible with iteration. */
1305 if (e->b.key != key) {
1306 h->b.debug.put_count++;
1307 h->b.debug.rem_count++;
1308 h->b.debug.last_rem_idx = idx;
1309 }
1310#endif
1311 e->b.key = key;
3158713e
LP
1312 e->value = value;
1313 return 0;
1314 }
1315
89439d4f
MS
1316 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1317 e->b.key = key;
1318 e->value = value;
1319 return hashmap_put_boldly(h, hash, &swap, true);
3158713e
LP
1320}
1321
d99ae53a 1322int hashmap_update(Hashmap *h, const void *key, void *value) {
89439d4f
MS
1323 struct plain_hashmap_entry *e;
1324 unsigned hash, idx;
d99ae53a
LP
1325
1326 assert(h);
1327
a3b6fafe 1328 hash = bucket_hash(h, key);
89439d4f
MS
1329 idx = bucket_scan(h, hash, key);
1330 if (idx == IDX_NIL)
d99ae53a
LP
1331 return -ENOENT;
1332
89439d4f 1333 e = plain_bucket_at(h, idx);
d99ae53a
LP
1334 e->value = value;
1335 return 0;
1336}
1337
89439d4f
MS
1338void *internal_hashmap_get(HashmapBase *h, const void *key) {
1339 struct hashmap_base_entry *e;
1340 unsigned hash, idx;
60918275
LP
1341
1342 if (!h)
1343 return NULL;
1344
a3b6fafe 1345 hash = bucket_hash(h, key);
89439d4f
MS
1346 idx = bucket_scan(h, hash, key);
1347 if (idx == IDX_NIL)
60918275
LP
1348 return NULL;
1349
89439d4f
MS
1350 e = bucket_at(h, idx);
1351 return entry_value(h, e);
60918275
LP
1352}
1353
89439d4f
MS
1354void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
1355 struct plain_hashmap_entry *e;
1356 unsigned hash, idx;
d99ae53a
LP
1357
1358 if (!h)
1359 return NULL;
1360
a3b6fafe 1361 hash = bucket_hash(h, key);
89439d4f
MS
1362 idx = bucket_scan(h, hash, key);
1363 if (idx == IDX_NIL)
d99ae53a
LP
1364 return NULL;
1365
89439d4f 1366 e = plain_bucket_at(h, idx);
d99ae53a 1367 if (key2)
89439d4f 1368 *key2 = (void*) e->b.key;
d99ae53a
LP
1369
1370 return e->value;
1371}
1372
89439d4f 1373bool internal_hashmap_contains(HashmapBase *h, const void *key) {
96342de6 1374 unsigned hash;
96342de6
LN
1375
1376 if (!h)
1377 return false;
1378
a3b6fafe 1379 hash = bucket_hash(h, key);
89439d4f 1380 return bucket_scan(h, hash, key) != IDX_NIL;
96342de6
LN
1381}
1382
89439d4f
MS
1383void *internal_hashmap_remove(HashmapBase *h, const void *key) {
1384 struct hashmap_base_entry *e;
1385 unsigned hash, idx;
60918275
LP
1386 void *data;
1387
1388 if (!h)
1389 return NULL;
1390
a3b6fafe 1391 hash = bucket_hash(h, key);
89439d4f
MS
1392 idx = bucket_scan(h, hash, key);
1393 if (idx == IDX_NIL)
60918275
LP
1394 return NULL;
1395
89439d4f
MS
1396 e = bucket_at(h, idx);
1397 data = entry_value(h, e);
1398 remove_entry(h, idx);
60918275
LP
1399
1400 return data;
1401}
1402
89439d4f
MS
1403void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1404 struct plain_hashmap_entry *e;
1405 unsigned hash, idx;
c582a3b3
LP
1406 void *data;
1407
1408 if (!h) {
1409 if (rkey)
1410 *rkey = NULL;
1411 return NULL;
1412 }
1413
1414 hash = bucket_hash(h, key);
89439d4f
MS
1415 idx = bucket_scan(h, hash, key);
1416 if (idx == IDX_NIL) {
c582a3b3
LP
1417 if (rkey)
1418 *rkey = NULL;
1419 return NULL;
1420 }
1421
89439d4f 1422 e = plain_bucket_at(h, idx);
c582a3b3
LP
1423 data = e->value;
1424 if (rkey)
89439d4f 1425 *rkey = (void*) e->b.key;
c582a3b3 1426
89439d4f 1427 remove_entry(h, idx);
c582a3b3
LP
1428
1429 return data;
1430}
1431
101d8e63 1432int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
89439d4f
MS
1433 struct swap_entries swap;
1434 struct plain_hashmap_entry *e;
1435 unsigned old_hash, new_hash, idx;
101d8e63
LP
1436
1437 if (!h)
1438 return -ENOENT;
1439
a3b6fafe 1440 old_hash = bucket_hash(h, old_key);
89439d4f
MS
1441 idx = bucket_scan(h, old_hash, old_key);
1442 if (idx == IDX_NIL)
101d8e63
LP
1443 return -ENOENT;
1444
a3b6fafe 1445 new_hash = bucket_hash(h, new_key);
89439d4f 1446 if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
101d8e63
LP
1447 return -EEXIST;
1448
89439d4f 1449 remove_entry(h, idx);
101d8e63 1450
89439d4f
MS
1451 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1452 e->b.key = new_key;
101d8e63 1453 e->value = value;
89439d4f
MS
1454 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1455
1456 return 0;
1457}
1458
1459int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1460 struct swap_entries swap;
1461 struct hashmap_base_entry *e;
1462 unsigned old_hash, new_hash, idx;
101d8e63 1463
89439d4f
MS
1464 if (!s)
1465 return -ENOENT;
1466
1467 old_hash = bucket_hash(s, old_key);
1468 idx = bucket_scan(s, old_hash, old_key);
1469 if (idx == IDX_NIL)
1470 return -ENOENT;
1471
1472 new_hash = bucket_hash(s, new_key);
1473 if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1474 return -EEXIST;
1475
1476 remove_entry(s, idx);
1477
1478 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1479 e->key = new_key;
1480 assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
101d8e63
LP
1481
1482 return 0;
1483}
1484
8fe914ec 1485int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
89439d4f
MS
1486 struct swap_entries swap;
1487 struct plain_hashmap_entry *e;
1488 unsigned old_hash, new_hash, idx_old, idx_new;
8fe914ec
LP
1489
1490 if (!h)
1491 return -ENOENT;
1492
a3b6fafe 1493 old_hash = bucket_hash(h, old_key);
89439d4f
MS
1494 idx_old = bucket_scan(h, old_hash, old_key);
1495 if (idx_old == IDX_NIL)
8fe914ec
LP
1496 return -ENOENT;
1497
89439d4f 1498 old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
8fe914ec 1499
89439d4f
MS
1500 new_hash = bucket_hash(h, new_key);
1501 idx_new = bucket_scan(h, new_hash, new_key);
1502 if (idx_new != IDX_NIL)
1503 if (idx_old != idx_new) {
1504 remove_entry(h, idx_new);
1505 /* Compensate for a possible backward shift. */
1506 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1507 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1508 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1509 }
1510
1511 remove_entry(h, idx_old);
1512
1513 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1514 e->b.key = new_key;
8fe914ec 1515 e->value = value;
89439d4f 1516 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
8fe914ec
LP
1517
1518 return 0;
1519}
1520
89439d4f
MS
1521void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1522 struct plain_hashmap_entry *e;
1523 unsigned hash, idx;
3158713e
LP
1524
1525 if (!h)
1526 return NULL;
1527
a3b6fafe 1528 hash = bucket_hash(h, key);
89439d4f
MS
1529 idx = bucket_scan(h, hash, key);
1530 if (idx == IDX_NIL)
3158713e
LP
1531 return NULL;
1532
89439d4f 1533 e = plain_bucket_at(h, idx);
3158713e
LP
1534 if (e->value != value)
1535 return NULL;
1536
89439d4f 1537 remove_entry(h, idx);
3158713e
LP
1538
1539 return value;
1540}
1541
89439d4f
MS
1542static unsigned find_first_entry(HashmapBase *h) {
1543 Iterator i = ITERATOR_FIRST;
60918275 1544
89439d4f
MS
1545 if (!h || !n_entries(h))
1546 return IDX_NIL;
60918275 1547
89439d4f 1548 return hashmap_iterate_entry(h, &i);
60918275
LP
1549}
1550
89439d4f
MS
1551void *internal_hashmap_first(HashmapBase *h) {
1552 unsigned idx;
60918275 1553
89439d4f
MS
1554 idx = find_first_entry(h);
1555 if (idx == IDX_NIL)
60918275
LP
1556 return NULL;
1557
89439d4f 1558 return entry_value(h, bucket_at(h, idx));
60918275
LP
1559}
1560
89439d4f
MS
1561void *internal_hashmap_first_key(HashmapBase *h) {
1562 struct hashmap_base_entry *e;
1563 unsigned idx;
2e4a6ff4 1564
89439d4f
MS
1565 idx = find_first_entry(h);
1566 if (idx == IDX_NIL)
2e4a6ff4
LP
1567 return NULL;
1568
89439d4f
MS
1569 e = bucket_at(h, idx);
1570 return (void*) e->key;
2e4a6ff4
LP
1571}
1572
89439d4f
MS
1573void *internal_hashmap_steal_first(HashmapBase *h) {
1574 struct hashmap_base_entry *e;
60918275 1575 void *data;
89439d4f 1576 unsigned idx;
60918275 1577
89439d4f
MS
1578 idx = find_first_entry(h);
1579 if (idx == IDX_NIL)
60918275
LP
1580 return NULL;
1581
89439d4f
MS
1582 e = bucket_at(h, idx);
1583 data = entry_value(h, e);
1584 remove_entry(h, idx);
60918275
LP
1585
1586 return data;
1587}
1588
89439d4f
MS
1589void *internal_hashmap_steal_first_key(HashmapBase *h) {
1590 struct hashmap_base_entry *e;
22be093f 1591 void *key;
89439d4f 1592 unsigned idx;
22be093f 1593
89439d4f
MS
1594 idx = find_first_entry(h);
1595 if (idx == IDX_NIL)
22be093f
LP
1596 return NULL;
1597
89439d4f
MS
1598 e = bucket_at(h, idx);
1599 key = (void*) e->key;
1600 remove_entry(h, idx);
22be093f
LP
1601
1602 return key;
1603}
1604
89439d4f 1605unsigned internal_hashmap_size(HashmapBase *h) {
60918275
LP
1606
1607 if (!h)
1608 return 0;
1609
89439d4f 1610 return n_entries(h);
60918275
LP
1611}
1612
89439d4f 1613unsigned internal_hashmap_buckets(HashmapBase *h) {
45fa9e29
LP
1614
1615 if (!h)
1616 return 0;
1617
89439d4f 1618 return n_buckets(h);
45fa9e29
LP
1619}
1620
89439d4f
MS
1621int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
1622 Iterator i;
1623 unsigned idx;
60918275 1624
89439d4f 1625 assert(h);
60918275 1626
89439d4f
MS
1627 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1628 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1629 int r;
91cdde8a 1630
89439d4f
MS
1631 r = hashmap_put(h, pe->b.key, pe->value);
1632 if (r < 0 && r != -EEXIST)
1633 return r;
1634 }
91cdde8a 1635
89439d4f
MS
1636 return 0;
1637}
91cdde8a 1638
89439d4f
MS
1639int set_merge(Set *s, Set *other) {
1640 Iterator i;
1641 unsigned idx;
91cdde8a 1642
89439d4f
MS
1643 assert(s);
1644
1645 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1646 struct set_entry *se = set_bucket_at(other, idx);
91cdde8a
LP
1647 int r;
1648
89439d4f
MS
1649 r = set_put(s, se->b.key);
1650 if (r < 0)
a3b6fafe 1651 return r;
91cdde8a
LP
1652 }
1653
1654 return 0;
1655}
1656
89439d4f 1657int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
e4c691b5
MS
1658 int r;
1659
1660 assert(h);
1661
1662 r = resize_buckets(h, entries_add);
1663 if (r < 0)
1664 return r;
1665
1666 return 0;
1667}
1668
89439d4f
MS
1669/*
1670 * The same as hashmap_merge(), but every new item from other is moved to h.
1671 * Keys already in h are skipped and stay in other.
1672 * Returns: 0 on success.
1673 * -ENOMEM on alloc failure, in which case no move has been done.
1674 */
1675int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
1676 struct swap_entries swap;
1677 struct hashmap_base_entry *e, *n;
1678 Iterator i;
1679 unsigned idx;
1680 int r;
101d8e63
LP
1681
1682 assert(h);
1683
101d8e63 1684 if (!other)
7ad63f57 1685 return 0;
101d8e63 1686
89439d4f
MS
1687 assert(other->type == h->type);
1688
1689 /*
1690 * This reserves buckets for the worst case, where none of other's
1691 * entries are yet present in h. This is preferable to risking
1692 * an allocation failure in the middle of the moving and having to
1693 * rollback or return a partial result.
1694 */
1695 r = resize_buckets(h, n_entries(other));
1696 if (r < 0)
1697 return r;
101d8e63 1698
89439d4f
MS
1699 HASHMAP_FOREACH_IDX(idx, other, i) {
1700 unsigned h_hash;
101d8e63 1701
89439d4f 1702 e = bucket_at(other, idx);
a3b6fafe 1703 h_hash = bucket_hash(h, e->key);
89439d4f 1704 if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
101d8e63
LP
1705 continue;
1706
89439d4f
MS
1707 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1708 n->key = e->key;
1709 if (h->type != HASHMAP_TYPE_SET)
1710 ((struct plain_hashmap_entry*) n)->value =
1711 ((struct plain_hashmap_entry*) e)->value;
1712 assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);
1713
1714 remove_entry(other, idx);
101d8e63 1715 }
7ad63f57
MS
1716
1717 return 0;
101d8e63
LP
1718}
1719
89439d4f
MS
1720int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
1721 struct swap_entries swap;
1722 unsigned h_hash, other_hash, idx;
1723 struct hashmap_base_entry *e, *n;
1724 int r;
101d8e63 1725
101d8e63
LP
1726 assert(h);
1727
a3b6fafe 1728 h_hash = bucket_hash(h, key);
89439d4f 1729 if (bucket_scan(h, h_hash, key) != IDX_NIL)
101d8e63
LP
1730 return -EEXIST;
1731
bf3d3e2b
MS
1732 if (!other)
1733 return -ENOENT;
1734
89439d4f
MS
1735 assert(other->type == h->type);
1736
a3b6fafe 1737 other_hash = bucket_hash(other, key);
89439d4f
MS
1738 idx = bucket_scan(other, other_hash, key);
1739 if (idx == IDX_NIL)
101d8e63
LP
1740 return -ENOENT;
1741
89439d4f
MS
1742 e = bucket_at(other, idx);
1743
1744 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1745 n->key = e->key;
1746 if (h->type != HASHMAP_TYPE_SET)
1747 ((struct plain_hashmap_entry*) n)->value =
1748 ((struct plain_hashmap_entry*) e)->value;
1749 r = hashmap_put_boldly(h, h_hash, &swap, true);
1750 if (r < 0)
1751 return r;
101d8e63 1752
89439d4f 1753 remove_entry(other, idx);
101d8e63
LP
1754 return 0;
1755}
1756
89439d4f
MS
1757HashmapBase *internal_hashmap_copy(HashmapBase *h) {
1758 HashmapBase *copy;
1759 int r;
91cdde8a
LP
1760
1761 assert(h);
1762
89439d4f 1763 copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
45fa9e29 1764 if (!copy)
91cdde8a
LP
1765 return NULL;
1766
89439d4f
MS
1767 switch (h->type) {
1768 case HASHMAP_TYPE_PLAIN:
1769 case HASHMAP_TYPE_ORDERED:
1770 r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1771 break;
1772 case HASHMAP_TYPE_SET:
1773 r = set_merge((Set*)copy, (Set*)h);
1774 break;
1775 default:
1776 assert_not_reached("Unknown hashmap type");
1777 }
1778
1779 if (r < 0) {
1780 internal_hashmap_free(copy);
91cdde8a
LP
1781 return NULL;
1782 }
1783
1784 return copy;
1785}
db1413d7 1786
89439d4f 1787char **internal_hashmap_get_strv(HashmapBase *h) {
db1413d7 1788 char **sv;
89439d4f
MS
1789 Iterator i;
1790 unsigned idx, n;
db1413d7 1791
89439d4f 1792 sv = new(char*, n_entries(h)+1);
729e3769 1793 if (!sv)
db1413d7
KS
1794 return NULL;
1795
1796 n = 0;
89439d4f
MS
1797 HASHMAP_FOREACH_IDX(idx, h, i)
1798 sv[n++] = entry_value(h, bucket_at(h, idx));
db1413d7
KS
1799 sv[n] = NULL;
1800
1801 return sv;
1802}
3c1668da 1803
89439d4f
MS
1804void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1805 struct ordered_hashmap_entry *e;
1806 unsigned hash, idx;
3c1668da 1807
3c1668da
LP
1808 if (!h)
1809 return NULL;
1810
a3b6fafe 1811 hash = bucket_hash(h, key);
89439d4f
MS
1812 idx = bucket_scan(h, hash, key);
1813 if (idx == IDX_NIL)
3c1668da
LP
1814 return NULL;
1815
89439d4f
MS
1816 e = ordered_bucket_at(h, idx);
1817 if (e->iterate_next == IDX_NIL)
3c1668da 1818 return NULL;
89439d4f
MS
1819 return ordered_bucket_at(h, e->iterate_next)->p.value;
1820}
3c1668da 1821
89439d4f
MS
1822int set_consume(Set *s, void *value) {
1823 int r;
1824
1825 r = set_put(s, value);
575ccc1b 1826 if (r <= 0)
89439d4f
MS
1827 free(value);
1828
1829 return r;
1830}
1831
1832int set_put_strdup(Set *s, const char *p) {
1833 char *c;
1834 int r;
1835
1836 assert(s);
1837 assert(p);
1838
1839 c = strdup(p);
1840 if (!c)
1841 return -ENOMEM;
1842
1843 r = set_consume(s, c);
1844 if (r == -EEXIST)
1845 return 0;
1846
1847 return r;
1848}
1849
1850int set_put_strdupv(Set *s, char **l) {
1851 int n = 0, r;
1852 char **i;
1853
1854 STRV_FOREACH(i, l) {
1855 r = set_put_strdup(s, *i);
1856 if (r < 0)
1857 return r;
1858
1859 n += r;
1860 }
1861
1862 return n;
3c1668da 1863}