]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/basic/hashmap.c
Merge pull request #1919 from martinpitt/master
[thirdparty/systemd.git] / src / basic / hashmap.c
CommitLineData
d6c9574f 1/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
60918275 2
a7334b09
LP
3/***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
89439d4f 7 Copyright 2014 Michal Schmidt
a7334b09
LP
8
9 systemd is free software; you can redistribute it and/or modify it
5430f7f2
LP
10 under the terms of the GNU Lesser General Public License as published by
11 the Free Software Foundation; either version 2.1 of the License, or
a7334b09
LP
12 (at your option) any later version.
13
14 systemd is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
5430f7f2 17 Lesser General Public License for more details.
a7334b09 18
5430f7f2 19 You should have received a copy of the GNU Lesser General Public License
a7334b09
LP
20 along with systemd; If not, see <http://www.gnu.org/licenses/>.
21***/
22
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#include "alloc-util.h"
#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "strv.h"
#include "util.h"
60918275 37
2eec67ac
TA
38#ifdef ENABLE_DEBUG_HASHMAP
39#include "list.h"
40#endif
41
89439d4f
MS
42/*
43 * Implementation of hashmaps.
44 * Addressing: open
45 * - uses less RAM compared to closed addressing (chaining), because
46 * our entries are small (especially in Sets, which tend to contain
47 * the majority of entries in systemd).
48 * Collision resolution: Robin Hood
49 * - tends to equalize displacement of entries from their optimal buckets.
50 * Probe sequence: linear
51 * - though theoretically worse than random probing/uniform hashing/double
52 * hashing, it is good for cache locality.
53 *
54 * References:
55 * Celis, P. 1986. Robin Hood Hashing.
56 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
57 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
58 * - The results are derived for random probing. Suggests deletion with
59 * tombstones and two mean-centered search methods. None of that works
60 * well for linear probing.
61 *
62 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
63 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
64 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
65 * http://www.math.uu.se/~svante/papers/sj157.pdf
66 * - Applies to Robin Hood with linear probing. Contains remarks on
67 * the unsuitability of mean-centered search with linear probing.
68 *
69 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
70 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
71 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
72 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
73 * in a successful search), and Janson writes about displacement. C = d + 1.
74 *
75 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
76 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
77 * - Explanation of backward shift deletion with pictures.
78 *
79 * Khuong, P. 2013. The Other Robin Hood Hashing.
80 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
81 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
82 */
83
84/*
85 * XXX Ideas for improvement:
86 * For unordered hashmaps, randomize iteration order, similarly to Perl:
87 * http://blog.booking.com/hardening-perls-hash-function.html
88 */
89
90/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
91 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
92#define INV_KEEP_FREE 5U
93
94/* Fields common to entries of all hashmap/set types */
95struct hashmap_base_entry {
60918275 96 const void *key;
89439d4f
MS
97};
98
99/* Entry types for specific hashmap/set types
100 * hashmap_base_entry must be at the beginning of each entry struct. */
101
102struct plain_hashmap_entry {
103 struct hashmap_base_entry b;
60918275 104 void *value;
60918275
LP
105};
106
89439d4f
MS
107struct ordered_hashmap_entry {
108 struct plain_hashmap_entry p;
109 unsigned iterate_next, iterate_previous;
110};
60918275 111
89439d4f
MS
112struct set_entry {
113 struct hashmap_base_entry b;
114};
45fa9e29 115
89439d4f
MS
116/* In several functions it is advantageous to have the hash table extended
117 * virtually by a couple of additional buckets. We reserve special index values
118 * for these "swap" buckets. */
119#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
120#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
121#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
122#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)
39c2a6f1 123
89439d4f
MS
124#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
125#define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */
126
127assert_cc(IDX_FIRST == _IDX_SWAP_END);
128assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
129
130/* Storage space for the "swap" buckets.
131 * All entry types can fit into a ordered_hashmap_entry. */
132struct swap_entries {
133 struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
60918275
LP
134};
135
89439d4f
MS
/* DIB: Distance from Initial Bucket, i.e. how far an entry sits from the
 * bucket its hash maps to. Stored as a single byte per bucket. */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE ((dib_raw_t)0xffU) /* a free bucket */
#define DIB_RAW_INIT ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries; /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count; /* counts puts into the hashmap */
        unsigned rem_count; /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */
39c2a6f1 170
89439d4f
MS
171enum HashmapType {
172 HASHMAP_TYPE_PLAIN,
173 HASHMAP_TYPE_ORDERED,
174 HASHMAP_TYPE_SET,
175 _HASHMAP_TYPE_MAX
176};
39c2a6f1 177
89439d4f
MS
178struct _packed_ indirect_storage {
179 char *storage; /* where buckets and DIBs are stored */
180 uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */
181
182 unsigned n_entries; /* number of stored entries */
183 unsigned n_buckets; /* number of buckets */
184
185 unsigned idx_lowest_entry; /* Index below which all buckets are free.
186 Makes "while(hashmap_steal_first())" loops
187 O(n) instead of O(n^2) for unordered hashmaps. */
188 uint8_t _pad[3]; /* padding for the whole HashmapBase */
189 /* The bitfields in HashmapBase complete the alignment of the whole thing. */
190};
191
192struct direct_storage {
193 /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
194 * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
195 * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
196 char storage[sizeof(struct indirect_storage)];
197};
198
199#define DIRECT_BUCKETS(entry_t) \
200 (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
201
202/* We should be able to store at least one entry directly. */
203assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);
204
205/* We have 3 bits for n_direct_entries. */
206assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
207
208/* Hashmaps with directly stored entries all use this shared hash key.
209 * It's no big deal if the key is guessed, because there can be only
210 * a handful of directly stored entries in a hashmap. When a hashmap
211 * outgrows direct storage, it gets its own key for indirect storage. */
212static uint8_t shared_hash_key[HASH_KEY_SIZE];
213static bool shared_hash_key_initialized;
214
215/* Fields that all hashmap/set types must have */
216struct HashmapBase {
217 const struct hash_ops *hash_ops; /* hash and compare ops to use */
218
219 union _packed_ {
220 struct indirect_storage indirect; /* if has_indirect */
221 struct direct_storage direct; /* if !has_indirect */
222 };
223
224 enum HashmapType type:2; /* HASHMAP_TYPE_* */
225 bool has_indirect:1; /* whether indirect storage is used */
226 unsigned n_direct_entries:3; /* Number of entries in direct storage.
227 * Only valid if !has_indirect. */
228 bool from_pool:1; /* whether was allocated from mempool */
229 HASHMAP_DEBUG_FIELDS /* optional hashmap_debug_info */
230};
231
232/* Specific hash types
233 * HashmapBase must be at the beginning of each hashmap struct. */
234
235struct Hashmap {
236 struct HashmapBase b;
237};
238
239struct OrderedHashmap {
240 struct HashmapBase b;
241 unsigned iterate_list_head, iterate_list_tail;
242};
243
244struct Set {
245 struct HashmapBase b;
246};
247
248DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
249DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
250/* No need for a separate Set pool */
251assert_cc(sizeof(Hashmap) == sizeof(Set));
252
253struct hashmap_type_info {
254 size_t head_size;
255 size_t entry_size;
256 struct mempool *mempool;
257 unsigned n_direct_buckets;
258};
259
260static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
261 [HASHMAP_TYPE_PLAIN] = {
262 .head_size = sizeof(Hashmap),
263 .entry_size = sizeof(struct plain_hashmap_entry),
264 .mempool = &hashmap_pool,
265 .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
266 },
267 [HASHMAP_TYPE_ORDERED] = {
268 .head_size = sizeof(OrderedHashmap),
269 .entry_size = sizeof(struct ordered_hashmap_entry),
270 .mempool = &ordered_hashmap_pool,
271 .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
272 },
273 [HASHMAP_TYPE_SET] = {
274 .head_size = sizeof(Set),
275 .entry_size = sizeof(struct set_entry),
276 .mempool = &hashmap_pool,
277 .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
278 },
279};
39c2a6f1 280
void string_hash_func(const void *p, struct siphash *state) {
        const char *s = p;

        /* Include the terminating NUL byte in the hashed data. */
        siphash24_compress(s, strlen(s) + 1, state);
}
284
int string_compare_func(const void *a, const void *b) {
        const char *x = a, *y = b;

        /* Plain lexicographic ordering of NUL-terminated strings. */
        return strcmp(x, y);
}
288
d5099efc
MS
289const struct hash_ops string_hash_ops = {
290 .hash = string_hash_func,
291 .compare = string_compare_func
292};
293
b826ab58
TG
void trivial_hash_func(const void *p, struct siphash *state) {
        /* Hash the pointer value itself, not what it points to. */
        siphash24_compress(&p, sizeof(p), state);
}
297
int trivial_compare_func(const void *a, const void *b) {
        /* Order by pointer value. Compare via uintptr_t: the relational
         * operators applied to pointers into different objects are undefined
         * behavior in C (C11 6.5.8p5), while integer comparison is always
         * well defined and yields the same total order in practice. */
        uintptr_t x = (uintptr_t) a, y = (uintptr_t) b;

        return x < y ? -1 : x > y ? 1 : 0;
}
301
d5099efc
MS
302const struct hash_ops trivial_hash_ops = {
303 .hash = trivial_hash_func,
304 .compare = trivial_compare_func
305};
306
b826ab58
TG
void uint64_hash_func(const void *p, struct siphash *state) {
        /* p points at a uint64_t; feed its 8 bytes into the hash state. */
        siphash24_compress(p, sizeof(uint64_t), state);
}
310
int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t x = *(const uint64_t*) _a;
        uint64_t y = *(const uint64_t*) _b;

        /* Numeric three-way comparison. */
        if (x < y)
                return -1;
        if (x > y)
                return 1;
        return 0;
}
317
d5099efc
MS
318const struct hash_ops uint64_hash_ops = {
319 .hash = uint64_hash_func,
320 .compare = uint64_compare_func
321};
322
de99c9dc 323#if SIZEOF_DEV_T != 8
b826ab58
TG
324void devt_hash_func(const void *p, struct siphash *state) {
325 siphash24_compress(p, sizeof(dev_t), state);
de99c9dc
LP
326}
327
328int devt_compare_func(const void *_a, const void *_b) {
329 dev_t a, b;
330 a = *(const dev_t*) _a;
331 b = *(const dev_t*) _b;
332 return a < b ? -1 : (a > b ? 1 : 0);
333}
d5099efc
MS
334
335const struct hash_ops devt_hash_ops = {
336 .hash = devt_hash_func,
337 .compare = devt_compare_func
338};
de99c9dc
LP
339#endif
340
89439d4f
MS
341static unsigned n_buckets(HashmapBase *h) {
342 return h->has_indirect ? h->indirect.n_buckets
343 : hashmap_type_info[h->type].n_direct_buckets;
344}
345
346static unsigned n_entries(HashmapBase *h) {
347 return h->has_indirect ? h->indirect.n_entries
348 : h->n_direct_entries;
349}
350
351static void n_entries_inc(HashmapBase *h) {
352 if (h->has_indirect)
353 h->indirect.n_entries++;
354 else
355 h->n_direct_entries++;
356}
357
358static void n_entries_dec(HashmapBase *h) {
359 if (h->has_indirect)
360 h->indirect.n_entries--;
361 else
362 h->n_direct_entries--;
363}
364
365static char *storage_ptr(HashmapBase *h) {
366 return h->has_indirect ? h->indirect.storage
367 : h->direct.storage;
368}
369
370static uint8_t *hash_key(HashmapBase *h) {
371 return h->has_indirect ? h->indirect.hash_key
372 : shared_hash_key;
373}
374
375static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
b826ab58 376 struct siphash state;
0cb3c286 377 uint64_t hash;
b826ab58 378
0cb3c286 379 siphash24_init(&state, hash_key(h));
b826ab58
TG
380
381 h->hash_ops->hash(p, &state);
382
dbe81cbd 383 siphash24_finalize(&hash, &state);
0cb3c286
TG
384
385 return (unsigned) (hash % n_buckets(h));
9bf3b535 386}
89439d4f 387#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
9bf3b535
LP
388
389static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
390 static uint8_t current[HASH_KEY_SIZE];
391 static bool current_initialized = false;
392
393 /* Returns a hash function key to use. In order to keep things
394 * fast we will not generate a new key each time we allocate a
395 * new hash table. Instead, we'll just reuse the most recently
396 * generated one, except if we never generated one or when we
397 * are rehashing an entire hash table because we reached a
398 * fill level */
399
400 if (!current_initialized || !reuse_is_ok) {
401 random_bytes(current, sizeof(current));
402 current_initialized = true;
403 }
404
405 memcpy(hash_key, current, sizeof(current));
a3b6fafe
LP
406}
407
89439d4f
MS
408static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
409 return (struct hashmap_base_entry*)
410 (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
411}
412
413static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
414 return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
415}
416
417static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
418 return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
419}
39c2a6f1 420
89439d4f
MS
421static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
422 return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
423}
39c2a6f1 424
89439d4f
MS
425static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
426 return &swap->e[idx - _IDX_SWAP_BEGIN];
427}
39c2a6f1 428
89439d4f
MS
429/* Returns a pointer to the bucket at index idx.
430 * Understands real indexes and swap indexes, hence "_virtual". */
431static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
432 unsigned idx) {
433 if (idx < _IDX_SWAP_BEGIN)
434 return bucket_at(h, idx);
435
436 if (idx < _IDX_SWAP_END)
437 return &bucket_at_swap(swap, idx)->p.b;
438
439 assert_not_reached("Invalid index");
440}
441
442static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
443 return (dib_raw_t*)
444 (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
445}
446
447static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
448 return idx >= from ? idx - from
449 : n_buckets(h) + idx - from;
450}
451
452static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
453 unsigned initial_bucket;
454
455 if (raw_dib == DIB_RAW_FREE)
456 return DIB_FREE;
457
458 if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
459 return raw_dib;
460
461 /*
462 * Having an overflow DIB value is very unlikely. The hash function
463 * would have to be bad. For example, in a table of size 2^24 filled
464 * to load factor 0.9 the maximum observed DIB is only about 60.
465 * In theory (assuming I used Maxima correctly), for an infinite size
466 * hash table with load factor 0.8 the probability of a given entry
467 * having DIB > 40 is 1.9e-8.
468 * This returns the correct DIB value by recomputing the hash value in
469 * the unlikely case. XXX Hitting this case could be a hint to rehash.
470 */
471 initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
472 return bucket_distance(h, idx, initial_bucket);
473}
474
475static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
476 dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
477}
478
479static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
480 dib_raw_t *dibs;
481
482 dibs = dib_raw_ptr(h);
483
484 for ( ; idx < n_buckets(h); idx++)
485 if (dibs[idx] != DIB_RAW_FREE)
486 return idx;
487
488 return IDX_NIL;
489}
490
491static void bucket_mark_free(HashmapBase *h, unsigned idx) {
eccaf899 492 memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
89439d4f
MS
493 bucket_set_dib(h, idx, DIB_FREE);
494}
495
496static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
497 unsigned from, unsigned to) {
498 struct hashmap_base_entry *e_from, *e_to;
499
500 assert(from != to);
39c2a6f1 501
89439d4f
MS
502 e_from = bucket_at_virtual(h, swap, from);
503 e_to = bucket_at_virtual(h, swap, to);
504
505 memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);
506
507 if (h->type == HASHMAP_TYPE_ORDERED) {
508 OrderedHashmap *lh = (OrderedHashmap*) h;
509 struct ordered_hashmap_entry *le, *le_to;
510
511 le_to = (struct ordered_hashmap_entry*) e_to;
512
513 if (le_to->iterate_next != IDX_NIL) {
514 le = (struct ordered_hashmap_entry*)
515 bucket_at_virtual(h, swap, le_to->iterate_next);
516 le->iterate_previous = to;
517 }
518
519 if (le_to->iterate_previous != IDX_NIL) {
520 le = (struct ordered_hashmap_entry*)
521 bucket_at_virtual(h, swap, le_to->iterate_previous);
522 le->iterate_next = to;
523 }
524
525 if (lh->iterate_list_head == from)
526 lh->iterate_list_head = to;
527 if (lh->iterate_list_tail == from)
528 lh->iterate_list_tail = to;
39c2a6f1 529 }
89439d4f 530}
60918275 531
89439d4f
MS
532static unsigned next_idx(HashmapBase *h, unsigned idx) {
533 return (idx + 1U) % n_buckets(h);
534}
60918275 535
89439d4f
MS
536static unsigned prev_idx(HashmapBase *h, unsigned idx) {
537 return (n_buckets(h) + idx - 1U) % n_buckets(h);
538}
60918275 539
89439d4f
MS
540static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
541 switch (h->type) {
45fa9e29 542
89439d4f
MS
543 case HASHMAP_TYPE_PLAIN:
544 case HASHMAP_TYPE_ORDERED:
545 return ((struct plain_hashmap_entry*)e)->value;
39c2a6f1 546
89439d4f
MS
547 case HASHMAP_TYPE_SET:
548 return (void*) e->key;
a3b6fafe 549
89439d4f
MS
550 default:
551 assert_not_reached("Unknown hashmap type");
552 }
60918275
LP
553}
554
89439d4f
MS
555static void base_remove_entry(HashmapBase *h, unsigned idx) {
556 unsigned left, right, prev, dib;
557 dib_raw_t raw_dib, *dibs;
45fa9e29 558
89439d4f
MS
559 dibs = dib_raw_ptr(h);
560 assert(dibs[idx] != DIB_RAW_FREE);
034c6ed7 561
fc86aa0e 562#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
563 h->debug.rem_count++;
564 h->debug.last_rem_idx = idx;
565#endif
034c6ed7 566
89439d4f
MS
567 left = idx;
568 /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
569 for (right = next_idx(h, left); ; right = next_idx(h, right)) {
570 raw_dib = dibs[right];
571 if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
572 break;
573
574 /* The buckets are not supposed to be all occupied and with DIB > 0.
575 * That would mean we could make everyone better off by shifting them
576 * backward. This scenario is impossible. */
577 assert(left != right);
578 }
034c6ed7 579
89439d4f
MS
580 if (h->type == HASHMAP_TYPE_ORDERED) {
581 OrderedHashmap *lh = (OrderedHashmap*) h;
582 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
583
584 if (le->iterate_next != IDX_NIL)
585 ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
586 else
587 lh->iterate_list_tail = le->iterate_previous;
588
589 if (le->iterate_previous != IDX_NIL)
590 ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
591 else
592 lh->iterate_list_head = le->iterate_next;
593 }
594
595 /* Now shift all buckets in the interval (left, right) one step backwards */
596 for (prev = left, left = next_idx(h, left); left != right;
597 prev = left, left = next_idx(h, left)) {
598 dib = bucket_calculate_dib(h, left, dibs[left]);
599 assert(dib != 0);
600 bucket_move_entry(h, NULL, left, prev);
601 bucket_set_dib(h, prev, dib - 1);
602 }
603
604 bucket_mark_free(h, prev);
605 n_entries_dec(h);
034c6ed7 606}
89439d4f
MS
607#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
608
609static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
610 struct ordered_hashmap_entry *e;
611 unsigned idx;
034c6ed7 612
101d8e63 613 assert(h);
89439d4f
MS
614 assert(i);
615
616 if (i->idx == IDX_NIL)
617 goto at_end;
618
619 if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
620 goto at_end;
621
622 if (i->idx == IDX_FIRST) {
623 idx = h->iterate_list_head;
624 e = ordered_bucket_at(h, idx);
101d8e63 625 } else {
89439d4f
MS
626 idx = i->idx;
627 e = ordered_bucket_at(h, idx);
628 /*
629 * We allow removing the current entry while iterating, but removal may cause
630 * a backward shift. The next entry may thus move one bucket to the left.
631 * To detect when it happens, we remember the key pointer of the entry we were
632 * going to iterate next. If it does not match, there was a backward shift.
633 */
634 if (e->p.b.key != i->next_key) {
635 idx = prev_idx(HASHMAP_BASE(h), idx);
636 e = ordered_bucket_at(h, idx);
637 }
638 assert(e->p.b.key == i->next_key);
101d8e63 639 }
101d8e63 640
fc86aa0e 641#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
642 i->prev_idx = idx;
643#endif
644
645 if (e->iterate_next != IDX_NIL) {
646 struct ordered_hashmap_entry *n;
647 i->idx = e->iterate_next;
648 n = ordered_bucket_at(h, i->idx);
649 i->next_key = n->p.b.key;
650 } else
651 i->idx = IDX_NIL;
652
653 return idx;
654
655at_end:
656 i->idx = IDX_NIL;
657 return IDX_NIL;
101d8e63
LP
658}
659
89439d4f
MS
660static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
661 unsigned idx;
662
60918275 663 assert(h);
89439d4f 664 assert(i);
60918275 665
89439d4f
MS
666 if (i->idx == IDX_NIL)
667 goto at_end;
60918275 668
89439d4f
MS
669 if (i->idx == IDX_FIRST) {
670 /* fast forward to the first occupied bucket */
671 if (h->has_indirect) {
672 i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
673 h->indirect.idx_lowest_entry = i->idx;
674 } else
675 i->idx = skip_free_buckets(h, 0);
676
677 if (i->idx == IDX_NIL)
678 goto at_end;
679 } else {
680 struct hashmap_base_entry *e;
681
682 assert(i->idx > 0);
60918275 683
89439d4f
MS
684 e = bucket_at(h, i->idx);
685 /*
686 * We allow removing the current entry while iterating, but removal may cause
687 * a backward shift. The next entry may thus move one bucket to the left.
688 * To detect when it happens, we remember the key pointer of the entry we were
689 * going to iterate next. If it does not match, there was a backward shift.
690 */
691 if (e->key != i->next_key)
692 e = bucket_at(h, --i->idx);
60918275 693
89439d4f
MS
694 assert(e->key == i->next_key);
695 }
696
697 idx = i->idx;
fc86aa0e 698#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
699 i->prev_idx = idx;
700#endif
701
702 i->idx = skip_free_buckets(h, i->idx + 1);
703 if (i->idx != IDX_NIL)
704 i->next_key = bucket_at(h, i->idx)->key;
101d8e63 705 else
89439d4f
MS
706 i->idx = IDX_NIL;
707
708 return idx;
60918275 709
89439d4f
MS
710at_end:
711 i->idx = IDX_NIL;
712 return IDX_NIL;
60918275
LP
713}
714
89439d4f
MS
715static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
716 if (!h) {
717 i->idx = IDX_NIL;
718 return IDX_NIL;
719 }
101d8e63 720
fc86aa0e 721#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
722 if (i->idx == IDX_FIRST) {
723 i->put_count = h->debug.put_count;
724 i->rem_count = h->debug.rem_count;
725 } else {
726 /* While iterating, must not add any new entries */
727 assert(i->put_count == h->debug.put_count);
728 /* ... or remove entries other than the current one */
729 assert(i->rem_count == h->debug.rem_count ||
730 (i->rem_count == h->debug.rem_count - 1 &&
731 i->prev_idx == h->debug.last_rem_idx));
732 /* Reset our removals counter */
733 i->rem_count = h->debug.rem_count;
734 }
735#endif
101d8e63 736
89439d4f
MS
737 return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
738 : hashmap_iterate_in_internal_order(h, i);
739}
39c2a6f1 740
8927b1da 741bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
89439d4f
MS
742 struct hashmap_base_entry *e;
743 void *data;
744 unsigned idx;
745
746 idx = hashmap_iterate_entry(h, i);
747 if (idx == IDX_NIL) {
8927b1da
DH
748 if (value)
749 *value = NULL;
89439d4f
MS
750 if (key)
751 *key = NULL;
752
8927b1da 753 return false;
89439d4f
MS
754 }
755
756 e = bucket_at(h, idx);
757 data = entry_value(h, e);
8927b1da
DH
758 if (value)
759 *value = data;
89439d4f
MS
760 if (key)
761 *key = e->key;
762
8927b1da 763 return true;
101d8e63
LP
764}
765
8927b1da
DH
766bool set_iterate(Set *s, Iterator *i, void **value) {
767 return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
89439d4f 768}
60918275 769
89439d4f
MS
770#define HASHMAP_FOREACH_IDX(idx, h, i) \
771 for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
772 (idx != IDX_NIL); \
773 (idx) = hashmap_iterate_entry((h), &(i)))
774
775static void reset_direct_storage(HashmapBase *h) {
776 const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
777 void *p;
778
779 assert(!h->has_indirect);
780
781 p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
782 memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
783}
784
3ef11dcf 785static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
89439d4f
MS
786 HashmapBase *h;
787 const struct hashmap_type_info *hi = &hashmap_type_info[type];
788 bool use_pool;
789
790 use_pool = is_main_thread();
791
792 h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
67f3c402 793
60918275 794 if (!h)
89439d4f
MS
795 return NULL;
796
797 h->type = type;
798 h->from_pool = use_pool;
799 h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
800
801 if (type == HASHMAP_TYPE_ORDERED) {
802 OrderedHashmap *lh = (OrderedHashmap*)h;
803 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
804 }
805
806 reset_direct_storage(h);
60918275 807
89439d4f
MS
808 if (!shared_hash_key_initialized) {
809 random_bytes(shared_hash_key, sizeof(shared_hash_key));
810 shared_hash_key_initialized= true;
811 }
812
fc86aa0e 813#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
814 h->debug.func = func;
815 h->debug.file = file;
816 h->debug.line = line;
4f1b3061
TG
817 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
818 LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
819 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
89439d4f
MS
820#endif
821
822 return h;
823}
60918275 824
89439d4f 825Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 826 return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
827}
828
829OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 830 return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
831}
832
833Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 834 return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
835}
836
837static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
3ef11dcf 838 enum HashmapType type HASHMAP_DEBUG_PARAMS) {
89439d4f
MS
839 HashmapBase *q;
840
841 assert(h);
842
843 if (*h)
844 return 0;
845
3ef11dcf 846 q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
847 if (!q)
848 return -ENOMEM;
849
850 *h = q;
851 return 0;
852}
853
854int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 855 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
856}
857
858int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 859 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
860}
861
862int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 863 return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
864}
865
866static void hashmap_free_no_clear(HashmapBase *h) {
867 assert(!h->has_indirect);
868 assert(!h->n_direct_entries);
869
fc86aa0e 870#ifdef ENABLE_DEBUG_HASHMAP
4f1b3061 871 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
89439d4f 872 LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
4f1b3061 873 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
89439d4f 874#endif
45fa9e29 875
39c2a6f1 876 if (h->from_pool)
89439d4f 877 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
39c2a6f1
LP
878 else
879 free(h);
60918275
LP
880}
881
cfe561a4 882HashmapBase *internal_hashmap_free(HashmapBase *h) {
89439d4f
MS
883
884 /* Free the hashmap, but nothing in it */
885
cfe561a4
DH
886 if (h) {
887 internal_hashmap_clear(h);
888 hashmap_free_no_clear(h);
889 }
89439d4f 890
cfe561a4 891 return NULL;
89439d4f
MS
892}
893
cfe561a4 894HashmapBase *internal_hashmap_free_free(HashmapBase *h) {
67f3c402
LP
895
896 /* Free the hashmap and all data objects in it, but not the
897 * keys */
898
cfe561a4
DH
899 if (h) {
900 internal_hashmap_clear_free(h);
901 hashmap_free_no_clear(h);
902 }
61b1477c 903
cfe561a4 904 return NULL;
449ddb2d
LP
905}
906
cfe561a4 907Hashmap *hashmap_free_free_free(Hashmap *h) {
fabe5c0e
LP
908
909 /* Free the hashmap and all data and key objects in it */
910
cfe561a4
DH
911 if (h) {
912 hashmap_clear_free_free(h);
913 hashmap_free_no_clear(HASHMAP_BASE(h));
914 }
fabe5c0e 915
cfe561a4 916 return NULL;
fabe5c0e
LP
917}
918
89439d4f 919void internal_hashmap_clear(HashmapBase *h) {
11dd41ce
LP
920 if (!h)
921 return;
922
89439d4f
MS
923 if (h->has_indirect) {
924 free(h->indirect.storage);
925 h->has_indirect = false;
926 }
927
928 h->n_direct_entries = 0;
929 reset_direct_storage(h);
930
931 if (h->type == HASHMAP_TYPE_ORDERED) {
932 OrderedHashmap *lh = (OrderedHashmap*) h;
933 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
934 }
11dd41ce
LP
935}
936
89439d4f
MS
937void internal_hashmap_clear_free(HashmapBase *h) {
938 unsigned idx;
9946996c 939
61b1477c
LP
940 if (!h)
941 return;
9946996c 942
89439d4f
MS
943 for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
944 idx = skip_free_buckets(h, idx + 1))
945 free(entry_value(h, bucket_at(h, idx)));
946
947 internal_hashmap_clear(h);
9946996c
LP
948}
949
fabe5c0e 950void hashmap_clear_free_free(Hashmap *h) {
89439d4f
MS
951 unsigned idx;
952
fabe5c0e
LP
953 if (!h)
954 return;
955
89439d4f
MS
956 for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
957 idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
958 struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
959 free((void*)e->b.key);
960 free(e->value);
fabe5c0e 961 }
89439d4f
MS
962
963 internal_hashmap_clear(HASHMAP_BASE(h));
fabe5c0e
LP
964}
965
89439d4f
MS
966static int resize_buckets(HashmapBase *h, unsigned entries_add);
967
968/*
969 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
970 * Performs Robin Hood swaps as it goes. The entry to put must be placed
971 * by the caller into swap slot IDX_PUT.
972 * If used for in-place resizing, may leave a displaced entry in swap slot
973 * IDX_PUT. Caller must rehash it next.
974 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
975 * false otherwise.
976 */
977static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
978 struct swap_entries *swap) {
979 dib_raw_t raw_dib, *dibs;
980 unsigned dib, distance;
981
fc86aa0e 982#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
983 h->debug.put_count++;
984#endif
985
986 dibs = dib_raw_ptr(h);
987
988 for (distance = 0; ; distance++) {
989 raw_dib = dibs[idx];
990 if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
991 if (raw_dib == DIB_RAW_REHASH)
992 bucket_move_entry(h, swap, idx, IDX_TMP);
993
994 if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
995 h->indirect.idx_lowest_entry = idx;
60918275 996
89439d4f
MS
997 bucket_set_dib(h, idx, distance);
998 bucket_move_entry(h, swap, IDX_PUT, idx);
999 if (raw_dib == DIB_RAW_REHASH) {
1000 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
1001 return true;
1002 }
60918275 1003
89439d4f
MS
1004 return false;
1005 }
1006
1007 dib = bucket_calculate_dib(h, idx, raw_dib);
1008
1009 if (dib < distance) {
1010 /* Found a wealthier entry. Go Robin Hood! */
89439d4f
MS
1011 bucket_set_dib(h, idx, distance);
1012
1013 /* swap the entries */
1014 bucket_move_entry(h, swap, idx, IDX_TMP);
1015 bucket_move_entry(h, swap, IDX_PUT, idx);
1016 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
1017
1018 distance = dib;
1019 }
1020
1021 idx = next_idx(h, idx);
1022 }
60918275
LP
1023}
1024
89439d4f
MS
1025/*
1026 * Puts an entry into a hashmap, boldly - no check whether key already exists.
1027 * The caller must place the entry (only its key and value, not link indexes)
1028 * in swap slot IDX_PUT.
1029 * Caller must ensure: the key does not exist yet in the hashmap.
1030 * that resize is not needed if !may_resize.
1031 * Returns: 1 if entry was put successfully.
1032 * -ENOMEM if may_resize==true and resize failed with -ENOMEM.
1033 * Cannot return -ENOMEM if !may_resize.
1034 */
1035static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
1036 struct swap_entries *swap, bool may_resize) {
1037 struct ordered_hashmap_entry *new_entry;
1038 int r;
1039
1040 assert(idx < n_buckets(h));
1041
1042 new_entry = bucket_at_swap(swap, IDX_PUT);
1043
1044 if (may_resize) {
1045 r = resize_buckets(h, 1);
1046 if (r < 0)
1047 return r;
1048 if (r > 0)
1049 idx = bucket_hash(h, new_entry->p.b.key);
1050 }
1051 assert(n_entries(h) < n_buckets(h));
1052
1053 if (h->type == HASHMAP_TYPE_ORDERED) {
1054 OrderedHashmap *lh = (OrderedHashmap*) h;
1055
1056 new_entry->iterate_next = IDX_NIL;
1057 new_entry->iterate_previous = lh->iterate_list_tail;
1058
1059 if (lh->iterate_list_tail != IDX_NIL) {
1060 struct ordered_hashmap_entry *old_tail;
1061
1062 old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
1063 assert(old_tail->iterate_next == IDX_NIL);
1064 old_tail->iterate_next = IDX_PUT;
1065 }
1066
1067 lh->iterate_list_tail = IDX_PUT;
1068 if (lh->iterate_list_head == IDX_NIL)
1069 lh->iterate_list_head = IDX_PUT;
1070 }
1071
1072 assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
1073
1074 n_entries_inc(h);
fc86aa0e 1075#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
1076 h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1077#endif
1078
1079 return 1;
1080}
1081#define hashmap_put_boldly(h, idx, swap, may_resize) \
1082 hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1083
1084/*
1085 * Returns 0 if resize is not needed.
f131770b 1086 * 1 if successfully resized.
89439d4f
MS
1087 * -ENOMEM on allocation failure.
1088 */
1089static int resize_buckets(HashmapBase *h, unsigned entries_add) {
1090 struct swap_entries swap;
1091 char *new_storage;
1092 dib_raw_t *old_dibs, *new_dibs;
1093 const struct hashmap_type_info *hi;
1094 unsigned idx, optimal_idx;
1095 unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
1096 uint8_t new_shift;
1097 bool rehash_next;
45fa9e29
LP
1098
1099 assert(h);
1100
89439d4f
MS
1101 hi = &hashmap_type_info[h->type];
1102 new_n_entries = n_entries(h) + entries_add;
e4c691b5
MS
1103
1104 /* overflow? */
89439d4f 1105 if (_unlikely_(new_n_entries < entries_add))
e4c691b5
MS
1106 return -ENOMEM;
1107
89439d4f
MS
1108 /* For direct storage we allow 100% load, because it's tiny. */
1109 if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
9700d698 1110 return 0;
45fa9e29 1111
89439d4f
MS
1112 /*
1113 * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1114 * From it follows: m = n + n/(INV_KEEP_FREE - 1)
1115 */
1116 new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
1117 /* overflow? */
1118 if (_unlikely_(new_n_buckets < new_n_entries))
9700d698 1119 return -ENOMEM;
45fa9e29 1120
89439d4f
MS
1121 if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
1122 return -ENOMEM;
a3b6fafe 1123
89439d4f 1124 old_n_buckets = n_buckets(h);
45fa9e29 1125
89439d4f
MS
1126 if (_likely_(new_n_buckets <= old_n_buckets))
1127 return 0;
45fa9e29 1128
89439d4f
MS
1129 new_shift = log2u_round_up(MAX(
1130 new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1131 2 * sizeof(struct direct_storage)));
45fa9e29 1132
89439d4f
MS
1133 /* Realloc storage (buckets and DIB array). */
1134 new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1135 1U << new_shift);
1136 if (!new_storage)
1137 return -ENOMEM;
45fa9e29 1138
89439d4f
MS
1139 /* Must upgrade direct to indirect storage. */
1140 if (!h->has_indirect) {
1141 memcpy(new_storage, h->direct.storage,
1142 old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1143 h->indirect.n_entries = h->n_direct_entries;
1144 h->indirect.idx_lowest_entry = 0;
1145 h->n_direct_entries = 0;
1146 }
45fa9e29 1147
89439d4f
MS
1148 /* Get a new hash key. If we've just upgraded to indirect storage,
1149 * allow reusing a previously generated key. It's still a different key
1150 * from the shared one that we used for direct storage. */
1151 get_hash_key(h->indirect.hash_key, !h->has_indirect);
1152
1153 h->has_indirect = true;
1154 h->indirect.storage = new_storage;
1155 h->indirect.n_buckets = (1U << new_shift) /
1156 (hi->entry_size + sizeof(dib_raw_t));
1157
1158 old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
1159 new_dibs = dib_raw_ptr(h);
1160
1161 /*
1162 * Move the DIB array to the new place, replacing valid DIB values with
1163 * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1164 * Note: Overlap is not possible, because we have at least doubled the
1165 * number of buckets and dib_raw_t is smaller than any entry type.
1166 */
1167 for (idx = 0; idx < old_n_buckets; idx++) {
1168 assert(old_dibs[idx] != DIB_RAW_REHASH);
1169 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
1170 : DIB_RAW_REHASH;
45fa9e29
LP
1171 }
1172
89439d4f 1173 /* Zero the area of newly added entries (including the old DIB area) */
eccaf899 1174 memzero(bucket_at(h, old_n_buckets),
89439d4f 1175 (n_buckets(h) - old_n_buckets) * hi->entry_size);
45fa9e29 1176
89439d4f
MS
1177 /* The upper half of the new DIB array needs initialization */
1178 memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
1179 (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
9bf3b535 1180
89439d4f
MS
1181 /* Rehash entries that need it */
1182 n_rehashed = 0;
1183 for (idx = 0; idx < old_n_buckets; idx++) {
1184 if (new_dibs[idx] != DIB_RAW_REHASH)
1185 continue;
45fa9e29 1186
89439d4f 1187 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
45fa9e29 1188
89439d4f
MS
1189 /*
1190 * Not much to do if by luck the entry hashes to its current
1191 * location. Just set its DIB.
1192 */
1193 if (optimal_idx == idx) {
1194 new_dibs[idx] = 0;
1195 n_rehashed++;
1196 continue;
1197 }
1198
1199 new_dibs[idx] = DIB_RAW_FREE;
1200 bucket_move_entry(h, &swap, idx, IDX_PUT);
1201 /* bucket_move_entry does not clear the source */
eccaf899 1202 memzero(bucket_at(h, idx), hi->entry_size);
89439d4f
MS
1203
1204 do {
1205 /*
1206 * Find the new bucket for the current entry. This may make
1207 * another entry homeless and load it into IDX_PUT.
1208 */
1209 rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
1210 n_rehashed++;
1211
1212 /* Did the current entry displace another one? */
1213 if (rehash_next)
1214 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
1215 } while (rehash_next);
1216 }
60918275 1217
89439d4f 1218 assert(n_rehashed == n_entries(h));
60918275 1219
89439d4f
MS
1220 return 1;
1221}
45fa9e29 1222
89439d4f
MS
1223/*
1224 * Finds an entry with a matching key
1225 * Returns: index of the found entry, or IDX_NIL if not found.
1226 */
1227static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1228 struct hashmap_base_entry *e;
1229 unsigned dib, distance;
1230 dib_raw_t *dibs = dib_raw_ptr(h);
39c2a6f1 1231
89439d4f 1232 assert(idx < n_buckets(h));
60918275 1233
89439d4f
MS
1234 for (distance = 0; ; distance++) {
1235 if (dibs[idx] == DIB_RAW_FREE)
1236 return IDX_NIL;
60918275 1237
89439d4f 1238 dib = bucket_calculate_dib(h, idx, dibs[idx]);
60918275 1239
89439d4f
MS
1240 if (dib < distance)
1241 return IDX_NIL;
1242 if (dib == distance) {
1243 e = bucket_at(h, idx);
1244 if (h->hash_ops->compare(e->key, key) == 0)
1245 return idx;
1246 }
1247
1248 idx = next_idx(h, idx);
1249 }
60918275 1250}
89439d4f 1251#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
60918275 1252
923041cb 1253int hashmap_put(Hashmap *h, const void *key, void *value) {
89439d4f
MS
1254 struct swap_entries swap;
1255 struct plain_hashmap_entry *e;
1256 unsigned hash, idx;
923041cb
MS
1257
1258 assert(h);
1259
1260 hash = bucket_hash(h, key);
89439d4f
MS
1261 idx = bucket_scan(h, hash, key);
1262 if (idx != IDX_NIL) {
1263 e = plain_bucket_at(h, idx);
923041cb
MS
1264 if (e->value == value)
1265 return 0;
1266 return -EEXIST;
1267 }
1268
89439d4f
MS
1269 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1270 e->b.key = key;
1271 e->value = value;
1272 return hashmap_put_boldly(h, hash, &swap, true);
1273}
1274
1275int set_put(Set *s, const void *key) {
1276 struct swap_entries swap;
1277 struct hashmap_base_entry *e;
1278 unsigned hash, idx;
1279
1280 assert(s);
1281
1282 hash = bucket_hash(s, key);
1283 idx = bucket_scan(s, hash, key);
1284 if (idx != IDX_NIL)
1285 return 0;
1286
1287 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1288 e->key = key;
1289 return hashmap_put_boldly(s, hash, &swap, true);
923041cb
MS
1290}
1291
3158713e 1292int hashmap_replace(Hashmap *h, const void *key, void *value) {
89439d4f
MS
1293 struct swap_entries swap;
1294 struct plain_hashmap_entry *e;
1295 unsigned hash, idx;
3158713e
LP
1296
1297 assert(h);
1298
a3b6fafe 1299 hash = bucket_hash(h, key);
89439d4f
MS
1300 idx = bucket_scan(h, hash, key);
1301 if (idx != IDX_NIL) {
1302 e = plain_bucket_at(h, idx);
fc86aa0e 1303#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
1304 /* Although the key is equal, the key pointer may have changed,
1305 * and this would break our assumption for iterating. So count
1306 * this operation as incompatible with iteration. */
1307 if (e->b.key != key) {
1308 h->b.debug.put_count++;
1309 h->b.debug.rem_count++;
1310 h->b.debug.last_rem_idx = idx;
1311 }
1312#endif
1313 e->b.key = key;
3158713e
LP
1314 e->value = value;
1315 return 0;
1316 }
1317
89439d4f
MS
1318 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1319 e->b.key = key;
1320 e->value = value;
1321 return hashmap_put_boldly(h, hash, &swap, true);
3158713e
LP
1322}
1323
d99ae53a 1324int hashmap_update(Hashmap *h, const void *key, void *value) {
89439d4f
MS
1325 struct plain_hashmap_entry *e;
1326 unsigned hash, idx;
d99ae53a
LP
1327
1328 assert(h);
1329
a3b6fafe 1330 hash = bucket_hash(h, key);
89439d4f
MS
1331 idx = bucket_scan(h, hash, key);
1332 if (idx == IDX_NIL)
d99ae53a
LP
1333 return -ENOENT;
1334
89439d4f 1335 e = plain_bucket_at(h, idx);
d99ae53a
LP
1336 e->value = value;
1337 return 0;
1338}
1339
89439d4f
MS
1340void *internal_hashmap_get(HashmapBase *h, const void *key) {
1341 struct hashmap_base_entry *e;
1342 unsigned hash, idx;
60918275
LP
1343
1344 if (!h)
1345 return NULL;
1346
a3b6fafe 1347 hash = bucket_hash(h, key);
89439d4f
MS
1348 idx = bucket_scan(h, hash, key);
1349 if (idx == IDX_NIL)
60918275
LP
1350 return NULL;
1351
89439d4f
MS
1352 e = bucket_at(h, idx);
1353 return entry_value(h, e);
60918275
LP
1354}
1355
89439d4f
MS
1356void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
1357 struct plain_hashmap_entry *e;
1358 unsigned hash, idx;
d99ae53a
LP
1359
1360 if (!h)
1361 return NULL;
1362
a3b6fafe 1363 hash = bucket_hash(h, key);
89439d4f
MS
1364 idx = bucket_scan(h, hash, key);
1365 if (idx == IDX_NIL)
d99ae53a
LP
1366 return NULL;
1367
89439d4f 1368 e = plain_bucket_at(h, idx);
d99ae53a 1369 if (key2)
89439d4f 1370 *key2 = (void*) e->b.key;
d99ae53a
LP
1371
1372 return e->value;
1373}
1374
89439d4f 1375bool internal_hashmap_contains(HashmapBase *h, const void *key) {
96342de6 1376 unsigned hash;
96342de6
LN
1377
1378 if (!h)
1379 return false;
1380
a3b6fafe 1381 hash = bucket_hash(h, key);
89439d4f 1382 return bucket_scan(h, hash, key) != IDX_NIL;
96342de6
LN
1383}
1384
89439d4f
MS
1385void *internal_hashmap_remove(HashmapBase *h, const void *key) {
1386 struct hashmap_base_entry *e;
1387 unsigned hash, idx;
60918275
LP
1388 void *data;
1389
1390 if (!h)
1391 return NULL;
1392
a3b6fafe 1393 hash = bucket_hash(h, key);
89439d4f
MS
1394 idx = bucket_scan(h, hash, key);
1395 if (idx == IDX_NIL)
60918275
LP
1396 return NULL;
1397
89439d4f
MS
1398 e = bucket_at(h, idx);
1399 data = entry_value(h, e);
1400 remove_entry(h, idx);
60918275
LP
1401
1402 return data;
1403}
1404
89439d4f
MS
1405void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1406 struct plain_hashmap_entry *e;
1407 unsigned hash, idx;
c582a3b3
LP
1408 void *data;
1409
1410 if (!h) {
1411 if (rkey)
1412 *rkey = NULL;
1413 return NULL;
1414 }
1415
1416 hash = bucket_hash(h, key);
89439d4f
MS
1417 idx = bucket_scan(h, hash, key);
1418 if (idx == IDX_NIL) {
c582a3b3
LP
1419 if (rkey)
1420 *rkey = NULL;
1421 return NULL;
1422 }
1423
89439d4f 1424 e = plain_bucket_at(h, idx);
c582a3b3
LP
1425 data = e->value;
1426 if (rkey)
89439d4f 1427 *rkey = (void*) e->b.key;
c582a3b3 1428
89439d4f 1429 remove_entry(h, idx);
c582a3b3
LP
1430
1431 return data;
1432}
1433
101d8e63 1434int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
89439d4f
MS
1435 struct swap_entries swap;
1436 struct plain_hashmap_entry *e;
1437 unsigned old_hash, new_hash, idx;
101d8e63
LP
1438
1439 if (!h)
1440 return -ENOENT;
1441
a3b6fafe 1442 old_hash = bucket_hash(h, old_key);
89439d4f
MS
1443 idx = bucket_scan(h, old_hash, old_key);
1444 if (idx == IDX_NIL)
101d8e63
LP
1445 return -ENOENT;
1446
a3b6fafe 1447 new_hash = bucket_hash(h, new_key);
89439d4f 1448 if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
101d8e63
LP
1449 return -EEXIST;
1450
89439d4f 1451 remove_entry(h, idx);
101d8e63 1452
89439d4f
MS
1453 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1454 e->b.key = new_key;
101d8e63 1455 e->value = value;
89439d4f
MS
1456 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1457
1458 return 0;
1459}
1460
1461int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1462 struct swap_entries swap;
1463 struct hashmap_base_entry *e;
1464 unsigned old_hash, new_hash, idx;
101d8e63 1465
89439d4f
MS
1466 if (!s)
1467 return -ENOENT;
1468
1469 old_hash = bucket_hash(s, old_key);
1470 idx = bucket_scan(s, old_hash, old_key);
1471 if (idx == IDX_NIL)
1472 return -ENOENT;
1473
1474 new_hash = bucket_hash(s, new_key);
1475 if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1476 return -EEXIST;
1477
1478 remove_entry(s, idx);
1479
1480 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1481 e->key = new_key;
1482 assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
101d8e63
LP
1483
1484 return 0;
1485}
1486
8fe914ec 1487int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
89439d4f
MS
1488 struct swap_entries swap;
1489 struct plain_hashmap_entry *e;
1490 unsigned old_hash, new_hash, idx_old, idx_new;
8fe914ec
LP
1491
1492 if (!h)
1493 return -ENOENT;
1494
a3b6fafe 1495 old_hash = bucket_hash(h, old_key);
89439d4f
MS
1496 idx_old = bucket_scan(h, old_hash, old_key);
1497 if (idx_old == IDX_NIL)
8fe914ec
LP
1498 return -ENOENT;
1499
89439d4f 1500 old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
8fe914ec 1501
89439d4f
MS
1502 new_hash = bucket_hash(h, new_key);
1503 idx_new = bucket_scan(h, new_hash, new_key);
1504 if (idx_new != IDX_NIL)
1505 if (idx_old != idx_new) {
1506 remove_entry(h, idx_new);
1507 /* Compensate for a possible backward shift. */
1508 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1509 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1510 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1511 }
1512
1513 remove_entry(h, idx_old);
1514
1515 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1516 e->b.key = new_key;
8fe914ec 1517 e->value = value;
89439d4f 1518 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
8fe914ec
LP
1519
1520 return 0;
1521}
1522
89439d4f
MS
1523void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1524 struct plain_hashmap_entry *e;
1525 unsigned hash, idx;
3158713e
LP
1526
1527 if (!h)
1528 return NULL;
1529
a3b6fafe 1530 hash = bucket_hash(h, key);
89439d4f
MS
1531 idx = bucket_scan(h, hash, key);
1532 if (idx == IDX_NIL)
3158713e
LP
1533 return NULL;
1534
89439d4f 1535 e = plain_bucket_at(h, idx);
3158713e
LP
1536 if (e->value != value)
1537 return NULL;
1538
89439d4f 1539 remove_entry(h, idx);
3158713e
LP
1540
1541 return value;
1542}
1543
89439d4f
MS
1544static unsigned find_first_entry(HashmapBase *h) {
1545 Iterator i = ITERATOR_FIRST;
60918275 1546
89439d4f
MS
1547 if (!h || !n_entries(h))
1548 return IDX_NIL;
60918275 1549
89439d4f 1550 return hashmap_iterate_entry(h, &i);
60918275
LP
1551}
1552
89439d4f
MS
1553void *internal_hashmap_first(HashmapBase *h) {
1554 unsigned idx;
60918275 1555
89439d4f
MS
1556 idx = find_first_entry(h);
1557 if (idx == IDX_NIL)
60918275
LP
1558 return NULL;
1559
89439d4f 1560 return entry_value(h, bucket_at(h, idx));
60918275
LP
1561}
1562
89439d4f
MS
1563void *internal_hashmap_first_key(HashmapBase *h) {
1564 struct hashmap_base_entry *e;
1565 unsigned idx;
2e4a6ff4 1566
89439d4f
MS
1567 idx = find_first_entry(h);
1568 if (idx == IDX_NIL)
2e4a6ff4
LP
1569 return NULL;
1570
89439d4f
MS
1571 e = bucket_at(h, idx);
1572 return (void*) e->key;
2e4a6ff4
LP
1573}
1574
89439d4f
MS
1575void *internal_hashmap_steal_first(HashmapBase *h) {
1576 struct hashmap_base_entry *e;
60918275 1577 void *data;
89439d4f 1578 unsigned idx;
60918275 1579
89439d4f
MS
1580 idx = find_first_entry(h);
1581 if (idx == IDX_NIL)
60918275
LP
1582 return NULL;
1583
89439d4f
MS
1584 e = bucket_at(h, idx);
1585 data = entry_value(h, e);
1586 remove_entry(h, idx);
60918275
LP
1587
1588 return data;
1589}
1590
89439d4f
MS
1591void *internal_hashmap_steal_first_key(HashmapBase *h) {
1592 struct hashmap_base_entry *e;
22be093f 1593 void *key;
89439d4f 1594 unsigned idx;
22be093f 1595
89439d4f
MS
1596 idx = find_first_entry(h);
1597 if (idx == IDX_NIL)
22be093f
LP
1598 return NULL;
1599
89439d4f
MS
1600 e = bucket_at(h, idx);
1601 key = (void*) e->key;
1602 remove_entry(h, idx);
22be093f
LP
1603
1604 return key;
1605}
1606
89439d4f 1607unsigned internal_hashmap_size(HashmapBase *h) {
60918275
LP
1608
1609 if (!h)
1610 return 0;
1611
89439d4f 1612 return n_entries(h);
60918275
LP
1613}
1614
89439d4f 1615unsigned internal_hashmap_buckets(HashmapBase *h) {
45fa9e29
LP
1616
1617 if (!h)
1618 return 0;
1619
89439d4f 1620 return n_buckets(h);
45fa9e29
LP
1621}
1622
89439d4f
MS
1623int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
1624 Iterator i;
1625 unsigned idx;
60918275 1626
89439d4f 1627 assert(h);
60918275 1628
89439d4f
MS
1629 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1630 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1631 int r;
91cdde8a 1632
89439d4f
MS
1633 r = hashmap_put(h, pe->b.key, pe->value);
1634 if (r < 0 && r != -EEXIST)
1635 return r;
1636 }
91cdde8a 1637
89439d4f
MS
1638 return 0;
1639}
91cdde8a 1640
89439d4f
MS
1641int set_merge(Set *s, Set *other) {
1642 Iterator i;
1643 unsigned idx;
91cdde8a 1644
89439d4f
MS
1645 assert(s);
1646
1647 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1648 struct set_entry *se = set_bucket_at(other, idx);
91cdde8a
LP
1649 int r;
1650
89439d4f
MS
1651 r = set_put(s, se->b.key);
1652 if (r < 0)
a3b6fafe 1653 return r;
91cdde8a
LP
1654 }
1655
1656 return 0;
1657}
1658
89439d4f 1659int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
e4c691b5
MS
1660 int r;
1661
1662 assert(h);
1663
1664 r = resize_buckets(h, entries_add);
1665 if (r < 0)
1666 return r;
1667
1668 return 0;
1669}
1670
89439d4f
MS
1671/*
1672 * The same as hashmap_merge(), but every new item from other is moved to h.
1673 * Keys already in h are skipped and stay in other.
1674 * Returns: 0 on success.
1675 * -ENOMEM on alloc failure, in which case no move has been done.
1676 */
1677int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
1678 struct swap_entries swap;
1679 struct hashmap_base_entry *e, *n;
1680 Iterator i;
1681 unsigned idx;
1682 int r;
101d8e63
LP
1683
1684 assert(h);
1685
101d8e63 1686 if (!other)
7ad63f57 1687 return 0;
101d8e63 1688
89439d4f
MS
1689 assert(other->type == h->type);
1690
1691 /*
1692 * This reserves buckets for the worst case, where none of other's
1693 * entries are yet present in h. This is preferable to risking
1694 * an allocation failure in the middle of the moving and having to
1695 * rollback or return a partial result.
1696 */
1697 r = resize_buckets(h, n_entries(other));
1698 if (r < 0)
1699 return r;
101d8e63 1700
89439d4f
MS
1701 HASHMAP_FOREACH_IDX(idx, other, i) {
1702 unsigned h_hash;
101d8e63 1703
89439d4f 1704 e = bucket_at(other, idx);
a3b6fafe 1705 h_hash = bucket_hash(h, e->key);
89439d4f 1706 if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
101d8e63
LP
1707 continue;
1708
89439d4f
MS
1709 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1710 n->key = e->key;
1711 if (h->type != HASHMAP_TYPE_SET)
1712 ((struct plain_hashmap_entry*) n)->value =
1713 ((struct plain_hashmap_entry*) e)->value;
1714 assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);
1715
1716 remove_entry(other, idx);
101d8e63 1717 }
7ad63f57
MS
1718
1719 return 0;
101d8e63
LP
1720}
1721
89439d4f
MS
1722int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
1723 struct swap_entries swap;
1724 unsigned h_hash, other_hash, idx;
1725 struct hashmap_base_entry *e, *n;
1726 int r;
101d8e63 1727
101d8e63
LP
1728 assert(h);
1729
a3b6fafe 1730 h_hash = bucket_hash(h, key);
89439d4f 1731 if (bucket_scan(h, h_hash, key) != IDX_NIL)
101d8e63
LP
1732 return -EEXIST;
1733
bf3d3e2b
MS
1734 if (!other)
1735 return -ENOENT;
1736
89439d4f
MS
1737 assert(other->type == h->type);
1738
a3b6fafe 1739 other_hash = bucket_hash(other, key);
89439d4f
MS
1740 idx = bucket_scan(other, other_hash, key);
1741 if (idx == IDX_NIL)
101d8e63
LP
1742 return -ENOENT;
1743
89439d4f
MS
1744 e = bucket_at(other, idx);
1745
1746 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1747 n->key = e->key;
1748 if (h->type != HASHMAP_TYPE_SET)
1749 ((struct plain_hashmap_entry*) n)->value =
1750 ((struct plain_hashmap_entry*) e)->value;
1751 r = hashmap_put_boldly(h, h_hash, &swap, true);
1752 if (r < 0)
1753 return r;
101d8e63 1754
89439d4f 1755 remove_entry(other, idx);
101d8e63
LP
1756 return 0;
1757}
1758
89439d4f
MS
1759HashmapBase *internal_hashmap_copy(HashmapBase *h) {
1760 HashmapBase *copy;
1761 int r;
91cdde8a
LP
1762
1763 assert(h);
1764
89439d4f 1765 copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
45fa9e29 1766 if (!copy)
91cdde8a
LP
1767 return NULL;
1768
89439d4f
MS
1769 switch (h->type) {
1770 case HASHMAP_TYPE_PLAIN:
1771 case HASHMAP_TYPE_ORDERED:
1772 r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1773 break;
1774 case HASHMAP_TYPE_SET:
1775 r = set_merge((Set*)copy, (Set*)h);
1776 break;
1777 default:
1778 assert_not_reached("Unknown hashmap type");
1779 }
1780
1781 if (r < 0) {
1782 internal_hashmap_free(copy);
91cdde8a
LP
1783 return NULL;
1784 }
1785
1786 return copy;
1787}
db1413d7 1788
89439d4f 1789char **internal_hashmap_get_strv(HashmapBase *h) {
db1413d7 1790 char **sv;
89439d4f
MS
1791 Iterator i;
1792 unsigned idx, n;
db1413d7 1793
89439d4f 1794 sv = new(char*, n_entries(h)+1);
729e3769 1795 if (!sv)
db1413d7
KS
1796 return NULL;
1797
1798 n = 0;
89439d4f
MS
1799 HASHMAP_FOREACH_IDX(idx, h, i)
1800 sv[n++] = entry_value(h, bucket_at(h, idx));
db1413d7
KS
1801 sv[n] = NULL;
1802
1803 return sv;
1804}
3c1668da 1805
89439d4f
MS
1806void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1807 struct ordered_hashmap_entry *e;
1808 unsigned hash, idx;
3c1668da 1809
3c1668da
LP
1810 if (!h)
1811 return NULL;
1812
a3b6fafe 1813 hash = bucket_hash(h, key);
89439d4f
MS
1814 idx = bucket_scan(h, hash, key);
1815 if (idx == IDX_NIL)
3c1668da
LP
1816 return NULL;
1817
89439d4f
MS
1818 e = ordered_bucket_at(h, idx);
1819 if (e->iterate_next == IDX_NIL)
3c1668da 1820 return NULL;
89439d4f
MS
1821 return ordered_bucket_at(h, e->iterate_next)->p.value;
1822}
3c1668da 1823
89439d4f
MS
1824int set_consume(Set *s, void *value) {
1825 int r;
1826
1827 r = set_put(s, value);
575ccc1b 1828 if (r <= 0)
89439d4f
MS
1829 free(value);
1830
1831 return r;
1832}
1833
1834int set_put_strdup(Set *s, const char *p) {
1835 char *c;
1836 int r;
1837
1838 assert(s);
1839 assert(p);
1840
1841 c = strdup(p);
1842 if (!c)
1843 return -ENOMEM;
1844
1845 r = set_consume(s, c);
1846 if (r == -EEXIST)
1847 return 0;
1848
1849 return r;
1850}
1851
1852int set_put_strdupv(Set *s, char **l) {
1853 int n = 0, r;
1854 char **i;
1855
1856 STRV_FOREACH(i, l) {
1857 r = set_put_strdup(s, *i);
1858 if (r < 0)
1859 return r;
1860
1861 n += r;
1862 }
1863
1864 return n;
3c1668da 1865}