/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "strv.h"
#include "util.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
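
/*
 * Illustrative sketch of the insertion rule implemented in
 * hashmap_put_robin_hood() below (made-up buckets, not real hash values):
 * a new entry hashes to bucket 3. Bucket 3 holds an entry with DIB 1, which
 * is not smaller than the new entry's current distance 0, so we probe on.
 * Bucket 4 holds an entry with DIB 0, which is smaller than the new entry's
 * distance 1, so the two are swapped: the new entry settles in bucket 4 with
 * DIB 1 and the displaced entry continues probing from bucket 5 with
 * distance 1. Equalizing DIBs like this is what keeps probe sequences short.
 */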

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
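/* For example, growing to hold n = 1000 entries therefore asks for
 * m = n + n/(INV_KEEP_FREE - 1) = 1000 + 250 = 1250 buckets in
 * resize_buckets(), i.e. a load factor of 0.8. */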

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX         /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
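/* In other words, a raw DIB byte of 0x00..0xfc is the displacement itself
 * (e.g. 0x02 means the entry sits two buckets past its initial bucket), while
 * 0xfd..0xff are the sentinels above; bucket_calculate_dib() recomputes the
 * true displacement for the rare 0xfd overflow case. */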

#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        char     *storage;                 /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while(hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         *           or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
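/* Assuming the usual 64bit layout (39 bytes of direct storage), this works
 * out to 39/9 = 4 buckets for struct set_entry, 39/17 = 2 for
 * struct plain_hashmap_entry and 39/25 = 1 for struct ordered_hashmap_entry,
 * so both asserts above hold with room to spare. */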

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

void string_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, strlen(p) + 1, state);
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
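
/*
 * Rough usage sketch of these ops from calling code (assuming the
 * hashmap_new()/hashmap_put()/hashmap_get() wrappers declared in hashmap.h):
 *
 *         Hashmap *m = hashmap_new(&string_hash_ops);
 *         if (!m)
 *                 return -ENOMEM;
 *         r = hashmap_put(m, "foo", some_object);
 *         if (r < 0)
 *                 return r;
 *         some_object = hashmap_get(m, "foo");
 *
 * Note that the map stores the key pointer as-is; it does not copy the string.
 */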

void trivial_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(&p, sizeof(p), state);
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};

void uint64_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(uint64_t), state);
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};

#if SIZEOF_DEV_T != 8
void devt_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(dev_t), state);
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        siphash24_finalize((uint8_t*)&hash, &state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
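
/*
 * Illustrative sketch of the backward shift performed above (made-up DIBs):
 * removing the entry at bucket 2 when buckets 3 and 4 hold entries with DIBs
 * 1 and 2 and bucket 5 is free (or has DIB 0, the "stop bucket") moves the
 * entry from 3 to 2 with DIB 0 and the entry from 4 to 3 with DIB 1, then
 * marks bucket 4 free. No tombstones are ever left behind, which is why a
 * lookup may stop as soon as it sees a DIB smaller than its current probe
 * distance (see base_bucket_scan() below).
 */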

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}
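
/*
 * Rough usage sketch of the iteration API from calling code (assuming the
 * HASHMAP_FOREACH macro from hashmap.h, which wraps internal_hashmap_iterate(),
 * and a caller-provided key_of() helper used purely for illustration):
 *
 *         Iterator i;
 *         void *v;
 *         HASHMAP_FOREACH(v, m, i)
 *                 if (!still_wanted(v))
 *                         hashmap_remove(m, key_of(v));
 *
 * Removing the entry that was just returned is safe (the iterator compensates
 * for the backward shift, as noted above); any other addition or removal
 * during iteration is not.
 */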

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);

        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
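
/*
 * Sizing example for the above (made-up numbers, 64bit plain hashmap with
 * 17 bytes per bucket including the DIB byte): growing to 1000 entries
 * requests 1250 buckets, i.e. 21250 bytes, which log2u_round_up() rounds to
 * a 32768 byte allocation, giving 32768/17 = 1927 buckets until the next
 * resize.
 */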

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
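
/*
 * Both functions above follow the same return convention: 1 if the entry was
 * newly added, 0 if it was already present (for hashmap_put() only when the
 * key already maps to the very same value; a different value yields -EEXIST),
 * and -ENOMEM if growing the table failed. set_consume() further down relies
 * on the "r <= 0" half of this to know when it must free its argument.
 */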

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1520}
1521
89439d4f
MS
1522void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1523 struct plain_hashmap_entry *e;
1524 unsigned hash, idx;
3158713e
LP
1525
1526 if (!h)
1527 return NULL;
1528
a3b6fafe 1529 hash = bucket_hash(h, key);
89439d4f
MS
1530 idx = bucket_scan(h, hash, key);
1531 if (idx == IDX_NIL)
3158713e
LP
1532 return NULL;
1533
89439d4f 1534 e = plain_bucket_at(h, idx);
3158713e
LP
1535 if (e->value != value)
1536 return NULL;
1537
89439d4f 1538 remove_entry(h, idx);
3158713e
LP
1539
1540 return value;
1541}
1542
89439d4f
MS
1543static unsigned find_first_entry(HashmapBase *h) {
1544 Iterator i = ITERATOR_FIRST;
60918275 1545
89439d4f
MS
1546 if (!h || !n_entries(h))
1547 return IDX_NIL;
60918275 1548
89439d4f 1549 return hashmap_iterate_entry(h, &i);
60918275
LP
1550}
1551
89439d4f
MS
1552void *internal_hashmap_first(HashmapBase *h) {
1553 unsigned idx;
60918275 1554
89439d4f
MS
1555 idx = find_first_entry(h);
1556 if (idx == IDX_NIL)
60918275
LP
1557 return NULL;
1558
89439d4f 1559 return entry_value(h, bucket_at(h, idx));
60918275
LP
1560}
1561
89439d4f
MS
1562void *internal_hashmap_first_key(HashmapBase *h) {
1563 struct hashmap_base_entry *e;
1564 unsigned idx;
2e4a6ff4 1565
89439d4f
MS
1566 idx = find_first_entry(h);
1567 if (idx == IDX_NIL)
2e4a6ff4
LP
1568 return NULL;
1569
89439d4f
MS
1570 e = bucket_at(h, idx);
1571 return (void*) e->key;
2e4a6ff4
LP
1572}
1573
89439d4f
MS
1574void *internal_hashmap_steal_first(HashmapBase *h) {
1575 struct hashmap_base_entry *e;
60918275 1576 void *data;
89439d4f 1577 unsigned idx;
60918275 1578
89439d4f
MS
1579 idx = find_first_entry(h);
1580 if (idx == IDX_NIL)
60918275
LP
1581 return NULL;
1582
89439d4f
MS
1583 e = bucket_at(h, idx);
1584 data = entry_value(h, e);
1585 remove_entry(h, idx);
60918275
LP
1586
1587 return data;
1588}
1589
89439d4f
MS
1590void *internal_hashmap_steal_first_key(HashmapBase *h) {
1591 struct hashmap_base_entry *e;
22be093f 1592 void *key;
89439d4f 1593 unsigned idx;
22be093f 1594
89439d4f
MS
1595 idx = find_first_entry(h);
1596 if (idx == IDX_NIL)
22be093f
LP
1597 return NULL;
1598
89439d4f
MS
1599 e = bucket_at(h, idx);
1600 key = (void*) e->key;
1601 remove_entry(h, idx);
22be093f
LP
1602
1603 return key;
1604}
1605
89439d4f 1606unsigned internal_hashmap_size(HashmapBase *h) {
60918275
LP
1607
1608 if (!h)
1609 return 0;
1610
89439d4f 1611 return n_entries(h);
60918275
LP
1612}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}