]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/basic/hashmap.c
hwdb: add support for Alienware graphics amplifier
[thirdparty/systemd.git] / src / basic / hashmap.c
CommitLineData
d6c9574f 1/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
60918275 2
a7334b09
LP
3/***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
89439d4f 7 Copyright 2014 Michal Schmidt
a7334b09
LP
8
9 systemd is free software; you can redistribute it and/or modify it
5430f7f2
LP
10 under the terms of the GNU Lesser General Public License as published by
11 the Free Software Foundation; either version 2.1 of the License, or
a7334b09
LP
12 (at your option) any later version.
13
14 systemd is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
5430f7f2 17 Lesser General Public License for more details.
a7334b09 18
5430f7f2 19 You should have received a copy of the GNU Lesser General Public License
a7334b09
LP
20 along with systemd; If not, see <http://www.gnu.org/licenses/>.
21***/
22
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

#include "util.h"
#include "hashmap.h"
#include "set.h"
#include "macro.h"
#include "siphash24.h"
#include "strv.h"
#include "mempool.h"
#include "random-util.h"
60918275 34
2eec67ac
TA
35#ifdef ENABLE_DEBUG_HASHMAP
36#include "list.h"
37#endif
38
89439d4f
MS
39/*
40 * Implementation of hashmaps.
41 * Addressing: open
42 * - uses less RAM compared to closed addressing (chaining), because
43 * our entries are small (especially in Sets, which tend to contain
44 * the majority of entries in systemd).
45 * Collision resolution: Robin Hood
46 * - tends to equalize displacement of entries from their optimal buckets.
47 * Probe sequence: linear
48 * - though theoretically worse than random probing/uniform hashing/double
49 * hashing, it is good for cache locality.
50 *
51 * References:
52 * Celis, P. 1986. Robin Hood Hashing.
53 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
54 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
55 * - The results are derived for random probing. Suggests deletion with
56 * tombstones and two mean-centered search methods. None of that works
57 * well for linear probing.
58 *
59 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
60 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
61 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
62 * http://www.math.uu.se/~svante/papers/sj157.pdf
63 * - Applies to Robin Hood with linear probing. Contains remarks on
64 * the unsuitability of mean-centered search with linear probing.
65 *
66 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
67 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
68 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
69 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
70 * in a successful search), and Janson writes about displacement. C = d + 1.
71 *
72 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
73 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
74 * - Explanation of backward shift deletion with pictures.
75 *
76 * Khuong, P. 2013. The Other Robin Hood Hashing.
77 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
78 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
79 */
80
81/*
82 * XXX Ideas for improvement:
83 * For unordered hashmaps, randomize iteration order, similarly to Perl:
84 * http://blog.booking.com/hardening-perls-hash-function.html
85 */
86
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE            5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        /* Bucket indices forming the doubly-linked insertion-order list;
         * IDX_NIL terminates the list at either end. */
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into a ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
132
89439d4f
MS
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

/* Unsigned-width sentinel for "free" used by the non-raw DIB helpers. */
#define DIB_FREE UINT_MAX

#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        char    *storage;                  /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while(hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;
210
/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        /* Bucket indices of the first/last entry in insertion order;
         * IDX_NIL when the map is empty. */
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

/* Per-type constants, indexed by enum HashmapType. */
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
39c2a6f1 276
9bf3b535
LP
277unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
278 uint64_t u;
279 siphash24((uint8_t*) &u, p, strlen(p), hash_key);
280 return (unsigned long) u;
60918275
LP
281}
282
/* Orders two NUL-terminated strings lexicographically, strcmp() semantics. */
int string_compare_func(const void *a, const void *b) {
        const char *x = a, *y = b;

        return strcmp(x, y);
}
286
d5099efc
MS
/* Ops for hashmaps/sets keyed by NUL-terminated strings. */
const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
291
9bf3b535
LP
292unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
293 uint64_t u;
294 siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
295 return (unsigned long) u;
60918275
LP
296}
297
/* Orders two pointers by their address value.
 *
 * Relational comparison of pointers that do not point into the same
 * object is undefined behavior in ISO C (C11 6.5.8p5). Since callers
 * hand us arbitrary, unrelated pointers as keys, convert to uintptr_t
 * first, which yields a well-defined total order with the same result
 * on all flat-address-space platforms. */
int trivial_compare_func(const void *a, const void *b) {
        uintptr_t x = (uintptr_t) a, y = (uintptr_t) b;

        return x < y ? -1 : (x > y ? 1 : 0);
}
301
d5099efc
MS
/* Ops for hashmaps/sets keyed by raw pointer identity; also the
 * fallback used when no hash_ops are supplied to hashmap_base_new(). */
const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};
306
9bf3b535 307unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
a4bcff5b 308 uint64_t u;
9bf3b535
LP
309 siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
310 return (unsigned long) u;
a4bcff5b
LP
311}
312
/* Orders two uint64_t values pointed to by '_a' and '_b'. */
int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a = *(const uint64_t*) _a;
        uint64_t b = *(const uint64_t*) _b;

        if (a < b)
                return -1;
        if (a > b)
                return 1;

        return 0;
}
319
d5099efc
MS
/* Ops for hashmaps/sets keyed by pointers to uint64_t. */
const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};
324
de99c9dc
LP
/* dev_t variants are only needed when dev_t is not 64 bits wide;
 * otherwise uint64_hash_ops can be used directly. */
#if SIZEOF_DEV_T != 8
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

/* Ops for hashmaps/sets keyed by pointers to dev_t. */
const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
344
89439d4f
MS
345static unsigned n_buckets(HashmapBase *h) {
346 return h->has_indirect ? h->indirect.n_buckets
347 : hashmap_type_info[h->type].n_direct_buckets;
348}
349
350static unsigned n_entries(HashmapBase *h) {
351 return h->has_indirect ? h->indirect.n_entries
352 : h->n_direct_entries;
353}
354
355static void n_entries_inc(HashmapBase *h) {
356 if (h->has_indirect)
357 h->indirect.n_entries++;
358 else
359 h->n_direct_entries++;
360}
361
362static void n_entries_dec(HashmapBase *h) {
363 if (h->has_indirect)
364 h->indirect.n_entries--;
365 else
366 h->n_direct_entries--;
367}
368
369static char *storage_ptr(HashmapBase *h) {
370 return h->has_indirect ? h->indirect.storage
371 : h->direct.storage;
372}
373
374static uint8_t *hash_key(HashmapBase *h) {
375 return h->has_indirect ? h->indirect.hash_key
376 : shared_hash_key;
377}
378
379static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
380 return (unsigned) (h->hash_ops->hash(p, hash_key(h)) % n_buckets(h));
9bf3b535 381}
89439d4f 382#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
9bf3b535
LP
383
384static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
385 static uint8_t current[HASH_KEY_SIZE];
386 static bool current_initialized = false;
387
388 /* Returns a hash function key to use. In order to keep things
389 * fast we will not generate a new key each time we allocate a
390 * new hash table. Instead, we'll just reuse the most recently
391 * generated one, except if we never generated one or when we
392 * are rehashing an entire hash table because we reached a
393 * fill level */
394
395 if (!current_initialized || !reuse_is_ok) {
396 random_bytes(current, sizeof(current));
397 current_initialized = true;
398 }
399
400 memcpy(hash_key, current, sizeof(current));
a3b6fafe
LP
401}
402
89439d4f
MS
403static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
404 return (struct hashmap_base_entry*)
405 (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
406}
407
408static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
409 return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
410}
411
412static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
413 return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
414}
39c2a6f1 415
89439d4f
MS
416static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
417 return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
418}
39c2a6f1 419
89439d4f
MS
420static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
421 return &swap->e[idx - _IDX_SWAP_BEGIN];
422}
39c2a6f1 423
89439d4f
MS
424/* Returns a pointer to the bucket at index idx.
425 * Understands real indexes and swap indexes, hence "_virtual". */
426static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
427 unsigned idx) {
428 if (idx < _IDX_SWAP_BEGIN)
429 return bucket_at(h, idx);
430
431 if (idx < _IDX_SWAP_END)
432 return &bucket_at_swap(swap, idx)->p.b;
433
434 assert_not_reached("Invalid index");
435}
436
437static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
438 return (dib_raw_t*)
439 (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
440}
441
442static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
443 return idx >= from ? idx - from
444 : n_buckets(h) + idx - from;
445}
446
/* Decodes the raw (one-byte) DIB of bucket 'idx' into a full-width DIB,
 * recomputing it from the key's hash when the raw value overflowed. */
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

/* Stores 'dib' into the raw DIB byte, clamping to DIB_RAW_OVERFLOW. */
static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

/* Returns the first occupied bucket at or after 'idx', or IDX_NIL. */
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

/* Zeroes the entry and marks the bucket free in the DIB array. */
static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
490
/* Copies the entry in bucket 'from' into bucket 'to' (either may be a
 * swap index). For ordered hashmaps, also patches the neighbors' links
 * and the list head/tail so the insertion-order list stays consistent. */
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                /* Point the successor's back-link at the new location. */
                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                /* Point the predecessor's forward-link at the new location. */
                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

/* Next bucket index with wrap-around. */
static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

/* Previous bucket index with wrap-around. */
static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}
60918275 534
89439d4f
MS
535static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
536 switch (h->type) {
45fa9e29 537
89439d4f
MS
538 case HASHMAP_TYPE_PLAIN:
539 case HASHMAP_TYPE_ORDERED:
540 return ((struct plain_hashmap_entry*)e)->value;
39c2a6f1 541
89439d4f
MS
542 case HASHMAP_TYPE_SET:
543 return (void*) e->key;
a3b6fafe 544
89439d4f
MS
545 default:
546 assert_not_reached("Unknown hashmap type");
547 }
60918275
LP
548}
549
89439d4f
MS
/* Removes the entry in bucket 'idx' using backward shift deletion:
 * instead of leaving a tombstone, all entries between idx and the next
 * free/DIB==0 bucket are shifted one slot back and their DIBs decremented,
 * keeping the Robin Hood invariant intact. */
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        /* Unlink the entry from the insertion-order list first. */
        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
603
/* Advances iterator 'i' over an ordered hashmap, following the
 * insertion-order linked list. Returns the bucket index of the current
 * entry, or IDX_NIL at the end of iteration. */
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Remember where to go next, and the key we expect to find there. */
        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
654
89439d4f
MS
/* Advances iterator 'i' over an unordered hashmap/set in bucket order.
 * Returns the bucket index of the current entry, or IDX_NIL at the end. */
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Remember the next occupied bucket and the key we expect there. */
        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
709
89439d4f
MS
/* Dispatches iteration to the order-preserving or internal-order walker
 * depending on the map type. Returns the current bucket index or IDX_NIL.
 * In debug builds, verifies the map was not modified behind the
 * iterator's back (except removal of the current entry). */
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
39c2a6f1 735
89439d4f
MS
736void *internal_hashmap_iterate(HashmapBase *h, Iterator *i, const void **key) {
737 struct hashmap_base_entry *e;
738 void *data;
739 unsigned idx;
740
741 idx = hashmap_iterate_entry(h, i);
742 if (idx == IDX_NIL) {
743 if (key)
744 *key = NULL;
745
746 return NULL;
747 }
748
749 e = bucket_at(h, idx);
750 data = entry_value(h, e);
751 if (key)
752 *key = e->key;
753
754 return data;
101d8e63
LP
755}
756
89439d4f
MS
757void *set_iterate(Set *s, Iterator *i) {
758 return internal_hashmap_iterate(HASHMAP_BASE(s), i, NULL);
759}
60918275 760
89439d4f
MS
/* Iterates over all occupied bucket indices of 'h' using iterator 'i'. */
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

/* Zeroes the direct-entry area and marks all direct DIB bytes free. */
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        /* mempset() returns the pointer just past the zeroed entry array,
         * which is exactly where the DIB bytes live. */
        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
775
3ef11dcf 776static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
89439d4f
MS
777 HashmapBase *h;
778 const struct hashmap_type_info *hi = &hashmap_type_info[type];
779 bool use_pool;
780
781 use_pool = is_main_thread();
782
783 h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
67f3c402 784
60918275 785 if (!h)
89439d4f
MS
786 return NULL;
787
788 h->type = type;
789 h->from_pool = use_pool;
790 h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;
791
792 if (type == HASHMAP_TYPE_ORDERED) {
793 OrderedHashmap *lh = (OrderedHashmap*)h;
794 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
795 }
796
797 reset_direct_storage(h);
60918275 798
89439d4f
MS
799 if (!shared_hash_key_initialized) {
800 random_bytes(shared_hash_key, sizeof(shared_hash_key));
801 shared_hash_key_initialized= true;
802 }
803
fc86aa0e 804#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
805 LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
806 h->debug.func = func;
807 h->debug.file = file;
808 h->debug.line = line;
809#endif
810
811 return h;
812}
60918275 813
89439d4f 814Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 815 return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
816}
817
818OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 819 return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
820}
821
822Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 823 return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
824}
825
826static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
3ef11dcf 827 enum HashmapType type HASHMAP_DEBUG_PARAMS) {
89439d4f
MS
828 HashmapBase *q;
829
830 assert(h);
831
832 if (*h)
833 return 0;
834
3ef11dcf 835 q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
836 if (!q)
837 return -ENOMEM;
838
839 *h = q;
840 return 0;
841}
842
843int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 844 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
845}
846
847int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 848 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
849}
850
851int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
3ef11dcf 852 return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
89439d4f
MS
853}
854
855static void hashmap_free_no_clear(HashmapBase *h) {
856 assert(!h->has_indirect);
857 assert(!h->n_direct_entries);
858
fc86aa0e 859#ifdef ENABLE_DEBUG_HASHMAP
89439d4f
MS
860 LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
861#endif
45fa9e29 862
39c2a6f1 863 if (h->from_pool)
89439d4f 864 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
39c2a6f1
LP
865 else
866 free(h);
60918275
LP
867}
868
cfe561a4 869HashmapBase *internal_hashmap_free(HashmapBase *h) {
89439d4f
MS
870
871 /* Free the hashmap, but nothing in it */
872
cfe561a4
DH
873 if (h) {
874 internal_hashmap_clear(h);
875 hashmap_free_no_clear(h);
876 }
89439d4f 877
cfe561a4 878 return NULL;
89439d4f
MS
879}
880
cfe561a4 881HashmapBase *internal_hashmap_free_free(HashmapBase *h) {
67f3c402
LP
882
883 /* Free the hashmap and all data objects in it, but not the
884 * keys */
885
cfe561a4
DH
886 if (h) {
887 internal_hashmap_clear_free(h);
888 hashmap_free_no_clear(h);
889 }
61b1477c 890
cfe561a4 891 return NULL;
449ddb2d
LP
892}
893
cfe561a4 894Hashmap *hashmap_free_free_free(Hashmap *h) {
fabe5c0e
LP
895
896 /* Free the hashmap and all data and key objects in it */
897
cfe561a4
DH
898 if (h) {
899 hashmap_clear_free_free(h);
900 hashmap_free_no_clear(HASHMAP_BASE(h));
901 }
fabe5c0e 902
cfe561a4 903 return NULL;
fabe5c0e
LP
904}
905
89439d4f 906void internal_hashmap_clear(HashmapBase *h) {
11dd41ce
LP
907 if (!h)
908 return;
909
89439d4f
MS
910 if (h->has_indirect) {
911 free(h->indirect.storage);
912 h->has_indirect = false;
913 }
914
915 h->n_direct_entries = 0;
916 reset_direct_storage(h);
917
918 if (h->type == HASHMAP_TYPE_ORDERED) {
919 OrderedHashmap *lh = (OrderedHashmap*) h;
920 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
921 }
11dd41ce
LP
922}
923
89439d4f
MS
924void internal_hashmap_clear_free(HashmapBase *h) {
925 unsigned idx;
9946996c 926
61b1477c
LP
927 if (!h)
928 return;
9946996c 929
89439d4f
MS
930 for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
931 idx = skip_free_buckets(h, idx + 1))
932 free(entry_value(h, bucket_at(h, idx)));
933
934 internal_hashmap_clear(h);
9946996c
LP
935}
936
/* Remove all entries, free()ing both the keys and the values. Only
 * meaningful for plain/ordered hashmaps (not Sets, which have no
 * separate value). */
void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                /* Keys are stored const; cast away for free(). */
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}
952
89439d4f
MS
static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        /* 'distance' is the DIB (distance from initial bucket) of the
         * entry currently being placed; it grows as we probe linearly. */
        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        /* A bucket we may claim. If it holds an entry
                         * pending rehash, park that entry in IDX_TMP so
                         * it is not lost. */
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                /* Hand the displaced entry back to the
                                 * caller for rehashing. */
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        /* Continue placing the evicted entry, which sits
                         * at its own distance 'dib' from home. */
                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
1011
89439d4f
MS
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                /* A resize rehashes everything; recompute the target
                 * bucket for the new key. */
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                /* Append the new entry to the iteration list. IDX_PUT is
                 * used as a placeholder link here; presumably
                 * bucket_move_entry() rewrites links referring to the
                 * swap slot once the entry lands in its final bucket —
                 * NOTE(review): confirm against bucket_move_entry(). */
                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        /* Must not leave a displaced entry behind: the key is new, so no
         * in-place rehash chain can be triggered. */
        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1070
/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        /* Byte size of buckets + DIB array must fit in unsigned. */
        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        /* Storage size is rounded up to a power of two. */
        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        /* The old DIB array still sits right after the old (smaller)
         * bucket array inside the reallocated block. */
        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
               (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
45fa9e29 1209
89439d4f
MS
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                /* A free bucket terminates every Robin Hood probe chain. */
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                /* Robin Hood invariant: once we see an entry closer to its
                 * home than our probe distance, the key cannot be present. */
                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
60918275 1239
/* Insert key/value. Returns 1 if inserted, 0 if the exact same
 * key/value pair is already present, -EEXIST if the key is present with
 * a different value, -ENOMEM on allocation failure. */
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        /* Stage the new entry in the swap area and place it boldly. */
        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
1261
/* Insert a key into a set. Returns 1 if inserted, 0 if it was already
 * present, -ENOMEM on allocation failure. */
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        /* Sets carry only the base entry (the key). */
        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
1278
/* Insert key/value, overwriting any existing entry for the key.
 * Returns 0 if an existing entry was replaced, 1 if a new entry was
 * inserted, -ENOMEM on allocation failure. */
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                /* Overwrite in place, adopting the caller's key pointer. */
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
1310
d99ae53a 1311int hashmap_update(Hashmap *h, const void *key, void *value) {
89439d4f
MS
1312 struct plain_hashmap_entry *e;
1313 unsigned hash, idx;
d99ae53a
LP
1314
1315 assert(h);
1316
a3b6fafe 1317 hash = bucket_hash(h, key);
89439d4f
MS
1318 idx = bucket_scan(h, hash, key);
1319 if (idx == IDX_NIL)
d99ae53a
LP
1320 return -ENOENT;
1321
89439d4f 1322 e = plain_bucket_at(h, idx);
d99ae53a
LP
1323 e->value = value;
1324 return 0;
1325}
1326
89439d4f
MS
1327void *internal_hashmap_get(HashmapBase *h, const void *key) {
1328 struct hashmap_base_entry *e;
1329 unsigned hash, idx;
60918275
LP
1330
1331 if (!h)
1332 return NULL;
1333
a3b6fafe 1334 hash = bucket_hash(h, key);
89439d4f
MS
1335 idx = bucket_scan(h, hash, key);
1336 if (idx == IDX_NIL)
60918275
LP
1337 return NULL;
1338
89439d4f
MS
1339 e = bucket_at(h, idx);
1340 return entry_value(h, e);
60918275
LP
1341}
1342
89439d4f
MS
1343void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
1344 struct plain_hashmap_entry *e;
1345 unsigned hash, idx;
d99ae53a
LP
1346
1347 if (!h)
1348 return NULL;
1349
a3b6fafe 1350 hash = bucket_hash(h, key);
89439d4f
MS
1351 idx = bucket_scan(h, hash, key);
1352 if (idx == IDX_NIL)
d99ae53a
LP
1353 return NULL;
1354
89439d4f 1355 e = plain_bucket_at(h, idx);
d99ae53a 1356 if (key2)
89439d4f 1357 *key2 = (void*) e->b.key;
d99ae53a
LP
1358
1359 return e->value;
1360}
1361
89439d4f 1362bool internal_hashmap_contains(HashmapBase *h, const void *key) {
96342de6 1363 unsigned hash;
96342de6
LN
1364
1365 if (!h)
1366 return false;
1367
a3b6fafe 1368 hash = bucket_hash(h, key);
89439d4f 1369 return bucket_scan(h, hash, key) != IDX_NIL;
96342de6
LN
1370}
1371
89439d4f
MS
1372void *internal_hashmap_remove(HashmapBase *h, const void *key) {
1373 struct hashmap_base_entry *e;
1374 unsigned hash, idx;
60918275
LP
1375 void *data;
1376
1377 if (!h)
1378 return NULL;
1379
a3b6fafe 1380 hash = bucket_hash(h, key);
89439d4f
MS
1381 idx = bucket_scan(h, hash, key);
1382 if (idx == IDX_NIL)
60918275
LP
1383 return NULL;
1384
89439d4f
MS
1385 e = bucket_at(h, idx);
1386 data = entry_value(h, e);
1387 remove_entry(h, idx);
60918275
LP
1388
1389 return data;
1390}
1391
89439d4f
MS
1392void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1393 struct plain_hashmap_entry *e;
1394 unsigned hash, idx;
c582a3b3
LP
1395 void *data;
1396
1397 if (!h) {
1398 if (rkey)
1399 *rkey = NULL;
1400 return NULL;
1401 }
1402
1403 hash = bucket_hash(h, key);
89439d4f
MS
1404 idx = bucket_scan(h, hash, key);
1405 if (idx == IDX_NIL) {
c582a3b3
LP
1406 if (rkey)
1407 *rkey = NULL;
1408 return NULL;
1409 }
1410
89439d4f 1411 e = plain_bucket_at(h, idx);
c582a3b3
LP
1412 data = e->value;
1413 if (rkey)
89439d4f 1414 *rkey = (void*) e->b.key;
c582a3b3 1415
89439d4f 1416 remove_entry(h, idx);
c582a3b3
LP
1417
1418 return data;
1419}
1420
/* Atomically re-key an entry: remove the entry under 'old_key' and
 * re-insert 'value' under 'new_key'. Returns 0 on success, -ENOENT if
 * old_key is absent, -EEXIST if new_key is already taken. */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        /* may_resize=false: removing freed a slot, so this cannot fail. */
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1447
/* Set analogue of hashmap_remove_and_put(): replace 'old_key' with
 * 'new_key'. Returns 0 on success, -ENOENT if old_key is absent,
 * -EEXIST if new_key is already present. */
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        /* may_resize=false: removing freed a slot, so this cannot fail. */
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
1473
/* Re-key an entry, overwriting any existing entry under 'new_key'.
 * Returns 0 on success, -ENOENT if old_key is absent. */
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        /* Track the stored key pointer so we can detect whether the old
         * entry moved when the new_key entry was removed below. */
        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift:
                         * remove_entry() may shift following entries one
                         * bucket back, moving the old entry too. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        /* may_resize=false: removals freed at least one slot. */
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1509
89439d4f
MS
1510void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1511 struct plain_hashmap_entry *e;
1512 unsigned hash, idx;
3158713e
LP
1513
1514 if (!h)
1515 return NULL;
1516
a3b6fafe 1517 hash = bucket_hash(h, key);
89439d4f
MS
1518 idx = bucket_scan(h, hash, key);
1519 if (idx == IDX_NIL)
3158713e
LP
1520 return NULL;
1521
89439d4f 1522 e = plain_bucket_at(h, idx);
3158713e
LP
1523 if (e->value != value)
1524 return NULL;
1525
89439d4f 1526 remove_entry(h, idx);
3158713e
LP
1527
1528 return value;
1529}
1530
89439d4f
MS
1531static unsigned find_first_entry(HashmapBase *h) {
1532 Iterator i = ITERATOR_FIRST;
60918275 1533
89439d4f
MS
1534 if (!h || !n_entries(h))
1535 return IDX_NIL;
60918275 1536
89439d4f 1537 return hashmap_iterate_entry(h, &i);
60918275
LP
1538}
1539
89439d4f
MS
1540void *internal_hashmap_first(HashmapBase *h) {
1541 unsigned idx;
60918275 1542
89439d4f
MS
1543 idx = find_first_entry(h);
1544 if (idx == IDX_NIL)
60918275
LP
1545 return NULL;
1546
89439d4f 1547 return entry_value(h, bucket_at(h, idx));
60918275
LP
1548}
1549
89439d4f
MS
1550void *internal_hashmap_first_key(HashmapBase *h) {
1551 struct hashmap_base_entry *e;
1552 unsigned idx;
2e4a6ff4 1553
89439d4f
MS
1554 idx = find_first_entry(h);
1555 if (idx == IDX_NIL)
2e4a6ff4
LP
1556 return NULL;
1557
89439d4f
MS
1558 e = bucket_at(h, idx);
1559 return (void*) e->key;
2e4a6ff4
LP
1560}
1561
89439d4f
MS
1562void *internal_hashmap_steal_first(HashmapBase *h) {
1563 struct hashmap_base_entry *e;
60918275 1564 void *data;
89439d4f 1565 unsigned idx;
60918275 1566
89439d4f
MS
1567 idx = find_first_entry(h);
1568 if (idx == IDX_NIL)
60918275
LP
1569 return NULL;
1570
89439d4f
MS
1571 e = bucket_at(h, idx);
1572 data = entry_value(h, e);
1573 remove_entry(h, idx);
60918275
LP
1574
1575 return data;
1576}
1577
89439d4f
MS
1578void *internal_hashmap_steal_first_key(HashmapBase *h) {
1579 struct hashmap_base_entry *e;
22be093f 1580 void *key;
89439d4f 1581 unsigned idx;
22be093f 1582
89439d4f
MS
1583 idx = find_first_entry(h);
1584 if (idx == IDX_NIL)
22be093f
LP
1585 return NULL;
1586
89439d4f
MS
1587 e = bucket_at(h, idx);
1588 key = (void*) e->key;
1589 remove_entry(h, idx);
22be093f
LP
1590
1591 return key;
1592}
1593
89439d4f 1594unsigned internal_hashmap_size(HashmapBase *h) {
60918275
LP
1595
1596 if (!h)
1597 return 0;
1598
89439d4f 1599 return n_entries(h);
60918275
LP
1600}
1601
89439d4f 1602unsigned internal_hashmap_buckets(HashmapBase *h) {
45fa9e29
LP
1603
1604 if (!h)
1605 return 0;
1606
89439d4f 1607 return n_buckets(h);
45fa9e29
LP
1608}
1609
89439d4f
MS
/* Copy all entries of 'other' into 'h'. Keys already present in 'h'
 * are silently skipped (-EEXIST is tolerated); 'other' is left
 * unchanged. Returns 0 on success or a negative errno. */
int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}
91cdde8a 1627
89439d4f
MS
/* Copy all keys of 'other' into 's'; duplicates are harmless since
 * set_put() returns 0 for them. 'other' is left unchanged. Returns 0
 * on success or a negative errno. */
int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}
1645
89439d4f 1646int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
e4c691b5
MS
1647 int r;
1648
1649 assert(h);
1650
1651 r = resize_buckets(h, entries_add);
1652 if (r < 0)
1653 return r;
1654
1655 return 0;
1656}
1657
89439d4f
MS
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                /* Keys already present in h stay in other. */
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                /* Plain/ordered maps carry a value besides the key. */
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                /* may_resize=false: capacity was reserved above. */
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
1708
89439d4f
MS
/* Move the single entry stored under 'key' from 'other' into 'h'.
 * Returns 0 on success, -EEXIST if h already has the key, -ENOENT if
 * other is NULL or lacks the key, -ENOMEM on allocation failure (in
 * which case 'other' is left unchanged). */
int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        /* Insert into h first; only remove from other once that
         * succeeded, so -ENOMEM leaves other intact. */
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}
1745
89439d4f
MS
/* Shallow-copy a hashmap: a new map of the same type and hash_ops,
 * sharing the key/value pointers. Returns NULL on allocation failure. */
HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}
db1413d7 1775
89439d4f 1776char **internal_hashmap_get_strv(HashmapBase *h) {
db1413d7 1777 char **sv;
89439d4f
MS
1778 Iterator i;
1779 unsigned idx, n;
db1413d7 1780
89439d4f 1781 sv = new(char*, n_entries(h)+1);
729e3769 1782 if (!sv)
db1413d7
KS
1783 return NULL;
1784
1785 n = 0;
89439d4f
MS
1786 HASHMAP_FOREACH_IDX(idx, h, i)
1787 sv[n++] = entry_value(h, bucket_at(h, idx));
db1413d7
KS
1788 sv[n] = NULL;
1789
1790 return sv;
1791}
3c1668da 1792
89439d4f
MS
1793void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1794 struct ordered_hashmap_entry *e;
1795 unsigned hash, idx;
3c1668da 1796
3c1668da
LP
1797 assert(key);
1798
1799 if (!h)
1800 return NULL;
1801
a3b6fafe 1802 hash = bucket_hash(h, key);
89439d4f
MS
1803 idx = bucket_scan(h, hash, key);
1804 if (idx == IDX_NIL)
3c1668da
LP
1805 return NULL;
1806
89439d4f
MS
1807 e = ordered_bucket_at(h, idx);
1808 if (e->iterate_next == IDX_NIL)
3c1668da 1809 return NULL;
89439d4f
MS
1810 return ordered_bucket_at(h, e->iterate_next)->p.value;
1811}
3c1668da 1812
89439d4f
MS
1813int set_consume(Set *s, void *value) {
1814 int r;
1815
1816 r = set_put(s, value);
575ccc1b 1817 if (r <= 0)
89439d4f
MS
1818 free(value);
1819
1820 return r;
1821}
1822
1823int set_put_strdup(Set *s, const char *p) {
1824 char *c;
1825 int r;
1826
1827 assert(s);
1828 assert(p);
1829
1830 c = strdup(p);
1831 if (!c)
1832 return -ENOMEM;
1833
1834 r = set_consume(s, c);
1835 if (r == -EEXIST)
1836 return 0;
1837
1838 return r;
1839}
1840
1841int set_put_strdupv(Set *s, char **l) {
1842 int n = 0, r;
1843 char **i;
1844
1845 STRV_FOREACH(i, l) {
1846 r = set_put_strdup(s, *i);
1847 if (r < 0)
1848 return r;
1849
1850 n += r;
1851 }
1852
1853 return n;
3c1668da 1854}