/* SPDX-License-Identifier: LGPL-2.1+ */
/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "hashmap.h"
#include "fileio.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
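
/*
 * Worked insertion sketch (illustrative only, indices made up): suppose
 * buckets 0..2 hold entries with DIBs [0, 0, 1] and a new key hashes to
 * bucket 0. At bucket 0 the probing entry's distance (0) does not exceed
 * the resident's DIB, so it moves on. At bucket 1 its distance (1) exceeds
 * the resident's DIB (0), so the two swap ("rob the rich") and the
 * displaced resident probes onward, finally landing in free bucket 3 with
 * DIB 2. Result: DIBs [0, 1, 1, 2] - displacement is spread evenly instead
 * of piling up on the newest key.
 */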

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
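
/*
 * Worked example: with INV_KEEP_FREE == 5 the maximum load factor is
 * 1 - 1/5 = 0.8, so a table that must hold n = 100 entries is sized to at
 * least m = n + n/(INV_KEEP_FREE - 1) = 100 + 25 = 125 buckets, keeping one
 * fifth of them free. See resize_buckets() below.
 */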

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

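/*
 * Illustration (not part of the implementation): the raw DIB is one byte
 * per bucket. Values 0..0xfc encode the distance directly; anything larger
 * is stored saturated as DIB_RAW_OVERFLOW, and bucket_calculate_dib() below
 * recomputes the true distance from the key's hash when it is needed.
 */
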
#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                    /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while (hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

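/*
 * Worked example (assuming 64-bit pointers): sizeof(struct indirect_storage)
 * is 39 bytes (see the comment in direct_storage above), a set_entry is one
 * pointer (8 bytes) and each bucket needs one extra DIB byte, so
 * DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4 direct buckets.
 */
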
/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

#ifdef VALGRIND
__attribute__((destructor)) static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */
        if (!is_main_thread())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

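/*
 * Example: in a table of 8 buckets, an entry whose hash points at bucket 6
 * but that resides at bucket 1 has wrapped around the end of the table, so
 * its distance is 8 + 1 - 6 = 3.
 */
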
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                                bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                                bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)

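/*
 * Worked deletion sketch (illustrative, indices made up): suppose buckets
 * 3..6 hold DIBs
 *
 *         bucket:  3  4  5  6
 *         DIB:     0  1  1  0
 *
 * Removing the entry at bucket 4 finds the stop bucket at 6 (DIB == 0),
 * shifts bucket 5 back into bucket 4 with its DIB decremented to 0, and
 * marks bucket 5 free. No tombstones are left behind, so probe sequences
 * stay short.
 */
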
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}

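/*
 * Usage sketch (illustrative; "m" and do_something() are hypothetical, and
 * callers normally go through the HASHMAP_FOREACH/SET_FOREACH macros from
 * hashmap.h/set.h rather than calling this directly):
 *
 *         Iterator i = ITERATOR_FIRST;
 *         const void *k;
 *         void *v;
 *
 *         while (internal_hashmap_iterate(HASHMAP_BASE(m), &i, &v, &k))
 *                 do_something(k, v);
 *
 * Removing the entry currently being visited is allowed; any other
 * concurrent modification of the map is not.
 */
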
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);

        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

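/*
 * Usage sketch (illustrative; hashmap_new() and hashmap_free() are the macro
 * wrappers in hashmap.h that pass the debug arguments, and string_hash_ops
 * comes from hash-funcs.h):
 *
 *         Hashmap *m = hashmap_new(&string_hash_ops);
 *         if (!m)
 *                 return -ENOMEM;
 *         ...
 *         m = hashmap_free(m);
 */
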
static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

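/*
 * Sizing sketch (illustrative, 64-bit sizes assumed): a plain_hashmap_entry
 * is two pointers (16 bytes), plus one dib_raw_t byte per bucket, i.e.
 * 17 bytes per bucket. Growing to hold 100 entries requires 125 buckets
 * (see INV_KEEP_FREE), i.e. at least 2125 bytes of storage;
 * log2u_round_up() rounds that to the next power of two, 4096 bytes, which
 * in turn yields 4096 / 17 = 240 buckets.
 */
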
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

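/*
 * Why the scan may stop as soon as "dib < distance" (informal): Robin Hood
 * insertion never lets a probing entry pass a resident entry that has
 * travelled less far than itself. If the key being looked up were present,
 * it would therefore have displaced any poorer resident before this point,
 * so probing further cannot find it.
 */
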
int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

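/*
 * Usage sketch (illustrative; string_hash_ops is from hash-funcs.h and
 * INT_TO_PTR() from util.h):
 *
 *         _cleanup_hashmap_free_ Hashmap *m = hashmap_new(&string_hash_ops);
 *
 *         r = hashmap_put(m, "answer", INT_TO_PTR(42));
 *
 * r is 1 on the first insertion, 0 if the exact same key/value pair is
 * already present, and -EEXIST if the key is mapped to a different value.
 */
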
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

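/*
 * Usage sketch (illustrative; "m" is hypothetical): draining a map with the
 * steal-first helpers; for unordered hashmaps idx_lowest_entry (see
 * indirect_storage above) keeps such loops O(n):
 *
 *         void *v;
 *
 *         while ((v = hashmap_steal_first(m)))
 *                 free(v);
 */
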
unsigned internal_hashmap_size(HashmapBase *h) {

        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;

        assert(s);
        assert(p);

        if (set_contains(s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(s, c);
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
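
/*
 * Usage sketch (illustrative; set_new() and _cleanup_set_free_free_ are
 * from set.h, WHITESPACE from string-util.h):
 *
 *         _cleanup_set_free_free_ Set *s = set_new(&string_hash_ops);
 *
 *         r = set_put_strsplit(s, "foo bar baz", WHITESPACE, 0);
 *
 * r is the result of the final extract_first_word() call: 0 once the input
 * is exhausted, negative on error.
 */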