/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "fileio.h"
#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
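
/* Illustrative sketch (not part of the build): with linear probing, each
 * entry's DIB (distance from initial bucket) is how far it sits from the
 * bucket its hash points at. Robin Hood insertion evicts any "wealthier"
 * resident, i.e. one whose DIB is smaller than the incoming entry's current
 * displacement. Suppose A and B hash to bucket 0 and C hashes to bucket 2:
 *
 *     bucket:  0    1    2    3
 *     entry:   A    B    C    -
 *     DIB:     0    1    0
 *
 * Inserting D with hash 1 probes bucket 1 (B's DIB 1 is not smaller than
 * D's displacement 0, so keep probing), then bucket 2, where D's
 * displacement 1 exceeds C's DIB 0: D takes bucket 2 and C shifts on to
 * bucket 3 with DIB 1. No entry ends up much poorer than any other. */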

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
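
/* Worked example (a sketch, not normative): when resize_buckets() below
 * grows a table to hold 100 entries, it reserves
 * 100 + 100/(INV_KEEP_FREE - 1) = 125 buckets, i.e. at most 100/125 = 80%
 * load, before the allocation is rounded up to a power of two. */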

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                   /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;              /* number of stored entries */
        unsigned n_buckets;              /* number of buckets */

        unsigned idx_lowest_entry;       /* Index below which all buckets are free.
                                            Makes "while(hashmap_steal_first())" loops
                                            O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
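
/* Worked example (a sketch, assuming 64bit pointers and the packed 39-byte
 * direct_storage described above): DIRECT_BUCKETS is 39/(8+1) = 4 for
 * struct set_entry, 39/(16+1) = 2 for struct plain_hashmap_entry, and
 * 39/(24+1) = 1 for struct ordered_hashmap_entry, so both assert_cc
 * checks above hold. */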

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops; /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

#ifdef VALGRIND
__attribute__((destructor)) static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */
        if (!is_main_thread())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because it reached its
         * fill limit. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

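/* Layout sketch (illustrative, not normative) of the storage area that
 * dib_raw_ptr() indexes into, for a hypothetical table with four buckets:
 * the entry array is followed directly by one dib_raw_t per bucket:
 *
 *     [entry 0][entry 1][entry 2][entry 3][dib 0][dib 1][dib 2][dib 3]
 *
 * The same layout is used for both direct and indirect storage. */
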
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
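
/* Illustrative sketch (not part of the build) of the backward shift above:
 * suppose A..D all hash to bucket 0, so their DIBs are 0,1,2,3. Removing B
 * from bucket 1 scans right to the first bucket that is free or has
 * DIB == 0, then shifts the interval back by one, decrementing each DIB:
 *
 *     before:  A(0) B(1) C(2) D(3) -        after:  A(0) C(1) D(2) -  -
 *
 * No tombstones are needed, so lookups never degrade after deletions. */
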
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}

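/* Usage sketch (illustrative only): the public HASHMAP_FOREACH/SET_FOREACH
 * macros in hashmap.h and set.h wrap these iterator calls. Removing the
 * *current* entry while iterating is allowed; the backward-shift checks
 * above compensate for it. Given a populated Hashmap *h:
 *
 *     Iterator i;
 *     void *v;
 *     HASHMAP_FOREACH(v, h, i)
 *             log_debug("%s", (const char*) v);
 */
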
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);

        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

HashmapBase *internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (h) {
                internal_hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

HashmapBase *internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (h) {
                internal_hashmap_clear_free(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

Hashmap *hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (h) {
                hashmap_clear_free_free(h);
                hashmap_free_no_clear(HASHMAP_BASE(h));
        }

        return NULL;
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap,
 *                     and that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

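/* Usage sketch (illustrative only, assuming the string_hash_ops helpers
 * declared in hash-funcs.h): hashmap_put() returns 1 for a new key, 0 when
 * the same key/value pair is already present, and -EEXIST when the key is
 * present with a different value:
 *
 *     Hashmap *h = hashmap_new(&string_hash_ops);
 *     assert_se(hashmap_put(h, "one", (void*) "1") == 1);
 *     assert_se(hashmap_put(h, "one", (void*) "1") == 0);
 *     assert_se(hashmap_put(h, "one", (void*) "2") == -EEXIST);
 *     assert_se(streq(hashmap_get(h, "one"), "1"));
 *     h = hashmap_free(h);
 */
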
int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (rkey)
                        *rkey = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (rkey)
                *rkey = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

unsigned internal_hashmap_size(HashmapBase *h) {

        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int set_put_strdup(Set *s, const char *p) {
        char *c;

        assert(s);
        assert(p);

        if (set_contains(s, (char*) p))
                return 0;

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        return set_consume(s, c);
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = v;
        int r;

        assert(s);
        assert(v);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}
1852}