/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering
  Copyright 2014 Michal Schmidt

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/

#include <stdlib.h>
#include <errno.h>

#include "util.h"
#include "hashmap.h"
#include "set.h"
#include "macro.h"
#include "siphash24.h"
#include "strv.h"
#include "mempool.h"

#ifdef ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
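
/* Illustration (not normative): Robin Hood insertion swaps an arriving
 * entry with any resident that is closer to its optimal bucket ("richer",
 * smaller DIB). E.g., inserting C (optimal bucket 0) into
 *
 *     bucket:  0    1    2
 *     entry:   A    B    -      A, B also optimal at 0; DIB(A)=0, DIB(B)=1
 *
 * probes 0 (resident DIB 0 vs. arriving distance 0: no swap), then 1
 * (1 vs. 1: no swap), and lands in bucket 2 with DIB 2. Had B instead been
 * optimal at bucket 1 (DIB 0), C would have evicted it there and B would
 * move on to bucket 2, giving DIBs 0, 1, 1 rather than 0, 1, 2 - the
 * "equalize displacement" effect mentioned above. */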

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
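
/* Worked example (illustrative): INV_KEEP_FREE == 5 implies a maximum load
 * factor of 0.8, so a table asked to hold 100 entries is sized to
 * 100 + 100/(5 - 1) = 125 buckets, of which 25 (one fifth) stay free.
 * resize_buckets() below uses exactly this m = n + n/(INV_KEEP_FREE - 1)
 * formula. */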

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
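
/* Illustration (not normative): the raw DIB byte stores small distances
 * directly and reserves the top three values as markers:
 *
 *     0x00..0xfc  DIB equals the raw value
 *     0xfd        OVERFLOW: DIB >= 0xfd, recomputed from the hash on demand
 *     0xfe        REHASH: entry still to be moved during an in-place resize
 *     0xff        FREE: bucket unused
 *
 * bucket_set_dib() clamps with MIN(dib, DIB_RAW_OVERFLOW), and
 * bucket_calculate_dib() undoes the clamping via bucket_distance(). */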

#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        char    *storage;                 /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;               /* number of stored entries */
        unsigned n_buckets;               /* number of buckets */

        unsigned idx_lowest_entry;        /* Index below which all buckets are free.
                                             Makes "while(hashmap_steal_first())" loops
                                             O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         *           or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
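
/* Worked example (illustrative, 64bit): sizeof(struct indirect_storage) is
 * 8 (storage) + 16 (hash_key) + 3*4 (counters) + 3 (_pad) = 39 bytes, so
 * DIRECT_BUCKETS(struct set_entry)             = 39 / (8 + 1)  = 4,
 * DIRECT_BUCKETS(struct plain_hashmap_entry)   = 39 / (16 + 1) = 2,
 * DIRECT_BUCKETS(struct ordered_hashmap_entry) = 39 / (24 + 1) = 1. */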

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops; /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool,         Hashmap,        8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        return (unsigned long) u;
}

int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
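
/* Usage sketch (illustrative; the public hashmap_new()/hashmap_put()/... entry
 * points are macros in hashmap.h wrapping the internal_* functions below):
 *
 *         Hashmap *m;
 *         void *v;
 *         int r;
 *
 *         m = hashmap_new(&string_hash_ops);
 *         if (!m)
 *                 return -ENOMEM;
 *         r = hashmap_put(m, "one", some_value);
 *         if (r < 0)            // 1 on insert, 0 if the same value is already
 *                 return r;     // stored, -EEXIST on a conflicting value
 *         v = hashmap_get(m, "one");
 *         hashmap_free(m);
 */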

unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
}

int trivial_compare_func(const void *a, const void *b) {
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};

unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};

#if SIZEOF_DEV_T != 8
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        return (unsigned) (h->hash_ops->hash(p, hash_key(h)) % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level limit. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
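
/* Worked example (illustrative): with n_buckets == 8, an entry whose optimal
 * bucket ("from") is 6 but which sits at idx == 1 has wrapped around, so its
 * distance is 8 + 1 - 6 = 3 (probe path: 6, 7, 0, 1). */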

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
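
/* Illustration (not normative) of the backward shift above: removing B from
 *
 *     bucket:  0    1    2    3
 *     entry:   A    B    C    D      DIBs: 0, 0, 1, 1
 *
 * scans right for the stop bucket (here bucket 0, DIB == 0), shifts C and D
 * one step left while decrementing their DIBs, and frees bucket 3:
 *
 *     entry:   A    C    D    -      DIBs: 0, 0, 0
 *
 * No tombstones are left behind and lookups stay correct immediately. */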

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

void *internal_hashmap_iterate(HashmapBase *h, Iterator *i, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (key)
                        *key = NULL;

                return NULL;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (key)
                *key = e->key;

        return data;
}

void *set_iterate(Set *s, Iterator *i) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, NULL);
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
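
/* Usage sketch (illustrative; HASHMAP_FOREACH and friends in hashmap.h are
 * built on the same Iterator protocol as the internal macro above):
 *
 *         Iterator i;
 *         void *v;
 *
 *         HASHMAP_FOREACH(v, m, i) {
 *                 ...                        // removing the *current* entry
 *         }                                  // is allowed while iterating
 *
 * Adding entries, or removing any entry other than the current one, is not
 * supported mid-iteration and asserts under ENABLE_DEBUG_HASHMAP. */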

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);

        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
#endif

        return h;
}

Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(!h->n_direct_entries);

#ifdef ENABLE_DEBUG_HASHMAP
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
#endif

        if (h->from_pool)
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        else
                free(h);
}

void internal_hashmap_free(HashmapBase *h) {

        /* Free the hashmap, but nothing in it */

        if (!h)
                return;

        internal_hashmap_clear(h);
        hashmap_free_no_clear(h);
}

void internal_hashmap_free_free(HashmapBase *h) {

        /* Free the hashmap and all data objects in it, but not the
         * keys */

        if (!h)
                return;

        internal_hashmap_clear_free(h);
        hashmap_free_no_clear(h);
}

void hashmap_free_free_free(Hashmap *h) {

        /* Free the hashmap and all data and key objects in it */

        if (!h)
                return;

        hashmap_clear_free_free(h);
        hashmap_free_no_clear(HASHMAP_BASE(h));
}

void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}

void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}

void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */

                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
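
/* Illustration (not normative): during an in-place resize every bucket marked
 * DIB_RAW_REHASH still holds a live entry at its pre-resize position. When
 * the probe above lands on such a bucket, the resident entry is parked in
 * swap slot IDX_TMP, the arriving entry from IDX_PUT takes the bucket, and
 * the parked entry is handed back in IDX_PUT with a "true" return, so
 * resize_buckets() keeps following the displacement chain until it ends in a
 * genuinely free bucket. */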

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert(n_rehashed == n_entries(h));

        return 1;
}
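
/* Worked example (illustrative, 64bit Set, entry_size 8 + 1 DIB byte):
 * growing to hold 100 entries gives new_n_buckets = 125, i.e. 125 * 9 = 1125
 * bytes, so new_shift = log2u_round_up(1125) = 11 and 2048 bytes are
 * allocated, for n_buckets = 2048 / 9 = 227 until the next resize. */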
45fa9e29 1206
89439d4f
MS
1207/*
1208 * Finds an entry with a matching key
1209 * Returns: index of the found entry, or IDX_NIL if not found.
1210 */
1211static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1212 struct hashmap_base_entry *e;
1213 unsigned dib, distance;
1214 dib_raw_t *dibs = dib_raw_ptr(h);
39c2a6f1 1215
89439d4f 1216 assert(idx < n_buckets(h));
60918275 1217
89439d4f
MS
1218 for (distance = 0; ; distance++) {
1219 if (dibs[idx] == DIB_RAW_FREE)
1220 return IDX_NIL;
60918275 1221
89439d4f 1222 dib = bucket_calculate_dib(h, idx, dibs[idx]);
60918275 1223
89439d4f
MS
1224 if (dib < distance)
1225 return IDX_NIL;
1226 if (dib == distance) {
1227 e = bucket_at(h, idx);
1228 if (h->hash_ops->compare(e->key, key) == 0)
1229 return idx;
1230 }
1231
1232 idx = next_idx(h, idx);
1233 }
60918275 1234}
89439d4f 1235#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
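
/* Illustration (not normative): the "dib < distance" early abort above is
 * the Robin Hood invariant at work - had the key been inserted, it would
 * have displaced any richer (smaller-DIB) entry met along its probe path,
 * so finding one proves the key absent without scanning ahead to the next
 * free bucket. */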

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        return 0;
}

void *internal_hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool internal_hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void *internal_hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

89439d4f
MS
1389void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1390 struct plain_hashmap_entry *e;
1391 unsigned hash, idx;
c582a3b3
LP
1392 void *data;
1393
1394 if (!h) {
1395 if (rkey)
1396 *rkey = NULL;
1397 return NULL;
1398 }
1399
1400 hash = bucket_hash(h, key);
89439d4f
MS
1401 idx = bucket_scan(h, hash, key);
1402 if (idx == IDX_NIL) {
c582a3b3
LP
1403 if (rkey)
1404 *rkey = NULL;
1405 return NULL;
1406 }
1407
89439d4f 1408 e = plain_bucket_at(h, idx);
c582a3b3
LP
1409 data = e->value;
1410 if (rkey)
89439d4f 1411 *rkey = (void*) e->b.key;
c582a3b3 1412
89439d4f 1413 remove_entry(h, idx);
c582a3b3
LP
1414
1415 return data;
1416}
1417
101d8e63 1418int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
89439d4f
MS
1419 struct swap_entries swap;
1420 struct plain_hashmap_entry *e;
1421 unsigned old_hash, new_hash, idx;
101d8e63
LP
1422
1423 if (!h)
1424 return -ENOENT;
1425
a3b6fafe 1426 old_hash = bucket_hash(h, old_key);
89439d4f
MS
1427 idx = bucket_scan(h, old_hash, old_key);
1428 if (idx == IDX_NIL)
101d8e63
LP
1429 return -ENOENT;
1430
a3b6fafe 1431 new_hash = bucket_hash(h, new_key);
89439d4f 1432 if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
101d8e63
LP
1433 return -EEXIST;
1434
89439d4f 1435 remove_entry(h, idx);
101d8e63 1436
89439d4f
MS
1437 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1438 e->b.key = new_key;
101d8e63 1439 e->value = value;
89439d4f
MS
1440 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1441
1442 return 0;
1443}
1444
1445int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1446 struct swap_entries swap;
1447 struct hashmap_base_entry *e;
1448 unsigned old_hash, new_hash, idx;
101d8e63 1449
89439d4f
MS
1450 if (!s)
1451 return -ENOENT;
1452
1453 old_hash = bucket_hash(s, old_key);
1454 idx = bucket_scan(s, old_hash, old_key);
1455 if (idx == IDX_NIL)
1456 return -ENOENT;
1457
1458 new_hash = bucket_hash(s, new_key);
1459 if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1460 return -EEXIST;
1461
1462 remove_entry(s, idx);
1463
1464 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1465 e->key = new_key;
1466 assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
101d8e63
LP
1467
1468 return 0;
1469}
1470
8fe914ec 1471int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
89439d4f
MS
1472 struct swap_entries swap;
1473 struct plain_hashmap_entry *e;
1474 unsigned old_hash, new_hash, idx_old, idx_new;
8fe914ec
LP
1475
1476 if (!h)
1477 return -ENOENT;
1478
a3b6fafe 1479 old_hash = bucket_hash(h, old_key);
89439d4f
MS
1480 idx_old = bucket_scan(h, old_hash, old_key);
1481 if (idx_old == IDX_NIL)
8fe914ec
LP
1482 return -ENOENT;
1483
89439d4f 1484 old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
8fe914ec 1485
89439d4f
MS
1486 new_hash = bucket_hash(h, new_key);
1487 idx_new = bucket_scan(h, new_hash, new_key);
1488 if (idx_new != IDX_NIL)
1489 if (idx_old != idx_new) {
1490 remove_entry(h, idx_new);
1491 /* Compensate for a possible backward shift. */
1492 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1493 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1494 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1495 }
1496
1497 remove_entry(h, idx_old);
1498
1499 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1500 e->b.key = new_key;
8fe914ec 1501 e->value = value;
89439d4f 1502 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
8fe914ec
LP
1503
1504 return 0;
1505}

void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (e->value != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void *internal_hashmap_first(HashmapBase *h) {
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        return entry_value(h, bucket_at(h, idx));
}

void *internal_hashmap_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return (void*) e->key;
}

void *internal_hashmap_steal_first(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void *internal_hashmap_steal_first_key(HashmapBase *h) {
        struct hashmap_base_entry *e;
        void *key;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        key = (void*) e->key;
        remove_entry(h, idx);

        return key;
}

unsigned internal_hashmap_size(HashmapBase *h) {

        if (!h)
                return 0;

        return n_entries(h);
}

unsigned internal_hashmap_buckets(HashmapBase *h) {

        if (!h)
                return 0;

        return n_buckets(h);
}

int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                            ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                    ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase *internal_hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached("Unknown hashmap type");
        }

        if (r < 0) {
                internal_hashmap_free(copy);
                return NULL;
        }

        return copy;
}

char **internal_hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        assert(key);

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}
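
/* Usage note (illustrative): set_consume() takes ownership of 'value' on
 * every path - it is freed on error and also when an equal entry already
 * exists (r == 0). Only r > 0 means the set now holds this exact pointer;
 * the caller must not free or reuse 'value' in any case. set_put_strdup()
 * below builds copying insertion on top of it. */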

int set_put_strdup(Set *s, const char *p) {
        char *c;
        int r;

        assert(s);
        assert(p);

        c = strdup(p);
        if (!c)
                return -ENOMEM;

        r = set_consume(s, c);
        if (r == -EEXIST)
                return 0;

        return r;
}

int set_put_strdupv(Set *s, char **l) {
        int n = 0, r;
        char **i;

        STRV_FOREACH(i, l) {
                r = set_put_strdup(s, *i);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}