/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <fnmatch.h>
#include <pthread.h>
#include <unistd.h>
#if HAVE_VALGRIND_VALGRIND_H
#  include <valgrind/valgrind.h>
#endif

#include "alloc-util.h"
#include "extract-word.h"
#include "hashmap.h"
#include "log.h"
#include "logarithm.h"
#include "memory-util.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "sort-util.h"
#include "string-util.h"
#include "strv.h"

#if ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
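
/* A small worked illustration of the scheme above (editorial sketch, not part
 * of the references): assume 8 buckets and keys A, B, C with optimal buckets
 * hash(A) = 2, hash(B) = 2, hash(C) = 3, inserted in that order with linear
 * probing:
 *
 *   put A: bucket 2 free                           -> A at idx 2, DIB 0
 *   put B: idx 2 held by A with DIB 0, B probes on -> B at idx 3, DIB 1
 *   put C: idx 3 held by B with DIB 1 >= C's distance 0, so no swap,
 *          C probes on                             -> C at idx 4, DIB 1
 *
 * If instead an incoming entry reaches a bucket whose resident has a *smaller*
 * DIB than the incoming entry's current distance, the two are swapped and the
 * displaced resident continues probing ("take from the rich"), which is what
 * keeps the displacement distribution narrow. */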

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
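
/* Editorial note: bucket_at_virtual() (below) resolves IDX_PUT and IDX_TMP to
 * &swap->e[0] and &swap->e[1] respectively, while any idx < _IDX_SWAP_BEGIN
 * resolves to a real bucket, so insertion code can treat the entry being
 * staged exactly like a stored one. */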

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

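/* Editorial summary of the encoding above, as consumed by
 * bucket_calculate_dib()/bucket_set_dib() below:
 *
 *   raw 0x00..0xfc -> DIB stored verbatim
 *   raw 0xfd       -> DIB >= 0xfd; recomputed from the key's hash on demand
 *   raw 0xfe       -> entry still to be rehashed during in-place resize
 *   raw 0xff       -> free bucket (reported as DIB_FREE == UINT_MAX)
 *
 * Keeping the common case in one byte keeps the DIB array compact and cache
 * friendly. */
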
#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                   /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;              /* number of stored entries */
        unsigned n_buckets;              /* number of buckets */

        unsigned idx_lowest_entry;       /* Index below which all buckets are free.
                                            Makes "while (hashmap_steal_first())" loops
                                            O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
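
/* Worked example of the arithmetic above on 64-bit (sizes per the comment in
 * direct_storage): sizeof(struct direct_storage) == 39, so for Sets
 * DIRECT_BUCKETS(struct set_entry) == 39 / (8 + 1) == 4, i.e. the
 * "4 set_entries + 4 DIB bytes + 3 unused bytes" noted there. */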

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops; /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

void hashmap_trim_pools(void) {
        int r;

        /* The pool is only allocated by the main thread, but the memory can be passed to other
         * threads. Let's clean up if we are the main thread and no other threads are live. */

        /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
         * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
        if (getpid() != gettid())
                return (void) log_debug("Not cleaning up memory pools, not in main thread.");

        r = get_process_threads(0);
        if (r < 0)
                return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
        if (r != 1)
                return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");

        mempool_trim(&hashmap_pool);
        mempool_trim(&ordered_hashmap_pool);
}

#if HAVE_VALGRIND_VALGRIND_H
_destructor_ static void cleanup_pools(void) {
        /* Be nice to valgrind */
        if (RUNNING_ON_VALGRIND)
                hashmap_trim_pools();
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void* storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t* hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one yet, or when
         * we are rehashing an entire hash table because we reached the
         * maximum fill level. */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
        return CAST_ALIGN_PTR(
                        struct hashmap_base_entry,
                        (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached();
}

static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                                        bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                                        bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached();
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
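
/* Editorial trace of base_remove_entry(): with consecutive buckets 2..5
 * holding A(DIB 0), B(DIB 1), C(DIB 1), D(DIB 0), removing B at idx 3 scans
 * right for a stop bucket that is free or has DIB == 0 (found: D at idx 5).
 * Only C lies in (3, 5), so C shifts from idx 4 to idx 3 with its DIB dropping
 * from 1 to 0, and idx 4 is marked free. No tombstones are left behind; this
 * is the "backward shift deletion" from the Goossaert reference above. */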

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
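
/* Editorial usage sketch, in terms of the public macros from hashmap.h that
 * wrap the machinery above:
 *
 *     void *v;
 *     const char *k;
 *     HASHMAP_FOREACH_KEY(v, k, h)
 *             log_debug("%s -> %p", k, v);
 *
 * Removing the *current* entry while iterating is safe; the next_key
 * bookkeeping above compensates for the backward shift. Adding entries, or
 * removing any other entry, is not allowed mid-iteration (and is asserted on
 * in debug builds). */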

IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static void shared_hash_key_initialize(void) {
        random_bytes(shared_hash_key, sizeof(shared_hash_key));
}

static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];

        bool use_pool = mempool_enabled && mempool_enabled(); /* mempool_enabled is a weak symbol */

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ?: &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        static pthread_once_t once = PTHREAD_ONCE_INIT;
        assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN);
}

OrderedHashmap *ordered_hashmap_new(const struct hash_ops *hash_ops) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED);
}

Set *set_new(const struct hash_ops *hash_ops) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET);
}

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type) {
        HashmapBase *q;

        assert(h);

        if (*h) {
                assert((*h)->hash_ops == (hash_ops ?: &trivial_hash_ops));
                return 0;
        }

        q = hashmap_base_new(hash_ops, type);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 1;
}

int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN);
}

int ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED);
}

int set_ensure_allocated(Set **s, const struct hash_ops *hash_ops) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET);
}

int hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        assert(h);

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return hashmap_put(*h, key, value);
}

int ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        assert(h);

        r = ordered_hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return ordered_hashmap_put(*h, key, value);
}

int ordered_hashmap_ensure_replace(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        assert(h);

        r = ordered_hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return ordered_hashmap_replace(*h, key, value);
}

int hashmap_ensure_replace(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value) {
        int r;

        assert(h);

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        return hashmap_replace(*h, key, value);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}

HashmapBase* _hashmap_free(HashmapBase *h) {
        if (h) {
                _hashmap_clear(h);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

void _hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->hash_ops->free_key || h->hash_ops->free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        v = _hashmap_first_key_and_value(h, true, &k);

                        if (h->hash_ops->free_key)
                                h->hash_ops->free_key(k);

                        if (h->hash_ops->free_value)
                                h->hash_ops->free_value(v);
                }
        }

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
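        /* E.g. with INV_KEEP_FREE == 5: for 100 entries we ask for
         * 100 + 100/4 == 125 buckets, i.e. a load factor of 0.8. */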
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert_se(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
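
/* Editorial sketch of the return-value convention implemented above (m, v, w
 * are hypothetical, with v != w):
 *
 *     Hashmap *m = hashmap_new(&string_hash_ops);
 *     assert_se(hashmap_put(m, "a", v) == 1);       // new entry inserted
 *     assert_se(hashmap_put(m, "a", v) == 0);       // same key and value: no-op
 *     assert_se(hashmap_put(m, "a", w) == -EEXIST); // same key, different value
 */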

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}

int set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key) {
        int r;

        assert(s);

        r = set_ensure_allocated(s, hash_ops);
        if (r < 0)
                return r;

        return set_put(*s, key);
}

int set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key) {
        int r;

        r = set_ensure_put(s, hash_ops, key);
        if (r <= 0) {
                if (hash_ops && hash_ops->free_key)
                        hash_ops->free_key(key);
                else
                        free(key);
        }

        return r;
}
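
/* Editorial note on ownership in the two helpers above: set_ensure_put() never
 * takes ownership of the key, while set_ensure_consume() always does. On error
 * or when an equal key was already present (r <= 0) the key is freed via the
 * ops' free_key() (or plain free()); on success (r > 0) it now belongs to the
 * set. Either way the caller must not free it afterwards. */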

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}

void* _hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void* hashmap_get2(Hashmap *h, const void *key, void **ret) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (ret)
                *ret = (void*) e->b.key;

        return e->value;
}

bool _hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void* _hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}

void* hashmap_remove2(Hashmap *h, const void *key, void **ret) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h) {
                if (ret)
                        *ret = NULL;
                return NULL;
        }

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL) {
                if (ret)
                        *ret = NULL;
                return NULL;
        }

        e = plain_bucket_at(h, idx);
        data = e->value;
        if (ret)
                *ret = (void*) e->b.key;

        remove_entry(h, idx);

        return data;
}

int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}

int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}

void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        if (entry_value(h, e) != value)
                return NULL;

        remove_entry(h, idx);

        return value;
}

static unsigned find_first_entry(HashmapBase *h) {
        Iterator i = ITERATOR_FIRST;

        if (!h || !n_entries(h))
                return IDX_NIL;

        return hashmap_iterate_entry(h, &i);
}

void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
        struct hashmap_base_entry *e;
        void *key, *data;
        unsigned idx;

        idx = find_first_entry(h);
        if (idx == IDX_NIL) {
                if (ret_key)
                        *ret_key = NULL;
                return NULL;
        }

        e = bucket_at(h, idx);
        key = (void*) e->key;
        data = entry_value(h, e);

        if (remove)
                remove_entry(h, idx);

        if (ret_key)
                *ret_key = key;

        return data;
}

unsigned _hashmap_size(HashmapBase *h) {
        if (!h)
                return 0;

        return n_entries(h);
}

unsigned _hashmap_buckets(HashmapBase *h) {
        if (!h)
                return 0;

        return n_buckets(h);
}

int _hashmap_merge(Hashmap *h, Hashmap *other) {
        Iterator i;
        unsigned idx;

        assert(h);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
                int r;

                r = hashmap_put(h, pe->b.key, pe->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}

int set_merge(Set *s, Set *other) {
        Iterator i;
        unsigned idx;

        assert(s);

        HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
                struct set_entry *se = set_bucket_at(other, idx);
                int r;

                r = set_put(s, se->b.key);
                if (r < 0)
                        return r;
        }

        return 0;
}

int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
        int r;

        assert(h);

        r = resize_buckets(h, entries_add);
        if (r < 0)
                return r;

        return 0;
}

/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int _hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}

int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        remove_entry(other, idx);
        return 0;
}

HashmapBase* _hashmap_copy(HashmapBase *h) {
        HashmapBase *copy;
        int r;

        assert(h);

        copy = hashmap_base_new(h->hash_ops, h->type);
        if (!copy)
                return NULL;

        switch (h->type) {
        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
                break;
        case HASHMAP_TYPE_SET:
                r = set_merge((Set*)copy, (Set*)h);
                break;
        default:
                assert_not_reached();
        }

        if (r < 0)
                return _hashmap_free(copy);

        return copy;
}

char** _hashmap_get_strv(HashmapBase *h) {
        char **sv;
        Iterator i;
        unsigned idx, n;

        if (!h)
                return new0(char*, 1);

        sv = new(char*, n_entries(h)+1);
        if (!sv)
                return NULL;

        n = 0;
        HASHMAP_FOREACH_IDX(idx, h, i)
                sv[n++] = entry_value(h, bucket_at(h, idx));
        sv[n] = NULL;

        return sv;
}

char** set_to_strv(Set **s) {
        assert(s);

        /* This is similar to set_get_strv(), but invalidates the set on success. */

        char **v = new(char*, set_size(*s) + 1);
        if (!v)
                return NULL;

        for (char **p = v; (*p = set_steal_first(*s)); p++)
                ;

        assert(set_isempty(*s));
        *s = set_free(*s);
        return v;
}

void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
        struct ordered_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = ordered_bucket_at(h, idx);
        if (e->iterate_next == IDX_NIL)
                return NULL;
        return ordered_bucket_at(h, e->iterate_next)->p.value;
}

int set_consume(Set *s, void *value) {
        int r;

        assert(s);
        assert(value);

        r = set_put(s, value);
        if (r <= 0)
                free(value);

        return r;
}

int hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v) {
        int r;

        assert(h);

        r = hashmap_ensure_allocated(h, hash_ops);
        if (r < 0)
                return r;

        _cleanup_free_ char *kdup = NULL, *vdup = NULL;

        kdup = strdup(k);
        if (!kdup)
                return -ENOMEM;

        if (v) {
                vdup = strdup(v);
                if (!vdup)
                        return -ENOMEM;
        }

        r = hashmap_put(*h, kdup, vdup);
        if (r < 0) {
                if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
                        return 0;
                return r;
        }
        /* r == 0 with a non-null vdup would mean vdup is already in the hashmap, which cannot happen. */
        assert(vdup == NULL || r > 0);
        if (r > 0)
                kdup = vdup = NULL;

        return r;
}
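
/* Editorial usage sketch for the helper above, assuming the key+value-freeing
 * string_hash_ops_free_free from hash-funcs.h:
 *
 *     Hashmap *m = NULL;
 *     r = hashmap_put_strdup_full(&m, &string_hash_ops_free_free, "KEY", "VALUE");
 *
 * Both strings are copied, so the caller's buffers may be freed or reused;
 * storing an identical key/value pair a second time returns 0 rather than
 * -EEXIST. */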

int set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n) {
        char *c;
        int r;

        assert(s);
        assert(p);

        r = set_ensure_allocated(s, hash_ops);
        if (r < 0)
                return r;

        if (n == SIZE_MAX) {
                if (set_contains(*s, (char*) p))
                        return 0;

                c = strdup(p);
        } else
                c = strndup(p, n);
        if (!c)
                return -ENOMEM;

        return set_consume(*s, c);
}

int set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l) {
        int n = 0, r;

        assert(s);

        STRV_FOREACH(i, l) {
                r = set_put_strndup_full(s, hash_ops, *i, SIZE_MAX);
                if (r < 0)
                        return r;

                n += r;
        }

        return n;
}

int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
        const char *p = ASSERT_PTR(v);
        int r;

        assert(s);

        for (;;) {
                char *word;

                r = extract_first_word(&p, &word, separators, flags);
                if (r <= 0)
                        return r;

                r = set_consume(s, word);
                if (r < 0)
                        return r;
        }
}

/* expand the cachemem if needed, return true if newly (re)activated. */
static int cachemem_maintain(CacheMem *mem, size_t size) {
        assert(mem);

        if (!GREEDY_REALLOC(mem->ptr, size)) {
                if (size > 0)
                        return -ENOMEM;
        }

        if (!mem->active) {
                mem->active = true;
                return true;
        }

        return false;
}

int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
        bool sync_keys = false, sync_values = false;
        size_t size;
        int r;

        assert(cache);
        assert(cache->hashmap);

        size = n_entries(cache->hashmap);

        if (res_keys) {
                r = cachemem_maintain(&cache->keys, size);
                if (r < 0)
                        return r;

                sync_keys = r;
        } else
                cache->keys.active = false;

        if (res_values) {
                r = cachemem_maintain(&cache->values, size);
                if (r < 0)
                        return r;

                sync_values = r;
        } else
                cache->values.active = false;

        if (cache->hashmap->dirty) {
                if (cache->keys.active)
                        sync_keys = true;
                if (cache->values.active)
                        sync_values = true;

                cache->hashmap->dirty = false;
        }

        if (sync_keys || sync_values) {
                unsigned i, idx;
                Iterator iter;

                i = 0;
                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
                        struct hashmap_base_entry *e;

                        e = bucket_at(cache->hashmap, idx);

                        if (sync_keys)
                                cache->keys.ptr[i] = e->key;
                        if (sync_values)
                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
                        i++;
                }
        }

        if (res_keys)
                *res_keys = cache->keys.ptr;
        if (res_values)
                *res_values = cache->values.ptr;
        if (res_n_entries)
                *res_n_entries = size;

        return 0;
}
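
/* Editorial usage sketch, assuming the public wrapper
 * hashmap_iterated_cache_new() from hashmap.h:
 *
 *     IteratedCache *c = hashmap_iterated_cache_new(m);
 *     const void **keys;
 *     unsigned n;
 *     r = iterated_cache_get(c, &keys, NULL, &n);
 *     ...
 *     c = iterated_cache_free(c);
 *
 * The arrays are refreshed lazily: a call only re-walks the hashmap if it was
 * dirtied since the previous call, or if a previously inactive array was
 * requested again. */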

IteratedCache* iterated_cache_free(IteratedCache *cache) {
        if (cache) {
                free(cache->keys.ptr);
                free(cache->values.ptr);
        }

        return mfree(cache);
}

int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
        _cleanup_free_ char *str = NULL;
        size_t separator_len, len = 0;
        const char *value;
        bool first;

        assert(ret);

        if (set_isempty(s)) {
                *ret = NULL;
                return 0;
        }

        separator_len = strlen_ptr(separator);

        if (separator_len == 0)
                wrap_with_separator = false;

        first = !wrap_with_separator;

        SET_FOREACH(value, s) {
                size_t l = strlen_ptr(value);

                if (l == 0)
                        continue;

                if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
                        return -ENOMEM;

                if (separator_len > 0 && !first) {
                        memcpy(str + len, separator, separator_len);
                        len += separator_len;
                }

                memcpy(str + len, value, l);
                len += l;
                first = false;
        }

        /* If all values were empty strings the loop above never allocated anything, hence make sure here
         * that there is room for the optional wrapping separators and the trailing NUL byte. */
        if (!GREEDY_REALLOC(str, len + (wrap_with_separator ? separator_len : 0) + 1))
                return -ENOMEM;

        if (wrap_with_separator) {
                memcpy(str + len, separator, separator_len);
                len += separator_len;
        }

        str[len] = '\0';

        *ret = TAKE_PTR(str);
        return 0;
}
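
/* Editorial example: for a set containing "a" and "b" (iteration order is
 * unspecified), set_strjoin(s, ",", false, &x) yields "a,b" or "b,a";
 * with wrap_with_separator == true it yields ",a,b," or ",b,a,". Empty
 * values are skipped entirely. */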

bool set_equal(Set *a, Set *b) {
        void *p;

        /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same
         * entries */

        if (a == b)
                return true;

        if (set_isempty(a) && set_isempty(b))
                return true;

        if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
                                         * already */
                return false;

        SET_FOREACH(p, a)
                if (!set_contains(b, p))
                        return false;

        /* If we have the same hashops, then we don't need to check things backwards given we compared the
         * size and that all of a is in b. */
        if (a->b.hash_ops == b->b.hash_ops)
                return true;

        SET_FOREACH(p, b)
                if (!set_contains(a, p))
                        return false;

        return true;
}

static bool set_fnmatch_one(Set *patterns, const char *needle) {
        const char *p;

        assert(needle);

        /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as non-matching pattern */

        SET_FOREACH(p, patterns)
                if (fnmatch(p, needle, 0) == 0)
                        return true;

        return false;
}

bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
        assert(needle);

        if (set_fnmatch_one(exclude_patterns, needle))
                return false;

        if (set_isempty(include_patterns))
                return true;

        return set_fnmatch_one(include_patterns, needle);
}
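
/* Editorial example: with include_patterns = { "eth*" } and exclude_patterns =
 * { "*.debug" }, set_fnmatch(includes, excludes, "eth0") is true,
 * "eth0.debug" is false (excludes win), and "lo" is false (no include
 * matches). An empty include set admits everything that is not excluded. */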

static int hashmap_entry_compare(
                struct hashmap_base_entry * const *a,
                struct hashmap_base_entry * const *b,
                compare_func_t compare) {

        assert(a && *a);
        assert(b && *b);
        assert(compare);

        return compare((*a)->key, (*b)->key);
}

static int _hashmap_dump_entries_sorted(
                HashmapBase *h,
                void ***ret,
                size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        Iterator iter;
        unsigned idx;
        size_t n = 0;

        assert(ret);
        assert(ret_n);

        if (_hashmap_size(h) == 0) {
                *ret = NULL;
                *ret_n = 0;
                return 0;
        }

        /* We append one more element than needed so that the resulting array can be used as a strv. We
         * don't count this entry in the returned size. */
        entries = new(void*, _hashmap_size(h) + 1);
        if (!entries)
                return -ENOMEM;

        HASHMAP_FOREACH_IDX(idx, h, iter)
                entries[n++] = bucket_at(h, idx);

        assert(n == _hashmap_size(h));
        entries[n] = NULL;

        typesafe_qsort_r((struct hashmap_base_entry**) entries, n,
                         hashmap_entry_compare, h->hash_ops->compare);

        *ret = TAKE_PTR(entries);
        *ret_n = n;
        return 0;
}

int _hashmap_dump_keys_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        assert(ret);

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Reuse the array. */
        FOREACH_ARRAY(e, entries, n)
                *e = (void*) (*(struct hashmap_base_entry**) e)->key;

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}

int _hashmap_dump_sorted(HashmapBase *h, void ***ret, size_t *ret_n) {
        _cleanup_free_ void **entries = NULL;
        size_t n;
        int r;

        assert(ret);

        r = _hashmap_dump_entries_sorted(h, &entries, &n);
        if (r < 0)
                return r;

        /* Reuse the array. */
        FOREACH_ARRAY(e, entries, n)
                *e = entry_value(h, *(struct hashmap_base_entry**) e);

        *ret = TAKE_PTR(entries);
        if (ret_n)
                *ret_n = n;
        return 0;
}