1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <fnmatch.h>
5 #include <pthread.h>
6 #include <stdint.h>
7 #include <stdlib.h>
8
9 #include "alloc-util.h"
10 #include "fileio.h"
11 #include "hashmap.h"
12 #include "logarithm.h"
13 #include "macro.h"
14 #include "memory-util.h"
15 #include "mempool.h"
16 #include "missing_syscall.h"
17 #include "process-util.h"
18 #include "random-util.h"
19 #include "set.h"
20 #include "siphash24.h"
21 #include "string-util.h"
22 #include "strv.h"
23
24 #if ENABLE_DEBUG_HASHMAP
25 #include "list.h"
26 #endif
27
28 /*
29 * Implementation of hashmaps.
30 * Addressing: open
31 * - uses less RAM compared to closed addressing (chaining), because
32 * our entries are small (especially in Sets, which tend to contain
33 * the majority of entries in systemd).
34 * Collision resolution: Robin Hood
35 * - tends to equalize displacement of entries from their optimal buckets.
36 * Probe sequence: linear
37 * - though theoretically worse than random probing/uniform hashing/double
38 * hashing, it is good for cache locality.
39 *
40 * References:
41 * Celis, P. 1986. Robin Hood Hashing.
42  *   Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
43 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
44 * - The results are derived for random probing. Suggests deletion with
45 * tombstones and two mean-centered search methods. None of that works
46 * well for linear probing.
47 *
48 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
49 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
50 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
51 * http://www.math.uu.se/~svante/papers/sj157.pdf
52 * - Applies to Robin Hood with linear probing. Contains remarks on
53 * the unsuitability of mean-centered search with linear probing.
54 *
55 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
56 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
57 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
58 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
59 * in a successful search), and Janson writes about displacement. C = d + 1.
60 *
61 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
62 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
63 * - Explanation of backward shift deletion with pictures.
64 *
65 * Khuong, P. 2013. The Other Robin Hood Hashing.
66 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
67 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
68 */
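
/*
 * Illustrative sketch (not part of the original file): the core Robin Hood
 * rule on a toy open-addressing table with linear probing and a trivial
 * modulo "hash". The names toy_bucket, toy_insert and TOY_BUCKETS are
 * invented for this example; the real code below stores the distances in a
 * separate, compact DIB byte array rather than in per-entry fields, and it
 * also handles duplicates, deletion and resizing.
 */
#define TOY_BUCKETS 8U

struct toy_bucket {
        unsigned key;
        unsigned dib;   /* distance from the key's initial ("home") bucket */
        bool used;
};

/* Assumes the table never fills up completely, which the real code below
 * guarantees by always keeping a fraction of the buckets free. */
static void toy_insert(struct toy_bucket t[TOY_BUCKETS], unsigned key) {
        unsigned idx = key % TOY_BUCKETS, dib = 0;

        for (;; idx = (idx + 1) % TOY_BUCKETS, dib++) {
                if (!t[idx].used) {
                        t[idx] = (struct toy_bucket) { .key = key, .dib = dib, .used = true };
                        return;
                }

                if (t[idx].dib < dib) {
                        /* The resident is closer to its home bucket than we
                         * are, i.e. "richer". It gives up its spot and we
                         * continue the scan, now inserting the evicted entry. */
                        unsigned tmp_key = t[idx].key, tmp_dib = t[idx].dib;

                        t[idx].key = key;
                        t[idx].dib = dib;
                        key = tmp_key;
                        dib = tmp_dib;
                }
        }
}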
69
70 /*
71 * XXX Ideas for improvement:
72 * For unordered hashmaps, randomize iteration order, similarly to Perl:
73 * http://blog.booking.com/hardening-perls-hash-function.html
74 */
75
76 /* INV_KEEP_FREE = 1 / (1 - max_load_factor)
77 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
78 #define INV_KEEP_FREE 5U
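
/* Worked example (for illustration): resize_buckets() below sizes the table as
 * m = n + n/(INV_KEEP_FREE - 1), so e.g. 1000 entries get at least
 * 1000 + 1000/4 = 1250 buckets, i.e. a maximum load factor of 1000/1250 = 0.8. */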
79
80 /* Fields common to entries of all hashmap/set types */
81 struct hashmap_base_entry {
82 const void *key;
83 };
84
85 /* Entry types for specific hashmap/set types
86 * hashmap_base_entry must be at the beginning of each entry struct. */
87
88 struct plain_hashmap_entry {
89 struct hashmap_base_entry b;
90 void *value;
91 };
92
93 struct ordered_hashmap_entry {
94 struct plain_hashmap_entry p;
95 unsigned iterate_next, iterate_previous;
96 };
97
98 struct set_entry {
99 struct hashmap_base_entry b;
100 };
101
102 /* In several functions it is advantageous to have the hash table extended
103 * virtually by a couple of additional buckets. We reserve special index values
104 * for these "swap" buckets. */
105 #define _IDX_SWAP_BEGIN (UINT_MAX - 3)
106 #define IDX_PUT (_IDX_SWAP_BEGIN + 0)
107 #define IDX_TMP (_IDX_SWAP_BEGIN + 1)
108 #define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)
109
110 #define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
111 #define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */
112
113 assert_cc(IDX_FIRST == _IDX_SWAP_END);
114 assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
115
116 /* Storage space for the "swap" buckets.
117 * All entry types can fit into an ordered_hashmap_entry. */
118 struct swap_entries {
119 struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
120 };
121
122 /* Distance from Initial Bucket */
123 typedef uint8_t dib_raw_t;
124 #define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
125 #define DIB_RAW_REHASH ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
126 #define DIB_RAW_FREE ((dib_raw_t)0xffU) /* a free bucket */
127 #define DIB_RAW_INIT ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */
128
129 #define DIB_FREE UINT_MAX
130
131 #if ENABLE_DEBUG_HASHMAP
132 struct hashmap_debug_info {
133 LIST_FIELDS(struct hashmap_debug_info, debug_list);
134 unsigned max_entries; /* high watermark of n_entries */
135
136 /* who allocated this hashmap */
137 int line;
138 const char *file;
139 const char *func;
140
141 /* fields to detect modification while iterating */
142 unsigned put_count; /* counts puts into the hashmap */
143 unsigned rem_count; /* counts removals from hashmap */
144 unsigned last_rem_idx; /* remembers last removal index */
145 };
146
147 /* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
148 static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
149 static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
150 #endif
151
152 enum HashmapType {
153 HASHMAP_TYPE_PLAIN,
154 HASHMAP_TYPE_ORDERED,
155 HASHMAP_TYPE_SET,
156 _HASHMAP_TYPE_MAX
157 };
158
159 struct _packed_ indirect_storage {
160 void *storage; /* where buckets and DIBs are stored */
161 uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */
162
163 unsigned n_entries; /* number of stored entries */
164 unsigned n_buckets; /* number of buckets */
165
166 unsigned idx_lowest_entry; /* Index below which all buckets are free.
167 Makes "while (hashmap_steal_first())" loops
168 O(n) instead of O(n^2) for unordered hashmaps. */
169 uint8_t _pad[3]; /* padding for the whole HashmapBase */
170 /* The bitfields in HashmapBase complete the alignment of the whole thing. */
171 };
172
173 struct direct_storage {
174 /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
175 * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
176 * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
177 uint8_t storage[sizeof(struct indirect_storage)];
178 };
179
180 #define DIRECT_BUCKETS(entry_t) \
181 (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
182
183 /* We should be able to store at least one entry directly. */
184 assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);
185
186 /* We have 3 bits for n_direct_entries. */
187 assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
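
/* Worked example (64-bit, for illustration): struct direct_storage is 39 bytes
 * there (see above), a set_entry is a single pointer (8 bytes) and each bucket
 * needs one extra dib_raw_t byte, hence
 * DIRECT_BUCKETS(struct set_entry) = 39 / (8 + 1) = 4 direct buckets. */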
188
189 /* Hashmaps with directly stored entries all use this shared hash key.
190 * It's no big deal if the key is guessed, because there can be only
191 * a handful of directly stored entries in a hashmap. When a hashmap
192 * outgrows direct storage, it gets its own key for indirect storage. */
193 static uint8_t shared_hash_key[HASH_KEY_SIZE];
194
195 /* Fields that all hashmap/set types must have */
196 struct HashmapBase {
197 const struct hash_ops *hash_ops; /* hash and compare ops to use */
198
199 union _packed_ {
200 struct indirect_storage indirect; /* if has_indirect */
201 struct direct_storage direct; /* if !has_indirect */
202 };
203
204 enum HashmapType type:2; /* HASHMAP_TYPE_* */
205 bool has_indirect:1; /* whether indirect storage is used */
206 unsigned n_direct_entries:3; /* Number of entries in direct storage.
207 * Only valid if !has_indirect. */
208 bool from_pool:1; /* whether was allocated from mempool */
209 bool dirty:1; /* whether dirtied since last iterated_cache_get() */
210 bool cached:1; /* whether this hashmap is being cached */
211
212 #if ENABLE_DEBUG_HASHMAP
213 struct hashmap_debug_info debug;
214 #endif
215 };
216
217 /* Specific hash types
218 * HashmapBase must be at the beginning of each hashmap struct. */
219
220 struct Hashmap {
221 struct HashmapBase b;
222 };
223
224 struct OrderedHashmap {
225 struct HashmapBase b;
226 unsigned iterate_list_head, iterate_list_tail;
227 };
228
229 struct Set {
230 struct HashmapBase b;
231 };
232
233 typedef struct CacheMem {
234 const void **ptr;
235 size_t n_populated;
236 bool active:1;
237 } CacheMem;
238
239 struct IteratedCache {
240 HashmapBase *hashmap;
241 CacheMem keys, values;
242 };
243
244 DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
245 DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
246 /* No need for a separate Set pool */
247 assert_cc(sizeof(Hashmap) == sizeof(Set));
248
249 struct hashmap_type_info {
250 size_t head_size;
251 size_t entry_size;
252 struct mempool *mempool;
253 unsigned n_direct_buckets;
254 };
255
256 static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
257 [HASHMAP_TYPE_PLAIN] = {
258 .head_size = sizeof(Hashmap),
259 .entry_size = sizeof(struct plain_hashmap_entry),
260 .mempool = &hashmap_pool,
261 .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
262 },
263 [HASHMAP_TYPE_ORDERED] = {
264 .head_size = sizeof(OrderedHashmap),
265 .entry_size = sizeof(struct ordered_hashmap_entry),
266 .mempool = &ordered_hashmap_pool,
267 .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
268 },
269 [HASHMAP_TYPE_SET] = {
270 .head_size = sizeof(Set),
271 .entry_size = sizeof(struct set_entry),
272 .mempool = &hashmap_pool,
273 .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
274 },
275 };
276
277 #if VALGRIND
278 _destructor_ static void cleanup_pools(void) {
279 _cleanup_free_ char *t = NULL;
280 int r;
281
282 /* Be nice to valgrind */
283
284 /* The pool is only allocated by the main thread, but the memory can
285 * be passed to other threads. Let's clean up if we are the main thread
286 * and no other threads are live. */
287 /* We build our own is_main_thread() here, which doesn't use C11
288 * TLS based caching of the result. That's because valgrind apparently
289 * doesn't like malloc() (which C11 TLS internally uses) to be called
290  * from a GCC destructor. */
291 if (getpid() != gettid())
292 return;
293
294 r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
295 if (r < 0 || !streq(t, "1"))
296 return;
297
298 mempool_drop(&hashmap_pool);
299 mempool_drop(&ordered_hashmap_pool);
300 }
301 #endif
302
303 static unsigned n_buckets(HashmapBase *h) {
304 return h->has_indirect ? h->indirect.n_buckets
305 : hashmap_type_info[h->type].n_direct_buckets;
306 }
307
308 static unsigned n_entries(HashmapBase *h) {
309 return h->has_indirect ? h->indirect.n_entries
310 : h->n_direct_entries;
311 }
312
313 static void n_entries_inc(HashmapBase *h) {
314 if (h->has_indirect)
315 h->indirect.n_entries++;
316 else
317 h->n_direct_entries++;
318 }
319
320 static void n_entries_dec(HashmapBase *h) {
321 if (h->has_indirect)
322 h->indirect.n_entries--;
323 else
324 h->n_direct_entries--;
325 }
326
327 static void* storage_ptr(HashmapBase *h) {
328 return h->has_indirect ? h->indirect.storage
329 : h->direct.storage;
330 }
331
332 static uint8_t* hash_key(HashmapBase *h) {
333 return h->has_indirect ? h->indirect.hash_key
334 : shared_hash_key;
335 }
336
337 static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
338 struct siphash state;
339 uint64_t hash;
340
341 siphash24_init(&state, hash_key(h));
342
343 h->hash_ops->hash(p, &state);
344
345 hash = siphash24_finalize(&state);
346
347 return (unsigned) (hash % n_buckets(h));
348 }
349 #define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
350
351 static void base_set_dirty(HashmapBase *h) {
352 h->dirty = true;
353 }
354 #define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
355
356 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
357 static uint8_t current[HASH_KEY_SIZE];
358 static bool current_initialized = false;
359
360 /* Returns a hash function key to use. In order to keep things
361 * fast we will not generate a new key each time we allocate a
362 * new hash table. Instead, we'll just reuse the most recently
363 * generated one, except if we never generated one or when we
364 * are rehashing an entire hash table because we reached a
365  * fill level limit */
366
367 if (!current_initialized || !reuse_is_ok) {
368 random_bytes(current, sizeof(current));
369 current_initialized = true;
370 }
371
372 memcpy(hash_key, current, sizeof(current));
373 }
374
375 static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
376 return (struct hashmap_base_entry*)
377 ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
378 }
379
380 static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
381 return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
382 }
383
384 static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
385 return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
386 }
387
388 static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
389 return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
390 }
391
392 static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
393 return &swap->e[idx - _IDX_SWAP_BEGIN];
394 }
395
396 /* Returns a pointer to the bucket at index idx.
397 * Understands real indexes and swap indexes, hence "_virtual". */
398 static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
399 unsigned idx) {
400 if (idx < _IDX_SWAP_BEGIN)
401 return bucket_at(h, idx);
402
403 if (idx < _IDX_SWAP_END)
404 return &bucket_at_swap(swap, idx)->p.b;
405
406 assert_not_reached();
407 }
408
409 static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
410 return (dib_raw_t*)
411 ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
412 }
413
414 static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
415 return idx >= from ? idx - from
416 : n_buckets(h) + idx - from;
417 }
418
419 static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
420 unsigned initial_bucket;
421
422 if (raw_dib == DIB_RAW_FREE)
423 return DIB_FREE;
424
425 if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
426 return raw_dib;
427
428 /*
429 * Having an overflow DIB value is very unlikely. The hash function
430 * would have to be bad. For example, in a table of size 2^24 filled
431 * to load factor 0.9 the maximum observed DIB is only about 60.
432 * In theory (assuming I used Maxima correctly), for an infinite size
433 * hash table with load factor 0.8 the probability of a given entry
434 * having DIB > 40 is 1.9e-8.
435 * This returns the correct DIB value by recomputing the hash value in
436 * the unlikely case. XXX Hitting this case could be a hint to rehash.
437 */
438 initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
439 return bucket_distance(h, idx, initial_bucket);
440 }
441
442 static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
443 dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
444 }
445
446 static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
447 dib_raw_t *dibs;
448
449 dibs = dib_raw_ptr(h);
450
451 for ( ; idx < n_buckets(h); idx++)
452 if (dibs[idx] != DIB_RAW_FREE)
453 return idx;
454
455 return IDX_NIL;
456 }
457
458 static void bucket_mark_free(HashmapBase *h, unsigned idx) {
459 memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
460 bucket_set_dib(h, idx, DIB_FREE);
461 }
462
463 static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
464 unsigned from, unsigned to) {
465 struct hashmap_base_entry *e_from, *e_to;
466
467 assert(from != to);
468
469 e_from = bucket_at_virtual(h, swap, from);
470 e_to = bucket_at_virtual(h, swap, to);
471
472 memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);
473
474 if (h->type == HASHMAP_TYPE_ORDERED) {
475 OrderedHashmap *lh = (OrderedHashmap*) h;
476 struct ordered_hashmap_entry *le, *le_to;
477
478 le_to = (struct ordered_hashmap_entry*) e_to;
479
480 if (le_to->iterate_next != IDX_NIL) {
481 le = (struct ordered_hashmap_entry*)
482 bucket_at_virtual(h, swap, le_to->iterate_next);
483 le->iterate_previous = to;
484 }
485
486 if (le_to->iterate_previous != IDX_NIL) {
487 le = (struct ordered_hashmap_entry*)
488 bucket_at_virtual(h, swap, le_to->iterate_previous);
489 le->iterate_next = to;
490 }
491
492 if (lh->iterate_list_head == from)
493 lh->iterate_list_head = to;
494 if (lh->iterate_list_tail == from)
495 lh->iterate_list_tail = to;
496 }
497 }
498
499 static unsigned next_idx(HashmapBase *h, unsigned idx) {
500 return (idx + 1U) % n_buckets(h);
501 }
502
503 static unsigned prev_idx(HashmapBase *h, unsigned idx) {
504 return (n_buckets(h) + idx - 1U) % n_buckets(h);
505 }
506
507 static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
508 switch (h->type) {
509
510 case HASHMAP_TYPE_PLAIN:
511 case HASHMAP_TYPE_ORDERED:
512 return ((struct plain_hashmap_entry*)e)->value;
513
514 case HASHMAP_TYPE_SET:
515 return (void*) e->key;
516
517 default:
518 assert_not_reached();
519 }
520 }
521
522 static void base_remove_entry(HashmapBase *h, unsigned idx) {
523 unsigned left, right, prev, dib;
524 dib_raw_t raw_dib, *dibs;
525
526 dibs = dib_raw_ptr(h);
527 assert(dibs[idx] != DIB_RAW_FREE);
528
529 #if ENABLE_DEBUG_HASHMAP
530 h->debug.rem_count++;
531 h->debug.last_rem_idx = idx;
532 #endif
533
534 left = idx;
535 /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
536 for (right = next_idx(h, left); ; right = next_idx(h, right)) {
537 raw_dib = dibs[right];
538 if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
539 break;
540
541 /* The buckets are not supposed to be all occupied and with DIB > 0.
542 * That would mean we could make everyone better off by shifting them
543 * backward. This scenario is impossible. */
544 assert(left != right);
545 }
546
547 if (h->type == HASHMAP_TYPE_ORDERED) {
548 OrderedHashmap *lh = (OrderedHashmap*) h;
549 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
550
551 if (le->iterate_next != IDX_NIL)
552 ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
553 else
554 lh->iterate_list_tail = le->iterate_previous;
555
556 if (le->iterate_previous != IDX_NIL)
557 ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
558 else
559 lh->iterate_list_head = le->iterate_next;
560 }
561
562 /* Now shift all buckets in the interval (left, right) one step backwards */
563 for (prev = left, left = next_idx(h, left); left != right;
564 prev = left, left = next_idx(h, left)) {
565 dib = bucket_calculate_dib(h, left, dibs[left]);
566 assert(dib != 0);
567 bucket_move_entry(h, NULL, left, prev);
568 bucket_set_dib(h, prev, dib - 1);
569 }
570
571 bucket_mark_free(h, prev);
572 n_entries_dec(h);
573 base_set_dirty(h);
574 }
575 #define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
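
/*
 * Worked example of the backward shift performed by base_remove_entry() above
 * (keys and layout invented for illustration). Removing C at index 2 from
 *
 *   idx:  0    1    2    3    4    5
 *   key:  A    B    C    D    E    -
 *   DIB:  0    0    1    1    2    free
 *
 * finds the stop bucket at index 5 (free), then shifts D and E one step back,
 * decrementing their DIBs, and frees index 4:
 *
 *   idx:  0    1    2    3    4    5
 *   key:  A    B    D    E    -    -
 *   DIB:  0    0    0    1    free free
 */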
576
577 static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
578 struct ordered_hashmap_entry *e;
579 unsigned idx;
580
581 assert(h);
582 assert(i);
583
584 if (i->idx == IDX_NIL)
585 goto at_end;
586
587 if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
588 goto at_end;
589
590 if (i->idx == IDX_FIRST) {
591 idx = h->iterate_list_head;
592 e = ordered_bucket_at(h, idx);
593 } else {
594 idx = i->idx;
595 e = ordered_bucket_at(h, idx);
596 /*
597 * We allow removing the current entry while iterating, but removal may cause
598 * a backward shift. The next entry may thus move one bucket to the left.
599 * To detect when it happens, we remember the key pointer of the entry we were
600 * going to iterate next. If it does not match, there was a backward shift.
601 */
602 if (e->p.b.key != i->next_key) {
603 idx = prev_idx(HASHMAP_BASE(h), idx);
604 e = ordered_bucket_at(h, idx);
605 }
606 assert(e->p.b.key == i->next_key);
607 }
608
609 #if ENABLE_DEBUG_HASHMAP
610 i->prev_idx = idx;
611 #endif
612
613 if (e->iterate_next != IDX_NIL) {
614 struct ordered_hashmap_entry *n;
615 i->idx = e->iterate_next;
616 n = ordered_bucket_at(h, i->idx);
617 i->next_key = n->p.b.key;
618 } else
619 i->idx = IDX_NIL;
620
621 return idx;
622
623 at_end:
624 i->idx = IDX_NIL;
625 return IDX_NIL;
626 }
627
628 static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
629 unsigned idx;
630
631 assert(h);
632 assert(i);
633
634 if (i->idx == IDX_NIL)
635 goto at_end;
636
637 if (i->idx == IDX_FIRST) {
638 /* fast forward to the first occupied bucket */
639 if (h->has_indirect) {
640 i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
641 h->indirect.idx_lowest_entry = i->idx;
642 } else
643 i->idx = skip_free_buckets(h, 0);
644
645 if (i->idx == IDX_NIL)
646 goto at_end;
647 } else {
648 struct hashmap_base_entry *e;
649
650 assert(i->idx > 0);
651
652 e = bucket_at(h, i->idx);
653 /*
654 * We allow removing the current entry while iterating, but removal may cause
655 * a backward shift. The next entry may thus move one bucket to the left.
656 * To detect when it happens, we remember the key pointer of the entry we were
657 * going to iterate next. If it does not match, there was a backward shift.
658 */
659 if (e->key != i->next_key)
660 e = bucket_at(h, --i->idx);
661
662 assert(e->key == i->next_key);
663 }
664
665 idx = i->idx;
666 #if ENABLE_DEBUG_HASHMAP
667 i->prev_idx = idx;
668 #endif
669
670 i->idx = skip_free_buckets(h, i->idx + 1);
671 if (i->idx != IDX_NIL)
672 i->next_key = bucket_at(h, i->idx)->key;
673 else
674 i->idx = IDX_NIL;
675
676 return idx;
677
678 at_end:
679 i->idx = IDX_NIL;
680 return IDX_NIL;
681 }
682
683 static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
684 if (!h) {
685 i->idx = IDX_NIL;
686 return IDX_NIL;
687 }
688
689 #if ENABLE_DEBUG_HASHMAP
690 if (i->idx == IDX_FIRST) {
691 i->put_count = h->debug.put_count;
692 i->rem_count = h->debug.rem_count;
693 } else {
694 /* While iterating, must not add any new entries */
695 assert(i->put_count == h->debug.put_count);
696 /* ... or remove entries other than the current one */
697 assert(i->rem_count == h->debug.rem_count ||
698 (i->rem_count == h->debug.rem_count - 1 &&
699 i->prev_idx == h->debug.last_rem_idx));
700 /* Reset our removals counter */
701 i->rem_count = h->debug.rem_count;
702 }
703 #endif
704
705 return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
706 : hashmap_iterate_in_internal_order(h, i);
707 }
708
709 bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
710 struct hashmap_base_entry *e;
711 void *data;
712 unsigned idx;
713
714 idx = hashmap_iterate_entry(h, i);
715 if (idx == IDX_NIL) {
716 if (value)
717 *value = NULL;
718 if (key)
719 *key = NULL;
720
721 return false;
722 }
723
724 e = bucket_at(h, idx);
725 data = entry_value(h, e);
726 if (value)
727 *value = data;
728 if (key)
729 *key = e->key;
730
731 return true;
732 }
733
734 #define HASHMAP_FOREACH_IDX(idx, h, i) \
735 for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
736 (idx != IDX_NIL); \
737 (idx) = hashmap_iterate_entry((h), &(i)))
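
/*
 * Usage sketch (illustrative, not part of the original file): iterating keys
 * and values with the public HASHMAP_FOREACH_KEY() macro from hashmap.h, which
 * drives _hashmap_iterate() above. example_dump() is a hypothetical helper and
 * assumes string keys/values and <stdio.h>.
 */
static void example_dump(Hashmap *h) {
        const char *key, *value;

        HASHMAP_FOREACH_KEY(value, key, h)
                printf("%s=%s\n", key, value);
}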
738
739 IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
740 IteratedCache *cache;
741
742 assert(h);
743 assert(!h->cached);
744
745 if (h->cached)
746 return NULL;
747
748 cache = new0(IteratedCache, 1);
749 if (!cache)
750 return NULL;
751
752 cache->hashmap = h;
753 h->cached = true;
754
755 return cache;
756 }
757
758 static void reset_direct_storage(HashmapBase *h) {
759 const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
760 void *p;
761
762 assert(!h->has_indirect);
763
764 p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
765 memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
766 }
767
768 static void shared_hash_key_initialize(void) {
769 random_bytes(shared_hash_key, sizeof(shared_hash_key));
770 }
771
772 static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
773 HashmapBase *h;
774 const struct hashmap_type_info *hi = &hashmap_type_info[type];
775
776 bool use_pool = mempool_enabled && mempool_enabled();
777
778 h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
779 if (!h)
780 return NULL;
781
782 h->type = type;
783 h->from_pool = use_pool;
784 h->hash_ops = hash_ops ?: &trivial_hash_ops;
785
786 if (type == HASHMAP_TYPE_ORDERED) {
787 OrderedHashmap *lh = (OrderedHashmap*)h;
788 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
789 }
790
791 reset_direct_storage(h);
792
793 static pthread_once_t once = PTHREAD_ONCE_INIT;
794 assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);
795
796 #if ENABLE_DEBUG_HASHMAP
797 h->debug.func = func;
798 h->debug.file = file;
799 h->debug.line = line;
800 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
801 LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
802 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
803 #endif
804
805 return h;
806 }
807
808 Hashmap *_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
809 return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
810 }
811
812 OrderedHashmap *_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
813 return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
814 }
815
816 Set *_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
817 return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
818 }
819
820 static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
821 enum HashmapType type HASHMAP_DEBUG_PARAMS) {
822 HashmapBase *q;
823
824 assert(h);
825
826 if (*h)
827 return 0;
828
829 q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
830 if (!q)
831 return -ENOMEM;
832
833 *h = q;
834 return 1;
835 }
836
837 int _hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
838 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
839 }
840
841 int _ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
842 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
843 }
844
845 int _set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
846 return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
847 }
848
849 int _hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
850 int r;
851
852 r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
853 if (r < 0)
854 return r;
855
856 return hashmap_put(*h, key, value);
857 }
858
859 int _ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
860 int r;
861
862 r = _ordered_hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
863 if (r < 0)
864 return r;
865
866 return ordered_hashmap_put(*h, key, value);
867 }
868
869 static void hashmap_free_no_clear(HashmapBase *h) {
870 assert(!h->has_indirect);
871 assert(h->n_direct_entries == 0);
872
873 #if ENABLE_DEBUG_HASHMAP
874 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
875 LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
876 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
877 #endif
878
879 if (h->from_pool) {
880 /* Ensure that the object didn't get migrated between threads. */
881 assert_se(is_main_thread());
882 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
883 } else
884 free(h);
885 }
886
887 HashmapBase* _hashmap_free(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
888 if (h) {
889 _hashmap_clear(h, default_free_key, default_free_value);
890 hashmap_free_no_clear(h);
891 }
892
893 return NULL;
894 }
895
896 void _hashmap_clear(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
897 free_func_t free_key, free_value;
898 if (!h)
899 return;
900
901 free_key = h->hash_ops->free_key ?: default_free_key;
902 free_value = h->hash_ops->free_value ?: default_free_value;
903
904 if (free_key || free_value) {
905
906 /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
907 * hash table, and only then call the destructor functions. If these destructors then try to unregister
908 * themselves from our hash table a second time, the entry is already gone. */
909
910 while (_hashmap_size(h) > 0) {
911 void *k = NULL;
912 void *v;
913
914 v = _hashmap_first_key_and_value(h, true, &k);
915
916 if (free_key)
917 free_key(k);
918
919 if (free_value)
920 free_value(v);
921 }
922 }
923
924 if (h->has_indirect) {
925 free(h->indirect.storage);
926 h->has_indirect = false;
927 }
928
929 h->n_direct_entries = 0;
930 reset_direct_storage(h);
931
932 if (h->type == HASHMAP_TYPE_ORDERED) {
933 OrderedHashmap *lh = (OrderedHashmap*) h;
934 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
935 }
936
937 base_set_dirty(h);
938 }
939
940 static int resize_buckets(HashmapBase *h, unsigned entries_add);
941
942 /*
943 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
944 * Performs Robin Hood swaps as it goes. The entry to put must be placed
945 * by the caller into swap slot IDX_PUT.
946 * If used for in-place resizing, may leave a displaced entry in swap slot
947 * IDX_PUT. Caller must rehash it next.
948 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
949 * false otherwise.
950 */
951 static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
952 struct swap_entries *swap) {
953 dib_raw_t raw_dib, *dibs;
954 unsigned dib, distance;
955
956 #if ENABLE_DEBUG_HASHMAP
957 h->debug.put_count++;
958 #endif
959
960 dibs = dib_raw_ptr(h);
961
962 for (distance = 0; ; distance++) {
963 raw_dib = dibs[idx];
964 if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
965 if (raw_dib == DIB_RAW_REHASH)
966 bucket_move_entry(h, swap, idx, IDX_TMP);
967
968 if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
969 h->indirect.idx_lowest_entry = idx;
970
971 bucket_set_dib(h, idx, distance);
972 bucket_move_entry(h, swap, IDX_PUT, idx);
973 if (raw_dib == DIB_RAW_REHASH) {
974 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
975 return true;
976 }
977
978 return false;
979 }
980
981 dib = bucket_calculate_dib(h, idx, raw_dib);
982
983 if (dib < distance) {
984 /* Found a wealthier entry. Go Robin Hood! */
985 bucket_set_dib(h, idx, distance);
986
987 /* swap the entries */
988 bucket_move_entry(h, swap, idx, IDX_TMP);
989 bucket_move_entry(h, swap, IDX_PUT, idx);
990 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
991
992 distance = dib;
993 }
994
995 idx = next_idx(h, idx);
996 }
997 }
998
999 /*
1000 * Puts an entry into a hashmap, boldly - no check whether key already exists.
1001 * The caller must place the entry (only its key and value, not link indexes)
1002 * in swap slot IDX_PUT.
1003  * Caller must ensure: the key does not exist yet in the hashmap, and
1004  * that resize is not needed if !may_resize.
1005 * Returns: 1 if entry was put successfully.
1006 * -ENOMEM if may_resize==true and resize failed with -ENOMEM.
1007 * Cannot return -ENOMEM if !may_resize.
1008 */
1009 static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
1010 struct swap_entries *swap, bool may_resize) {
1011 struct ordered_hashmap_entry *new_entry;
1012 int r;
1013
1014 assert(idx < n_buckets(h));
1015
1016 new_entry = bucket_at_swap(swap, IDX_PUT);
1017
1018 if (may_resize) {
1019 r = resize_buckets(h, 1);
1020 if (r < 0)
1021 return r;
1022 if (r > 0)
1023 idx = bucket_hash(h, new_entry->p.b.key);
1024 }
1025 assert(n_entries(h) < n_buckets(h));
1026
1027 if (h->type == HASHMAP_TYPE_ORDERED) {
1028 OrderedHashmap *lh = (OrderedHashmap*) h;
1029
1030 new_entry->iterate_next = IDX_NIL;
1031 new_entry->iterate_previous = lh->iterate_list_tail;
1032
1033 if (lh->iterate_list_tail != IDX_NIL) {
1034 struct ordered_hashmap_entry *old_tail;
1035
1036 old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
1037 assert(old_tail->iterate_next == IDX_NIL);
1038 old_tail->iterate_next = IDX_PUT;
1039 }
1040
1041 lh->iterate_list_tail = IDX_PUT;
1042 if (lh->iterate_list_head == IDX_NIL)
1043 lh->iterate_list_head = IDX_PUT;
1044 }
1045
1046 assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
1047
1048 n_entries_inc(h);
1049 #if ENABLE_DEBUG_HASHMAP
1050 h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1051 #endif
1052
1053 base_set_dirty(h);
1054
1055 return 1;
1056 }
1057 #define hashmap_put_boldly(h, idx, swap, may_resize) \
1058 hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1059
1060 /*
1061 * Returns 0 if resize is not needed.
1062 * 1 if successfully resized.
1063 * -ENOMEM on allocation failure.
1064 */
1065 static int resize_buckets(HashmapBase *h, unsigned entries_add) {
1066 struct swap_entries swap;
1067 void *new_storage;
1068 dib_raw_t *old_dibs, *new_dibs;
1069 const struct hashmap_type_info *hi;
1070 unsigned idx, optimal_idx;
1071 unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
1072 uint8_t new_shift;
1073 bool rehash_next;
1074
1075 assert(h);
1076
1077 hi = &hashmap_type_info[h->type];
1078 new_n_entries = n_entries(h) + entries_add;
1079
1080 /* overflow? */
1081 if (_unlikely_(new_n_entries < entries_add))
1082 return -ENOMEM;
1083
1084 /* For direct storage we allow 100% load, because it's tiny. */
1085 if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
1086 return 0;
1087
1088 /*
1089 * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1090 * From it follows: m = n + n/(INV_KEEP_FREE - 1)
1091 */
1092 new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
1093 /* overflow? */
1094 if (_unlikely_(new_n_buckets < new_n_entries))
1095 return -ENOMEM;
1096
1097 if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
1098 return -ENOMEM;
1099
1100 old_n_buckets = n_buckets(h);
1101
1102 if (_likely_(new_n_buckets <= old_n_buckets))
1103 return 0;
1104
1105 new_shift = log2u_round_up(MAX(
1106 new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1107 2 * sizeof(struct direct_storage)));
1108
1109 /* Realloc storage (buckets and DIB array). */
1110 new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1111 1U << new_shift);
1112 if (!new_storage)
1113 return -ENOMEM;
1114
1115 /* Must upgrade direct to indirect storage. */
1116 if (!h->has_indirect) {
1117 memcpy(new_storage, h->direct.storage,
1118 old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1119 h->indirect.n_entries = h->n_direct_entries;
1120 h->indirect.idx_lowest_entry = 0;
1121 h->n_direct_entries = 0;
1122 }
1123
1124 /* Get a new hash key. If we've just upgraded to indirect storage,
1125 * allow reusing a previously generated key. It's still a different key
1126 * from the shared one that we used for direct storage. */
1127 get_hash_key(h->indirect.hash_key, !h->has_indirect);
1128
1129 h->has_indirect = true;
1130 h->indirect.storage = new_storage;
1131 h->indirect.n_buckets = (1U << new_shift) /
1132 (hi->entry_size + sizeof(dib_raw_t));
1133
1134 old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
1135 new_dibs = dib_raw_ptr(h);
1136
1137 /*
1138 * Move the DIB array to the new place, replacing valid DIB values with
1139 * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1140 * Note: Overlap is not possible, because we have at least doubled the
1141 * number of buckets and dib_raw_t is smaller than any entry type.
1142 */
1143 for (idx = 0; idx < old_n_buckets; idx++) {
1144 assert(old_dibs[idx] != DIB_RAW_REHASH);
1145 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
1146 : DIB_RAW_REHASH;
1147 }
1148
1149 /* Zero the area of newly added entries (including the old DIB area) */
1150 memzero(bucket_at(h, old_n_buckets),
1151 (n_buckets(h) - old_n_buckets) * hi->entry_size);
1152
1153 /* The upper half of the new DIB array needs initialization */
1154 memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
1155 (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
1156
1157 /* Rehash entries that need it */
1158 n_rehashed = 0;
1159 for (idx = 0; idx < old_n_buckets; idx++) {
1160 if (new_dibs[idx] != DIB_RAW_REHASH)
1161 continue;
1162
1163 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
1164
1165 /*
1166 * Not much to do if by luck the entry hashes to its current
1167 * location. Just set its DIB.
1168 */
1169 if (optimal_idx == idx) {
1170 new_dibs[idx] = 0;
1171 n_rehashed++;
1172 continue;
1173 }
1174
1175 new_dibs[idx] = DIB_RAW_FREE;
1176 bucket_move_entry(h, &swap, idx, IDX_PUT);
1177 /* bucket_move_entry does not clear the source */
1178 memzero(bucket_at(h, idx), hi->entry_size);
1179
1180 do {
1181 /*
1182 * Find the new bucket for the current entry. This may make
1183 * another entry homeless and load it into IDX_PUT.
1184 */
1185 rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
1186 n_rehashed++;
1187
1188 /* Did the current entry displace another one? */
1189 if (rehash_next)
1190 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
1191 } while (rehash_next);
1192 }
1193
1194 assert_se(n_rehashed == n_entries(h));
1195
1196 return 1;
1197 }
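
/* Worked example of the sizing above (64-bit, for illustration): growing a
 * plain Hashmap to hold 100 entries needs new_n_buckets = 100 + 100/4 = 125;
 * with entry_size 16 plus one DIB byte per bucket that is 125 * 17 = 2125
 * bytes, rounded up to the next power of two, 1 << 12 = 4096 bytes, which in
 * turn fits 4096 / 17 = 240 buckets. */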
1198
1199 /*
1200  * Finds an entry with a matching key.
1201 * Returns: index of the found entry, or IDX_NIL if not found.
1202 */
1203 static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1204 struct hashmap_base_entry *e;
1205 unsigned dib, distance;
1206 dib_raw_t *dibs = dib_raw_ptr(h);
1207
1208 assert(idx < n_buckets(h));
1209
1210 for (distance = 0; ; distance++) {
1211 if (dibs[idx] == DIB_RAW_FREE)
1212 return IDX_NIL;
1213
1214 dib = bucket_calculate_dib(h, idx, dibs[idx]);
1215
1216 if (dib < distance)
1217 return IDX_NIL;
1218 if (dib == distance) {
1219 e = bucket_at(h, idx);
1220 if (h->hash_ops->compare(e->key, key) == 0)
1221 return idx;
1222 }
1223
1224 idx = next_idx(h, idx);
1225 }
1226 }
1227 #define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1228
1229 int hashmap_put(Hashmap *h, const void *key, void *value) {
1230 struct swap_entries swap;
1231 struct plain_hashmap_entry *e;
1232 unsigned hash, idx;
1233
1234 assert(h);
1235
1236 hash = bucket_hash(h, key);
1237 idx = bucket_scan(h, hash, key);
1238 if (idx != IDX_NIL) {
1239 e = plain_bucket_at(h, idx);
1240 if (e->value == value)
1241 return 0;
1242 return -EEXIST;
1243 }
1244
1245 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1246 e->b.key = key;
1247 e->value = value;
1248 return hashmap_put_boldly(h, hash, &swap, true);
1249 }
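
/*
 * Usage sketch (illustrative, not part of the original file): a typical caller
 * of the put helpers. example_register() and its arguments are hypothetical;
 * hashmap_ensure_put() and string_hash_ops are the public helpers declared in
 * hashmap.h and hash-funcs.h.
 */
static int example_register(Hashmap **registry, const char *name, void *object) {
        /* Lazily allocates *registry on first use. Returns > 0 if the entry was
         * newly inserted, 0 if the exact key/value pair was already present,
         * -EEXIST if the key maps to a different value, -ENOMEM on allocation
         * failure. */
        return hashmap_ensure_put(registry, &string_hash_ops, name, object);
}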
1250
1251 int set_put(Set *s, const void *key) {
1252 struct swap_entries swap;
1253 struct hashmap_base_entry *e;
1254 unsigned hash, idx;
1255
1256 assert(s);
1257
1258 hash = bucket_hash(s, key);
1259 idx = bucket_scan(s, hash, key);
1260 if (idx != IDX_NIL)
1261 return 0;
1262
1263 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1264 e->key = key;
1265 return hashmap_put_boldly(s, hash, &swap, true);
1266 }
1267
1268 int _set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key HASHMAP_DEBUG_PARAMS) {
1269 int r;
1270
1271 r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1272 if (r < 0)
1273 return r;
1274
1275 return set_put(*s, key);
1276 }
1277
1278 int _set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key HASHMAP_DEBUG_PARAMS) {
1279 int r;
1280
1281 r = _set_ensure_put(s, hash_ops, key HASHMAP_DEBUG_PASS_ARGS);
1282 if (r <= 0) {
1283 if (hash_ops && hash_ops->free_key)
1284 hash_ops->free_key(key);
1285 else
1286 free(key);
1287 }
1288
1289 return r;
1290 }
1291
1292 int hashmap_replace(Hashmap *h, const void *key, void *value) {
1293 struct swap_entries swap;
1294 struct plain_hashmap_entry *e;
1295 unsigned hash, idx;
1296
1297 assert(h);
1298
1299 hash = bucket_hash(h, key);
1300 idx = bucket_scan(h, hash, key);
1301 if (idx != IDX_NIL) {
1302 e = plain_bucket_at(h, idx);
1303 #if ENABLE_DEBUG_HASHMAP
1304 /* Although the key is equal, the key pointer may have changed,
1305 * and this would break our assumption for iterating. So count
1306 * this operation as incompatible with iteration. */
1307 if (e->b.key != key) {
1308 h->b.debug.put_count++;
1309 h->b.debug.rem_count++;
1310 h->b.debug.last_rem_idx = idx;
1311 }
1312 #endif
1313 e->b.key = key;
1314 e->value = value;
1315 hashmap_set_dirty(h);
1316
1317 return 0;
1318 }
1319
1320 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1321 e->b.key = key;
1322 e->value = value;
1323 return hashmap_put_boldly(h, hash, &swap, true);
1324 }
1325
1326 int hashmap_update(Hashmap *h, const void *key, void *value) {
1327 struct plain_hashmap_entry *e;
1328 unsigned hash, idx;
1329
1330 assert(h);
1331
1332 hash = bucket_hash(h, key);
1333 idx = bucket_scan(h, hash, key);
1334 if (idx == IDX_NIL)
1335 return -ENOENT;
1336
1337 e = plain_bucket_at(h, idx);
1338 e->value = value;
1339 hashmap_set_dirty(h);
1340
1341 return 0;
1342 }
1343
1344 void* _hashmap_get(HashmapBase *h, const void *key) {
1345 struct hashmap_base_entry *e;
1346 unsigned hash, idx;
1347
1348 if (!h)
1349 return NULL;
1350
1351 hash = bucket_hash(h, key);
1352 idx = bucket_scan(h, hash, key);
1353 if (idx == IDX_NIL)
1354 return NULL;
1355
1356 e = bucket_at(h, idx);
1357 return entry_value(h, e);
1358 }
1359
1360 void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
1361 struct plain_hashmap_entry *e;
1362 unsigned hash, idx;
1363
1364 if (!h)
1365 return NULL;
1366
1367 hash = bucket_hash(h, key);
1368 idx = bucket_scan(h, hash, key);
1369 if (idx == IDX_NIL)
1370 return NULL;
1371
1372 e = plain_bucket_at(h, idx);
1373 if (key2)
1374 *key2 = (void*) e->b.key;
1375
1376 return e->value;
1377 }
1378
1379 bool _hashmap_contains(HashmapBase *h, const void *key) {
1380 unsigned hash;
1381
1382 if (!h)
1383 return false;
1384
1385 hash = bucket_hash(h, key);
1386 return bucket_scan(h, hash, key) != IDX_NIL;
1387 }
1388
1389 void* _hashmap_remove(HashmapBase *h, const void *key) {
1390 struct hashmap_base_entry *e;
1391 unsigned hash, idx;
1392 void *data;
1393
1394 if (!h)
1395 return NULL;
1396
1397 hash = bucket_hash(h, key);
1398 idx = bucket_scan(h, hash, key);
1399 if (idx == IDX_NIL)
1400 return NULL;
1401
1402 e = bucket_at(h, idx);
1403 data = entry_value(h, e);
1404 remove_entry(h, idx);
1405
1406 return data;
1407 }
1408
1409 void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1410 struct plain_hashmap_entry *e;
1411 unsigned hash, idx;
1412 void *data;
1413
1414 if (!h) {
1415 if (rkey)
1416 *rkey = NULL;
1417 return NULL;
1418 }
1419
1420 hash = bucket_hash(h, key);
1421 idx = bucket_scan(h, hash, key);
1422 if (idx == IDX_NIL) {
1423 if (rkey)
1424 *rkey = NULL;
1425 return NULL;
1426 }
1427
1428 e = plain_bucket_at(h, idx);
1429 data = e->value;
1430 if (rkey)
1431 *rkey = (void*) e->b.key;
1432
1433 remove_entry(h, idx);
1434
1435 return data;
1436 }
1437
1438 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1439 struct swap_entries swap;
1440 struct plain_hashmap_entry *e;
1441 unsigned old_hash, new_hash, idx;
1442
1443 if (!h)
1444 return -ENOENT;
1445
1446 old_hash = bucket_hash(h, old_key);
1447 idx = bucket_scan(h, old_hash, old_key);
1448 if (idx == IDX_NIL)
1449 return -ENOENT;
1450
1451 new_hash = bucket_hash(h, new_key);
1452 if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
1453 return -EEXIST;
1454
1455 remove_entry(h, idx);
1456
1457 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1458 e->b.key = new_key;
1459 e->value = value;
1460 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1461
1462 return 0;
1463 }
1464
1465 int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1466 struct swap_entries swap;
1467 struct hashmap_base_entry *e;
1468 unsigned old_hash, new_hash, idx;
1469
1470 if (!s)
1471 return -ENOENT;
1472
1473 old_hash = bucket_hash(s, old_key);
1474 idx = bucket_scan(s, old_hash, old_key);
1475 if (idx == IDX_NIL)
1476 return -ENOENT;
1477
1478 new_hash = bucket_hash(s, new_key);
1479 if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1480 return -EEXIST;
1481
1482 remove_entry(s, idx);
1483
1484 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1485 e->key = new_key;
1486 assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
1487
1488 return 0;
1489 }
1490
1491 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1492 struct swap_entries swap;
1493 struct plain_hashmap_entry *e;
1494 unsigned old_hash, new_hash, idx_old, idx_new;
1495
1496 if (!h)
1497 return -ENOENT;
1498
1499 old_hash = bucket_hash(h, old_key);
1500 idx_old = bucket_scan(h, old_hash, old_key);
1501 if (idx_old == IDX_NIL)
1502 return -ENOENT;
1503
1504 old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
1505
1506 new_hash = bucket_hash(h, new_key);
1507 idx_new = bucket_scan(h, new_hash, new_key);
1508 if (idx_new != IDX_NIL)
1509 if (idx_old != idx_new) {
1510 remove_entry(h, idx_new);
1511 /* Compensate for a possible backward shift. */
1512 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1513 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1514 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1515 }
1516
1517 remove_entry(h, idx_old);
1518
1519 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1520 e->b.key = new_key;
1521 e->value = value;
1522 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1523
1524 return 0;
1525 }
1526
1527 void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
1528 struct hashmap_base_entry *e;
1529 unsigned hash, idx;
1530
1531 if (!h)
1532 return NULL;
1533
1534 hash = bucket_hash(h, key);
1535 idx = bucket_scan(h, hash, key);
1536 if (idx == IDX_NIL)
1537 return NULL;
1538
1539 e = bucket_at(h, idx);
1540 if (entry_value(h, e) != value)
1541 return NULL;
1542
1543 remove_entry(h, idx);
1544
1545 return value;
1546 }
1547
1548 static unsigned find_first_entry(HashmapBase *h) {
1549 Iterator i = ITERATOR_FIRST;
1550
1551 if (!h || !n_entries(h))
1552 return IDX_NIL;
1553
1554 return hashmap_iterate_entry(h, &i);
1555 }
1556
1557 void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
1558 struct hashmap_base_entry *e;
1559 void *key, *data;
1560 unsigned idx;
1561
1562 idx = find_first_entry(h);
1563 if (idx == IDX_NIL) {
1564 if (ret_key)
1565 *ret_key = NULL;
1566 return NULL;
1567 }
1568
1569 e = bucket_at(h, idx);
1570 key = (void*) e->key;
1571 data = entry_value(h, e);
1572
1573 if (remove)
1574 remove_entry(h, idx);
1575
1576 if (ret_key)
1577 *ret_key = key;
1578
1579 return data;
1580 }
1581
1582 unsigned _hashmap_size(HashmapBase *h) {
1583 if (!h)
1584 return 0;
1585
1586 return n_entries(h);
1587 }
1588
1589 unsigned _hashmap_buckets(HashmapBase *h) {
1590 if (!h)
1591 return 0;
1592
1593 return n_buckets(h);
1594 }
1595
1596 int _hashmap_merge(Hashmap *h, Hashmap *other) {
1597 Iterator i;
1598 unsigned idx;
1599
1600 assert(h);
1601
1602 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1603 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1604 int r;
1605
1606 r = hashmap_put(h, pe->b.key, pe->value);
1607 if (r < 0 && r != -EEXIST)
1608 return r;
1609 }
1610
1611 return 0;
1612 }
1613
1614 int set_merge(Set *s, Set *other) {
1615 Iterator i;
1616 unsigned idx;
1617
1618 assert(s);
1619
1620 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1621 struct set_entry *se = set_bucket_at(other, idx);
1622 int r;
1623
1624 r = set_put(s, se->b.key);
1625 if (r < 0)
1626 return r;
1627 }
1628
1629 return 0;
1630 }
1631
1632 int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
1633 int r;
1634
1635 assert(h);
1636
1637 r = resize_buckets(h, entries_add);
1638 if (r < 0)
1639 return r;
1640
1641 return 0;
1642 }
1643
1644 /*
1645 * The same as hashmap_merge(), but every new item from other is moved to h.
1646 * Keys already in h are skipped and stay in other.
1647 * Returns: 0 on success.
1648 * -ENOMEM on alloc failure, in which case no move has been done.
1649 */
1650 int _hashmap_move(HashmapBase *h, HashmapBase *other) {
1651 struct swap_entries swap;
1652 struct hashmap_base_entry *e, *n;
1653 Iterator i;
1654 unsigned idx;
1655 int r;
1656
1657 assert(h);
1658
1659 if (!other)
1660 return 0;
1661
1662 assert(other->type == h->type);
1663
1664 /*
1665 * This reserves buckets for the worst case, where none of other's
1666 * entries are yet present in h. This is preferable to risking
1667 * an allocation failure in the middle of the moving and having to
1668 * rollback or return a partial result.
1669 */
1670 r = resize_buckets(h, n_entries(other));
1671 if (r < 0)
1672 return r;
1673
1674 HASHMAP_FOREACH_IDX(idx, other, i) {
1675 unsigned h_hash;
1676
1677 e = bucket_at(other, idx);
1678 h_hash = bucket_hash(h, e->key);
1679 if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
1680 continue;
1681
1682 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1683 n->key = e->key;
1684 if (h->type != HASHMAP_TYPE_SET)
1685 ((struct plain_hashmap_entry*) n)->value =
1686 ((struct plain_hashmap_entry*) e)->value;
1687 assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);
1688
1689 remove_entry(other, idx);
1690 }
1691
1692 return 0;
1693 }
1694
1695 int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
1696 struct swap_entries swap;
1697 unsigned h_hash, other_hash, idx;
1698 struct hashmap_base_entry *e, *n;
1699 int r;
1700
1701 assert(h);
1702
1703 h_hash = bucket_hash(h, key);
1704 if (bucket_scan(h, h_hash, key) != IDX_NIL)
1705 return -EEXIST;
1706
1707 if (!other)
1708 return -ENOENT;
1709
1710 assert(other->type == h->type);
1711
1712 other_hash = bucket_hash(other, key);
1713 idx = bucket_scan(other, other_hash, key);
1714 if (idx == IDX_NIL)
1715 return -ENOENT;
1716
1717 e = bucket_at(other, idx);
1718
1719 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1720 n->key = e->key;
1721 if (h->type != HASHMAP_TYPE_SET)
1722 ((struct plain_hashmap_entry*) n)->value =
1723 ((struct plain_hashmap_entry*) e)->value;
1724 r = hashmap_put_boldly(h, h_hash, &swap, true);
1725 if (r < 0)
1726 return r;
1727
1728 remove_entry(other, idx);
1729 return 0;
1730 }
1731
1732 HashmapBase* _hashmap_copy(HashmapBase *h HASHMAP_DEBUG_PARAMS) {
1733 HashmapBase *copy;
1734 int r;
1735
1736 assert(h);
1737
1738 copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_PASS_ARGS);
1739 if (!copy)
1740 return NULL;
1741
1742 switch (h->type) {
1743 case HASHMAP_TYPE_PLAIN:
1744 case HASHMAP_TYPE_ORDERED:
1745 r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1746 break;
1747 case HASHMAP_TYPE_SET:
1748 r = set_merge((Set*)copy, (Set*)h);
1749 break;
1750 default:
1751 assert_not_reached();
1752 }
1753
1754 if (r < 0)
1755 return _hashmap_free(copy, false, false);
1756
1757 return copy;
1758 }
1759
1760 char** _hashmap_get_strv(HashmapBase *h) {
1761 char **sv;
1762 Iterator i;
1763 unsigned idx, n;
1764
1765 if (!h)
1766 return new0(char*, 1);
1767
1768 sv = new(char*, n_entries(h)+1);
1769 if (!sv)
1770 return NULL;
1771
1772 n = 0;
1773 HASHMAP_FOREACH_IDX(idx, h, i)
1774 sv[n++] = entry_value(h, bucket_at(h, idx));
1775 sv[n] = NULL;
1776
1777 return sv;
1778 }
1779
1780 void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1781 struct ordered_hashmap_entry *e;
1782 unsigned hash, idx;
1783
1784 if (!h)
1785 return NULL;
1786
1787 hash = bucket_hash(h, key);
1788 idx = bucket_scan(h, hash, key);
1789 if (idx == IDX_NIL)
1790 return NULL;
1791
1792 e = ordered_bucket_at(h, idx);
1793 if (e->iterate_next == IDX_NIL)
1794 return NULL;
1795 return ordered_bucket_at(h, e->iterate_next)->p.value;
1796 }
1797
1798 int set_consume(Set *s, void *value) {
1799 int r;
1800
1801 assert(s);
1802 assert(value);
1803
1804 r = set_put(s, value);
1805 if (r <= 0)
1806 free(value);
1807
1808 return r;
1809 }
1810
1811 int _hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v HASHMAP_DEBUG_PARAMS) {
1812 int r;
1813
1814 r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1815 if (r < 0)
1816 return r;
1817
1818 _cleanup_free_ char *kdup = NULL, *vdup = NULL;
1819
1820 kdup = strdup(k);
1821 if (!kdup)
1822 return -ENOMEM;
1823
1824 if (v) {
1825 vdup = strdup(v);
1826 if (!vdup)
1827 return -ENOMEM;
1828 }
1829
1830 r = hashmap_put(*h, kdup, vdup);
1831 if (r < 0) {
1832 if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
1833 return 0;
1834 return r;
1835 }
1836
1837 /* 0 with non-null vdup would mean vdup is already in the hashmap, which cannot be */
1838 assert(vdup == NULL || r > 0);
1839 if (r > 0)
1840 kdup = vdup = NULL;
1841
1842 return r;
1843 }
1844
1845 int _set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n HASHMAP_DEBUG_PARAMS) {
1846 char *c;
1847 int r;
1848
1849 assert(s);
1850 assert(p);
1851
1852 r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1853 if (r < 0)
1854 return r;
1855
1856 if (n == SIZE_MAX) {
1857 if (set_contains(*s, (char*) p))
1858 return 0;
1859
1860 c = strdup(p);
1861 } else
1862 c = strndup(p, n);
1863 if (!c)
1864 return -ENOMEM;
1865
1866 return set_consume(*s, c);
1867 }
1868
1869 int _set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l HASHMAP_DEBUG_PARAMS) {
1870 int n = 0, r;
1871
1872 assert(s);
1873
1874 STRV_FOREACH(i, l) {
1875 r = _set_put_strndup_full(s, hash_ops, *i, SIZE_MAX HASHMAP_DEBUG_PASS_ARGS);
1876 if (r < 0)
1877 return r;
1878
1879 n += r;
1880 }
1881
1882 return n;
1883 }
1884
1885 int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
1886 const char *p = ASSERT_PTR(v);
1887 int r;
1888
1889 assert(s);
1890
1891 for (;;) {
1892 char *word;
1893
1894 r = extract_first_word(&p, &word, separators, flags);
1895 if (r <= 0)
1896 return r;
1897
1898 r = set_consume(s, word);
1899 if (r < 0)
1900 return r;
1901 }
1902 }
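
/* Example (illustrative): set_put_strsplit(s, "eth0 eth1 eth2", WHITESPACE, 0)
 * duplicates the three whitespace-separated words, consumes them into the set
 * and returns 0 once the input is exhausted. */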
1903
1904 /* expand the cachemem if needed, return true if newly (re)activated. */
1905 static int cachemem_maintain(CacheMem *mem, size_t size) {
1906 assert(mem);
1907
1908 if (!GREEDY_REALLOC(mem->ptr, size)) {
1909 if (size > 0)
1910 return -ENOMEM;
1911 }
1912
1913 if (!mem->active) {
1914 mem->active = true;
1915 return true;
1916 }
1917
1918 return false;
1919 }
1920
1921 int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
1922 bool sync_keys = false, sync_values = false;
1923 size_t size;
1924 int r;
1925
1926 assert(cache);
1927 assert(cache->hashmap);
1928
1929 size = n_entries(cache->hashmap);
1930
1931 if (res_keys) {
1932 r = cachemem_maintain(&cache->keys, size);
1933 if (r < 0)
1934 return r;
1935
1936 sync_keys = r;
1937 } else
1938 cache->keys.active = false;
1939
1940 if (res_values) {
1941 r = cachemem_maintain(&cache->values, size);
1942 if (r < 0)
1943 return r;
1944
1945 sync_values = r;
1946 } else
1947 cache->values.active = false;
1948
1949 if (cache->hashmap->dirty) {
1950 if (cache->keys.active)
1951 sync_keys = true;
1952 if (cache->values.active)
1953 sync_values = true;
1954
1955 cache->hashmap->dirty = false;
1956 }
1957
1958 if (sync_keys || sync_values) {
1959 unsigned i, idx;
1960 Iterator iter;
1961
1962 i = 0;
1963 HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
1964 struct hashmap_base_entry *e;
1965
1966 e = bucket_at(cache->hashmap, idx);
1967
1968 if (sync_keys)
1969 cache->keys.ptr[i] = e->key;
1970 if (sync_values)
1971 cache->values.ptr[i] = entry_value(cache->hashmap, e);
1972 i++;
1973 }
1974 }
1975
1976 if (res_keys)
1977 *res_keys = cache->keys.ptr;
1978 if (res_values)
1979 *res_values = cache->values.ptr;
1980 if (res_n_entries)
1981 *res_n_entries = size;
1982
1983 return 0;
1984 }
1985
1986 IteratedCache* iterated_cache_free(IteratedCache *cache) {
1987 if (cache) {
1988 free(cache->keys.ptr);
1989 free(cache->values.ptr);
1990 }
1991
1992 return mfree(cache);
1993 }
1994
1995 int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
1996 _cleanup_free_ char *str = NULL;
1997 size_t separator_len, len = 0;
1998 const char *value;
1999 bool first;
2000
2001 assert(ret);
2002
2003 if (set_isempty(s)) {
2004 *ret = NULL;
2005 return 0;
2006 }
2007
2008 separator_len = strlen_ptr(separator);
2009
2010 if (separator_len == 0)
2011 wrap_with_separator = false;
2012
2013 first = !wrap_with_separator;
2014
2015 SET_FOREACH(value, s) {
2016 size_t l = strlen_ptr(value);
2017
2018 if (l == 0)
2019 continue;
2020
2021 if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
2022 return -ENOMEM;
2023
2024 if (separator_len > 0 && !first) {
2025 memcpy(str + len, separator, separator_len);
2026 len += separator_len;
2027 }
2028
2029 memcpy(str + len, value, l);
2030 len += l;
2031 first = false;
2032 }
2033
2034 if (wrap_with_separator) {
2035 memcpy(str + len, separator, separator_len);
2036 len += separator_len;
2037 }
2038
2039 str[len] = '\0';
2040
2041 *ret = TAKE_PTR(str);
2042 return 0;
2043 }
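
/* Example of the above (illustrative; Set iteration order is unspecified): for
 * a set containing "a", "b" and "c" with separator ",", the result is "a,b,c"
 * in some order, or ",a,b,c," with wrap_with_separator == true. */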
2044
2045 bool set_equal(Set *a, Set *b) {
2046 void *p;
2047
2048 /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same
2049 * entries */
2050
2051 if (a == b)
2052 return true;
2053
2054 if (set_isempty(a) && set_isempty(b))
2055 return true;
2056
2057 if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
2058 * already */
2059 return false;
2060
2061 SET_FOREACH(p, a)
2062 if (!set_contains(b, p))
2063 return false;
2064
2065 /* If we have the same hashops, then we don't need to check things backwards given we compared the
2066 * size and that all of a is in b. */
2067 if (a->b.hash_ops == b->b.hash_ops)
2068 return true;
2069
2070 SET_FOREACH(p, b)
2071 if (!set_contains(a, p))
2072 return false;
2073
2074 return true;
2075 }
2076
2077 static bool set_fnmatch_one(Set *patterns, const char *needle) {
2078 const char *p;
2079
2080 assert(needle);
2081
2082         /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as a non-matching pattern */
2083
2084 SET_FOREACH(p, patterns)
2085 if (fnmatch(p, needle, 0) == 0)
2086 return true;
2087
2088 return false;
2089 }
2090
2091 bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
2092 assert(needle);
2093
2094 if (set_fnmatch_one(exclude_patterns, needle))
2095 return false;
2096
2097 if (set_isempty(include_patterns))
2098 return true;
2099
2100 return set_fnmatch_one(include_patterns, needle);
2101 }
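
/* Example (illustrative): with include_patterns = { "eth*" } and
 * exclude_patterns = { "eth1" }, set_fnmatch() returns true for "eth0", but
 * false for "eth1" (excluded) and for "wlan0" (matching no include pattern). */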