1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <fnmatch.h>
5 #include <pthread.h>
6 #include <stdint.h>
7 #include <stdlib.h>
8
9 #include "alloc-util.h"
10 #include "fileio.h"
11 #include "hashmap.h"
12 #include "logarithm.h"
13 #include "macro.h"
14 #include "memory-util.h"
15 #include "mempool.h"
16 #include "missing_syscall.h"
17 #include "process-util.h"
18 #include "random-util.h"
19 #include "set.h"
20 #include "siphash24.h"
21 #include "string-util.h"
22 #include "strv.h"
23
24 #if ENABLE_DEBUG_HASHMAP
25 #include "list.h"
26 #endif
27
28 /*
29 * Implementation of hashmaps.
30 * Addressing: open
31 * - uses less RAM compared to closed addressing (chaining), because
32 * our entries are small (especially in Sets, which tend to contain
33 * the majority of entries in systemd).
34 * Collision resolution: Robin Hood
35 * - tends to equalize displacement of entries from their optimal buckets.
36 * Probe sequence: linear
37 * - though theoretically worse than random probing/uniform hashing/double
38 * hashing, it is good for cache locality.
39 *
40 * References:
41 * Celis, P. 1986. Robin Hood Hashing.
42 Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
43 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
44 * - The results are derived for random probing. Suggests deletion with
45 * tombstones and two mean-centered search methods. None of that works
46 * well for linear probing.
47 *
48 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
49 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
50 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
51 * http://www.math.uu.se/~svante/papers/sj157.pdf
52 * - Applies to Robin Hood with linear probing. Contains remarks on
53 * the unsuitability of mean-centered search with linear probing.
54 *
55 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
56 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
57 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
58 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
59 * in a successful search), and Janson writes about displacement. C = d + 1.
60 *
61 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
62 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
63 * - Explanation of backward shift deletion with pictures.
64 *
65 * Khuong, P. 2013. The Other Robin Hood Hashing.
66 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
67 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
68 */
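/*
 * Illustrative sketch, not part of the original file: a toy Robin Hood insert
 * over a small open-addressed table with linear probing, to make the notions
 * above (displacement from the initial bucket, "DIB", and swapping with
 * "wealthier" entries) concrete. All names and the trivial modulo hash are
 * made up for the example; the real implementation below additionally packs
 * DIBs into one byte, resizes, and supports ordered maps and sets. The toy
 * assumes the table never becomes completely full.
 */
#if 0
#define TOY_BUCKETS 8U

struct toy_bucket {
        unsigned key;
        unsigned dib;           /* distance from the bucket the key hashes to */
        bool used;
};

static void toy_robin_hood_insert(struct toy_bucket t[TOY_BUCKETS], unsigned key) {
        unsigned idx = key % TOY_BUCKETS, dib = 0;

        for (;;) {
                if (!t[idx].used) {
                        t[idx] = (struct toy_bucket) { .key = key, .dib = dib, .used = true };
                        return;
                }

                if (t[idx].dib < dib) {
                        /* The resident entry is closer to its optimal bucket than we are:
                         * take its slot and continue inserting the displaced entry. */
                        struct toy_bucket displaced = t[idx];

                        t[idx] = (struct toy_bucket) { .key = key, .dib = dib, .used = true };
                        key = displaced.key;
                        dib = displaced.dib;
                }

                idx = (idx + 1) % TOY_BUCKETS;
                dib++;
        }
}
#endif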
69
70 /*
71 * XXX Ideas for improvement:
72 * For unordered hashmaps, randomize iteration order, similarly to Perl:
73 * http://blog.booking.com/hardening-perls-hash-function.html
74 */
75
76 /* INV_KEEP_FREE = 1 / (1 - max_load_factor)
77 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
78 #define INV_KEEP_FREE 5U
79
80 /* Fields common to entries of all hashmap/set types */
81 struct hashmap_base_entry {
82 const void *key;
83 };
84
85 /* Entry types for specific hashmap/set types
86 * hashmap_base_entry must be at the beginning of each entry struct. */
87
88 struct plain_hashmap_entry {
89 struct hashmap_base_entry b;
90 void *value;
91 };
92
93 struct ordered_hashmap_entry {
94 struct plain_hashmap_entry p;
95 unsigned iterate_next, iterate_previous;
96 };
97
98 struct set_entry {
99 struct hashmap_base_entry b;
100 };
101
102 /* In several functions it is advantageous to have the hash table extended
103 * virtually by a couple of additional buckets. We reserve special index values
104 * for these "swap" buckets. */
105 #define _IDX_SWAP_BEGIN (UINT_MAX - 3)
106 #define IDX_PUT (_IDX_SWAP_BEGIN + 0)
107 #define IDX_TMP (_IDX_SWAP_BEGIN + 1)
108 #define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)
109
110 #define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
111 #define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */
112
113 assert_cc(IDX_FIRST == _IDX_SWAP_END);
114 assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
115
116 /* Storage space for the "swap" buckets.
117 * All entry types can fit into an ordered_hashmap_entry. */
118 struct swap_entries {
119 struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
120 };
121
122 /* Distance from Initial Bucket */
123 typedef uint8_t dib_raw_t;
124 #define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
125 #define DIB_RAW_REHASH ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
126 #define DIB_RAW_FREE ((dib_raw_t)0xffU) /* a free bucket */
127 #define DIB_RAW_INIT ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */
128
129 #define DIB_FREE UINT_MAX
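/* Example of the encoding above (illustrative): an entry whose key hashes to
 * bucket 5 but is stored in bucket 9 has DIB 4 and is stored raw as 0x04.
 * Only DIBs 0..0xfc fit into dib_raw_t; anything larger is stored as
 * DIB_RAW_OVERFLOW and recomputed from the key on demand, see
 * bucket_calculate_dib(). */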
130
131 #if ENABLE_DEBUG_HASHMAP
132 struct hashmap_debug_info {
133 LIST_FIELDS(struct hashmap_debug_info, debug_list);
134 unsigned max_entries; /* high watermark of n_entries */
135
136 /* who allocated this hashmap */
137 int line;
138 const char *file;
139 const char *func;
140
141 /* fields to detect modification while iterating */
142 unsigned put_count; /* counts puts into the hashmap */
143 unsigned rem_count; /* counts removals from hashmap */
144 unsigned last_rem_idx; /* remembers last removal index */
145 };
146
147 /* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
148 static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
149 static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
150 #endif
151
152 enum HashmapType {
153 HASHMAP_TYPE_PLAIN,
154 HASHMAP_TYPE_ORDERED,
155 HASHMAP_TYPE_SET,
156 _HASHMAP_TYPE_MAX
157 };
158
159 struct _packed_ indirect_storage {
160 void *storage; /* where buckets and DIBs are stored */
161 uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */
162
163 unsigned n_entries; /* number of stored entries */
164 unsigned n_buckets; /* number of buckets */
165
166 unsigned idx_lowest_entry; /* Index below which all buckets are free.
167 Makes "while (hashmap_steal_first())" loops
168 O(n) instead of O(n^2) for unordered hashmaps. */
169 uint8_t _pad[3]; /* padding for the whole HashmapBase */
170 /* The bitfields in HashmapBase complete the alignment of the whole thing. */
171 };
172
173 struct direct_storage {
174 /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
175 * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
176 * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
177 uint8_t storage[sizeof(struct indirect_storage)];
178 };
179
180 #define DIRECT_BUCKETS(entry_t) \
181 (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
182
183 /* We should be able to store at least one entry directly. */
184 assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);
185
186 /* We have 3 bits for n_direct_entries. */
187 assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
188
189 /* Hashmaps with directly stored entries all use this shared hash key.
190 * It's no big deal if the key is guessed, because there can be only
191 * a handful of directly stored entries in a hashmap. When a hashmap
192 * outgrows direct storage, it gets its own key for indirect storage. */
193 static uint8_t shared_hash_key[HASH_KEY_SIZE];
194
195 /* Fields that all hashmap/set types must have */
196 struct HashmapBase {
197 const struct hash_ops *hash_ops; /* hash and compare ops to use */
198
199 union _packed_ {
200 struct indirect_storage indirect; /* if has_indirect */
201 struct direct_storage direct; /* if !has_indirect */
202 };
203
204 enum HashmapType type:2; /* HASHMAP_TYPE_* */
205 bool has_indirect:1; /* whether indirect storage is used */
206 unsigned n_direct_entries:3; /* Number of entries in direct storage.
207 * Only valid if !has_indirect. */
208 bool from_pool:1; /* whether was allocated from mempool */
209 bool dirty:1; /* whether dirtied since last iterated_cache_get() */
210 bool cached:1; /* whether this hashmap is being cached */
211
212 #if ENABLE_DEBUG_HASHMAP
213 struct hashmap_debug_info debug;
214 #endif
215 };
216
217 /* Specific hash types
218 * HashmapBase must be at the beginning of each hashmap struct. */
219
220 struct Hashmap {
221 struct HashmapBase b;
222 };
223
224 struct OrderedHashmap {
225 struct HashmapBase b;
226 unsigned iterate_list_head, iterate_list_tail;
227 };
228
229 struct Set {
230 struct HashmapBase b;
231 };
232
233 typedef struct CacheMem {
234 const void **ptr;
235 size_t n_populated;
236 bool active:1;
237 } CacheMem;
238
239 struct IteratedCache {
240 HashmapBase *hashmap;
241 CacheMem keys, values;
242 };
243
244 DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
245 DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
246 /* No need for a separate Set pool */
247 assert_cc(sizeof(Hashmap) == sizeof(Set));
248
249 struct hashmap_type_info {
250 size_t head_size;
251 size_t entry_size;
252 struct mempool *mempool;
253 unsigned n_direct_buckets;
254 };
255
256 static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
257 [HASHMAP_TYPE_PLAIN] = {
258 .head_size = sizeof(Hashmap),
259 .entry_size = sizeof(struct plain_hashmap_entry),
260 .mempool = &hashmap_pool,
261 .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
262 },
263 [HASHMAP_TYPE_ORDERED] = {
264 .head_size = sizeof(OrderedHashmap),
265 .entry_size = sizeof(struct ordered_hashmap_entry),
266 .mempool = &ordered_hashmap_pool,
267 .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
268 },
269 [HASHMAP_TYPE_SET] = {
270 .head_size = sizeof(Set),
271 .entry_size = sizeof(struct set_entry),
272 .mempool = &hashmap_pool,
273 .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
274 },
275 };
276
277 void hashmap_trim_pools(void) {
278 int r;
279
280 /* The pool is only allocated by the main thread, but the memory can be passed to other
281 * threads. Let's clean up if we are the main thread and no other threads are live. */
282
283 /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
284 * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
285 if (getpid() != gettid())
286 return (void) log_debug("Not cleaning up memory pools, not in main thread.");
287
288 r = get_process_threads(0);
289 if (r < 0)
290 return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
291 if (r != 1)
292 return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");
293
294 mempool_trim(&hashmap_pool);
295 mempool_trim(&ordered_hashmap_pool);
296 }
297
298 #if VALGRIND
299 _destructor_ static void cleanup_pools(void) {
300 /* Be nice to valgrind */
301 hashmap_trim_pools();
302 }
303 #endif
304
305 static unsigned n_buckets(HashmapBase *h) {
306 return h->has_indirect ? h->indirect.n_buckets
307 : hashmap_type_info[h->type].n_direct_buckets;
308 }
309
310 static unsigned n_entries(HashmapBase *h) {
311 return h->has_indirect ? h->indirect.n_entries
312 : h->n_direct_entries;
313 }
314
315 static void n_entries_inc(HashmapBase *h) {
316 if (h->has_indirect)
317 h->indirect.n_entries++;
318 else
319 h->n_direct_entries++;
320 }
321
322 static void n_entries_dec(HashmapBase *h) {
323 if (h->has_indirect)
324 h->indirect.n_entries--;
325 else
326 h->n_direct_entries--;
327 }
328
329 static void* storage_ptr(HashmapBase *h) {
330 return h->has_indirect ? h->indirect.storage
331 : h->direct.storage;
332 }
333
334 static uint8_t* hash_key(HashmapBase *h) {
335 return h->has_indirect ? h->indirect.hash_key
336 : shared_hash_key;
337 }
338
339 static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
340 struct siphash state;
341 uint64_t hash;
342
343 siphash24_init(&state, hash_key(h));
344
345 h->hash_ops->hash(p, &state);
346
347 hash = siphash24_finalize(&state);
348
349 return (unsigned) (hash % n_buckets(h));
350 }
351 #define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
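/*
 * Caller-side sketch (not part of this file, assuming the DEFINE_HASH_OPS()
 * helper from hash-funcs.h and the CMP() macro from macro.h): this is how a
 * custom key type typically provides the hash_ops consumed by
 * base_bucket_hash() above, feeding all significant fields into the supplied
 * siphash state.
 */
#if 0
struct point {
        int x, y;
};

static void point_hash_func(const struct point *p, struct siphash *state) {
        siphash24_compress(&p->x, sizeof(p->x), state);
        siphash24_compress(&p->y, sizeof(p->y), state);
}

static int point_compare_func(const struct point *a, const struct point *b) {
        int r;

        r = CMP(a->x, b->x);
        if (r != 0)
                return r;

        return CMP(a->y, b->y);
}

DEFINE_HASH_OPS(point_hash_ops, struct point, point_hash_func, point_compare_func);

/* ... a map created with hashmap_new(&point_hash_ops) then hashes and compares
 * struct point keys as defined above. */
#endif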
352
353 static void base_set_dirty(HashmapBase *h) {
354 h->dirty = true;
355 }
356 #define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
357
358 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
359 static uint8_t current[HASH_KEY_SIZE];
360 static bool current_initialized = false;
361
362 /* Returns a hash function key to use. In order to keep things
363 * fast we will not generate a new key each time we allocate a
364 * new hash table. Instead, we'll just reuse the most recently
365 * generated one, except if we never generated one or when we
366 * are rehashing an entire hash table because we reached the
367 * maximum fill level. */
368
369 if (!current_initialized || !reuse_is_ok) {
370 random_bytes(current, sizeof(current));
371 current_initialized = true;
372 }
373
374 memcpy(hash_key, current, sizeof(current));
375 }
376
377 static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
378 return CAST_ALIGN_PTR(
379 struct hashmap_base_entry,
380 (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
381 }
382
383 static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
384 return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
385 }
386
387 static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
388 return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
389 }
390
391 static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
392 return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
393 }
394
395 static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
396 return &swap->e[idx - _IDX_SWAP_BEGIN];
397 }
398
399 /* Returns a pointer to the bucket at index idx.
400 * Understands real indexes and swap indexes, hence "_virtual". */
401 static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
402 unsigned idx) {
403 if (idx < _IDX_SWAP_BEGIN)
404 return bucket_at(h, idx);
405
406 if (idx < _IDX_SWAP_END)
407 return &bucket_at_swap(swap, idx)->p.b;
408
409 assert_not_reached();
410 }
411
412 static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
413 return (dib_raw_t*)
414 ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
415 }
416
417 static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
418 return idx >= from ? idx - from
419 : n_buckets(h) + idx - from;
420 }
421
422 static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
423 unsigned initial_bucket;
424
425 if (raw_dib == DIB_RAW_FREE)
426 return DIB_FREE;
427
428 if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
429 return raw_dib;
430
431 /*
432 * Having an overflow DIB value is very unlikely. The hash function
433 * would have to be bad. For example, in a table of size 2^24 filled
434 * to load factor 0.9 the maximum observed DIB is only about 60.
435 * In theory (assuming I used Maxima correctly), for an infinite size
436 * hash table with load factor 0.8 the probability of a given entry
437 * having DIB > 40 is 1.9e-8.
438 * This returns the correct DIB value by recomputing the hash value in
439 * the unlikely case. XXX Hitting this case could be a hint to rehash.
440 */
441 initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
442 return bucket_distance(h, idx, initial_bucket);
443 }
444
445 static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
446 dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
447 }
448
449 static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
450 dib_raw_t *dibs;
451
452 dibs = dib_raw_ptr(h);
453
454 for ( ; idx < n_buckets(h); idx++)
455 if (dibs[idx] != DIB_RAW_FREE)
456 return idx;
457
458 return IDX_NIL;
459 }
460
461 static void bucket_mark_free(HashmapBase *h, unsigned idx) {
462 memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
463 bucket_set_dib(h, idx, DIB_FREE);
464 }
465
466 static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
467 unsigned from, unsigned to) {
468 struct hashmap_base_entry *e_from, *e_to;
469
470 assert(from != to);
471
472 e_from = bucket_at_virtual(h, swap, from);
473 e_to = bucket_at_virtual(h, swap, to);
474
475 memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);
476
477 if (h->type == HASHMAP_TYPE_ORDERED) {
478 OrderedHashmap *lh = (OrderedHashmap*) h;
479 struct ordered_hashmap_entry *le, *le_to;
480
481 le_to = (struct ordered_hashmap_entry*) e_to;
482
483 if (le_to->iterate_next != IDX_NIL) {
484 le = (struct ordered_hashmap_entry*)
485 bucket_at_virtual(h, swap, le_to->iterate_next);
486 le->iterate_previous = to;
487 }
488
489 if (le_to->iterate_previous != IDX_NIL) {
490 le = (struct ordered_hashmap_entry*)
491 bucket_at_virtual(h, swap, le_to->iterate_previous);
492 le->iterate_next = to;
493 }
494
495 if (lh->iterate_list_head == from)
496 lh->iterate_list_head = to;
497 if (lh->iterate_list_tail == from)
498 lh->iterate_list_tail = to;
499 }
500 }
501
502 static unsigned next_idx(HashmapBase *h, unsigned idx) {
503 return (idx + 1U) % n_buckets(h);
504 }
505
506 static unsigned prev_idx(HashmapBase *h, unsigned idx) {
507 return (n_buckets(h) + idx - 1U) % n_buckets(h);
508 }
509
510 static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
511 switch (h->type) {
512
513 case HASHMAP_TYPE_PLAIN:
514 case HASHMAP_TYPE_ORDERED:
515 return ((struct plain_hashmap_entry*)e)->value;
516
517 case HASHMAP_TYPE_SET:
518 return (void*) e->key;
519
520 default:
521 assert_not_reached();
522 }
523 }
524
525 static void base_remove_entry(HashmapBase *h, unsigned idx) {
526 unsigned left, right, prev, dib;
527 dib_raw_t raw_dib, *dibs;
528
529 dibs = dib_raw_ptr(h);
530 assert(dibs[idx] != DIB_RAW_FREE);
531
532 #if ENABLE_DEBUG_HASHMAP
533 h->debug.rem_count++;
534 h->debug.last_rem_idx = idx;
535 #endif
536
537 left = idx;
538 /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
539 for (right = next_idx(h, left); ; right = next_idx(h, right)) {
540 raw_dib = dibs[right];
541 if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
542 break;
543
544 /* The buckets are not supposed to be all occupied and with DIB > 0.
545 * That would mean we could make everyone better off by shifting them
546 * backward. This scenario is impossible. */
547 assert(left != right);
548 }
549
550 if (h->type == HASHMAP_TYPE_ORDERED) {
551 OrderedHashmap *lh = (OrderedHashmap*) h;
552 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
553
554 if (le->iterate_next != IDX_NIL)
555 ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
556 else
557 lh->iterate_list_tail = le->iterate_previous;
558
559 if (le->iterate_previous != IDX_NIL)
560 ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
561 else
562 lh->iterate_list_head = le->iterate_next;
563 }
564
565 /* Now shift all buckets in the interval (left, right) one step backwards */
566 for (prev = left, left = next_idx(h, left); left != right;
567 prev = left, left = next_idx(h, left)) {
568 dib = bucket_calculate_dib(h, left, dibs[left]);
569 assert(dib != 0);
570 bucket_move_entry(h, NULL, left, prev);
571 bucket_set_dib(h, prev, dib - 1);
572 }
573
574 bucket_mark_free(h, prev);
575 n_entries_dec(h);
576 base_set_dirty(h);
577 }
578 #define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
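/* Worked example of the backward shift above (buckets shown as key(DIB),
 * '.' = free): removing K1 from
 *
 *     [ K1(0) K2(1) K3(1) . ]
 *
 * stops the scan at the free bucket, shifts K2 and K3 one bucket to the left
 * and decrements their DIBs:
 *
 *     [ K2(0) K3(0) .     . ]
 *
 * No tombstones are needed; lookups keep working because no entry ends up
 * further from its optimal bucket than before. */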
579
580 static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
581 struct ordered_hashmap_entry *e;
582 unsigned idx;
583
584 assert(h);
585 assert(i);
586
587 if (i->idx == IDX_NIL)
588 goto at_end;
589
590 if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
591 goto at_end;
592
593 if (i->idx == IDX_FIRST) {
594 idx = h->iterate_list_head;
595 e = ordered_bucket_at(h, idx);
596 } else {
597 idx = i->idx;
598 e = ordered_bucket_at(h, idx);
599 /*
600 * We allow removing the current entry while iterating, but removal may cause
601 * a backward shift. The next entry may thus move one bucket to the left.
602 * To detect when it happens, we remember the key pointer of the entry we were
603 * going to iterate next. If it does not match, there was a backward shift.
604 */
605 if (e->p.b.key != i->next_key) {
606 idx = prev_idx(HASHMAP_BASE(h), idx);
607 e = ordered_bucket_at(h, idx);
608 }
609 assert(e->p.b.key == i->next_key);
610 }
611
612 #if ENABLE_DEBUG_HASHMAP
613 i->prev_idx = idx;
614 #endif
615
616 if (e->iterate_next != IDX_NIL) {
617 struct ordered_hashmap_entry *n;
618 i->idx = e->iterate_next;
619 n = ordered_bucket_at(h, i->idx);
620 i->next_key = n->p.b.key;
621 } else
622 i->idx = IDX_NIL;
623
624 return idx;
625
626 at_end:
627 i->idx = IDX_NIL;
628 return IDX_NIL;
629 }
630
631 static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
632 unsigned idx;
633
634 assert(h);
635 assert(i);
636
637 if (i->idx == IDX_NIL)
638 goto at_end;
639
640 if (i->idx == IDX_FIRST) {
641 /* fast forward to the first occupied bucket */
642 if (h->has_indirect) {
643 i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
644 h->indirect.idx_lowest_entry = i->idx;
645 } else
646 i->idx = skip_free_buckets(h, 0);
647
648 if (i->idx == IDX_NIL)
649 goto at_end;
650 } else {
651 struct hashmap_base_entry *e;
652
653 assert(i->idx > 0);
654
655 e = bucket_at(h, i->idx);
656 /*
657 * We allow removing the current entry while iterating, but removal may cause
658 * a backward shift. The next entry may thus move one bucket to the left.
659 * To detect when it happens, we remember the key pointer of the entry we were
660 * going to iterate next. If it does not match, there was a backward shift.
661 */
662 if (e->key != i->next_key)
663 e = bucket_at(h, --i->idx);
664
665 assert(e->key == i->next_key);
666 }
667
668 idx = i->idx;
669 #if ENABLE_DEBUG_HASHMAP
670 i->prev_idx = idx;
671 #endif
672
673 i->idx = skip_free_buckets(h, i->idx + 1);
674 if (i->idx != IDX_NIL)
675 i->next_key = bucket_at(h, i->idx)->key;
676 else
677 i->idx = IDX_NIL;
678
679 return idx;
680
681 at_end:
682 i->idx = IDX_NIL;
683 return IDX_NIL;
684 }
685
686 static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
687 if (!h) {
688 i->idx = IDX_NIL;
689 return IDX_NIL;
690 }
691
692 #if ENABLE_DEBUG_HASHMAP
693 if (i->idx == IDX_FIRST) {
694 i->put_count = h->debug.put_count;
695 i->rem_count = h->debug.rem_count;
696 } else {
697 /* While iterating, must not add any new entries */
698 assert(i->put_count == h->debug.put_count);
699 /* ... or remove entries other than the current one */
700 assert(i->rem_count == h->debug.rem_count ||
701 (i->rem_count == h->debug.rem_count - 1 &&
702 i->prev_idx == h->debug.last_rem_idx));
703 /* Reset our removals counter */
704 i->rem_count = h->debug.rem_count;
705 }
706 #endif
707
708 return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
709 : hashmap_iterate_in_internal_order(h, i);
710 }
711
712 bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
713 struct hashmap_base_entry *e;
714 void *data;
715 unsigned idx;
716
717 idx = hashmap_iterate_entry(h, i);
718 if (idx == IDX_NIL) {
719 if (value)
720 *value = NULL;
721 if (key)
722 *key = NULL;
723
724 return false;
725 }
726
727 e = bucket_at(h, idx);
728 data = entry_value(h, e);
729 if (value)
730 *value = data;
731 if (key)
732 *key = e->key;
733
734 return true;
735 }
736
737 #define HASHMAP_FOREACH_IDX(idx, h, i) \
738 for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
739 (idx != IDX_NIL); \
740 (idx) = hashmap_iterate_entry((h), &(i)))
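/*
 * Caller-side sketch (not part of this file): the public HASHMAP_FOREACH()
 * and HASHMAP_FOREACH_KEY() macros from hashmap.h drive _hashmap_iterate()
 * above. As noted in the comments, removing the *current* entry while
 * iterating is allowed; inserting entries or removing other entries is not.
 */
#if 0
static void example_drop_null_values(Hashmap *m) {
        const char *key;
        void *value;

        HASHMAP_FOREACH_KEY(value, key, m)
                if (!value)
                        hashmap_remove(m, key); /* removing the current entry is fine */
}
#endif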
741
742 IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
743 IteratedCache *cache;
744
745 assert(h);
746 assert(!h->cached);
747
748 if (h->cached)
749 return NULL;
750
751 cache = new0(IteratedCache, 1);
752 if (!cache)
753 return NULL;
754
755 cache->hashmap = h;
756 h->cached = true;
757
758 return cache;
759 }
760
761 static void reset_direct_storage(HashmapBase *h) {
762 const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
763 void *p;
764
765 assert(!h->has_indirect);
766
767 p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
768 memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
769 }
770
771 static void shared_hash_key_initialize(void) {
772 random_bytes(shared_hash_key, sizeof(shared_hash_key));
773 }
774
775 static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
776 HashmapBase *h;
777 const struct hashmap_type_info *hi = &hashmap_type_info[type];
778
779 bool use_pool = mempool_enabled && mempool_enabled(); /* mempool_enabled is a weak symbol */
780
781 h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
782 if (!h)
783 return NULL;
784
785 h->type = type;
786 h->from_pool = use_pool;
787 h->hash_ops = hash_ops ?: &trivial_hash_ops;
788
789 if (type == HASHMAP_TYPE_ORDERED) {
790 OrderedHashmap *lh = (OrderedHashmap*)h;
791 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
792 }
793
794 reset_direct_storage(h);
795
796 static pthread_once_t once = PTHREAD_ONCE_INIT;
797 assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);
798
799 #if ENABLE_DEBUG_HASHMAP
800 h->debug.func = func;
801 h->debug.file = file;
802 h->debug.line = line;
803 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
804 LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
805 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
806 #endif
807
808 return h;
809 }
810
811 Hashmap *_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
812 return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
813 }
814
815 OrderedHashmap *_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
816 return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
817 }
818
819 Set *_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
820 return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
821 }
822
823 static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
824 enum HashmapType type HASHMAP_DEBUG_PARAMS) {
825 HashmapBase *q;
826
827 assert(h);
828
829 if (*h)
830 return 0;
831
832 q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
833 if (!q)
834 return -ENOMEM;
835
836 *h = q;
837 return 1;
838 }
839
840 int _hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
841 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
842 }
843
844 int _ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
845 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
846 }
847
848 int _set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
849 return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
850 }
851
852 int _hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
853 int r;
854
855 r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
856 if (r < 0)
857 return r;
858
859 return hashmap_put(*h, key, value);
860 }
861
862 int _ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
863 int r;
864
865 r = _ordered_hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
866 if (r < 0)
867 return r;
868
869 return ordered_hashmap_put(*h, key, value);
870 }
871
872 static void hashmap_free_no_clear(HashmapBase *h) {
873 assert(!h->has_indirect);
874 assert(h->n_direct_entries == 0);
875
876 #if ENABLE_DEBUG_HASHMAP
877 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
878 LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
879 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
880 #endif
881
882 if (h->from_pool) {
883 /* Ensure that the object didn't get migrated between threads. */
884 assert_se(is_main_thread());
885 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
886 } else
887 free(h);
888 }
889
890 HashmapBase* _hashmap_free(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
891 if (h) {
892 _hashmap_clear(h, default_free_key, default_free_value);
893 hashmap_free_no_clear(h);
894 }
895
896 return NULL;
897 }
898
899 void _hashmap_clear(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
900 free_func_t free_key, free_value;
901 if (!h)
902 return;
903
904 free_key = h->hash_ops->free_key ?: default_free_key;
905 free_value = h->hash_ops->free_value ?: default_free_value;
906
907 if (free_key || free_value) {
908
909 /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
910 * hash table, and only then call the destructor functions. If these destructors then try to unregister
911 * themselves from our hash table a second time, the entry is already gone. */
912
913 while (_hashmap_size(h) > 0) {
914 void *k = NULL;
915 void *v;
916
917 v = _hashmap_first_key_and_value(h, true, &k);
918
919 if (free_key)
920 free_key(k);
921
922 if (free_value)
923 free_value(v);
924 }
925 }
926
927 if (h->has_indirect) {
928 free(h->indirect.storage);
929 h->has_indirect = false;
930 }
931
932 h->n_direct_entries = 0;
933 reset_direct_storage(h);
934
935 if (h->type == HASHMAP_TYPE_ORDERED) {
936 OrderedHashmap *lh = (OrderedHashmap*) h;
937 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
938 }
939
940 base_set_dirty(h);
941 }
942
943 static int resize_buckets(HashmapBase *h, unsigned entries_add);
944
945 /*
946 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
947 * Performs Robin Hood swaps as it goes. The entry to put must be placed
948 * by the caller into swap slot IDX_PUT.
949 * If used for in-place resizing, may leave a displaced entry in swap slot
950 * IDX_PUT. Caller must rehash it next.
951 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
952 * false otherwise.
953 */
954 static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
955 struct swap_entries *swap) {
956 dib_raw_t raw_dib, *dibs;
957 unsigned dib, distance;
958
959 #if ENABLE_DEBUG_HASHMAP
960 h->debug.put_count++;
961 #endif
962
963 dibs = dib_raw_ptr(h);
964
965 for (distance = 0; ; distance++) {
966 raw_dib = dibs[idx];
967 if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
968 if (raw_dib == DIB_RAW_REHASH)
969 bucket_move_entry(h, swap, idx, IDX_TMP);
970
971 if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
972 h->indirect.idx_lowest_entry = idx;
973
974 bucket_set_dib(h, idx, distance);
975 bucket_move_entry(h, swap, IDX_PUT, idx);
976 if (raw_dib == DIB_RAW_REHASH) {
977 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
978 return true;
979 }
980
981 return false;
982 }
983
984 dib = bucket_calculate_dib(h, idx, raw_dib);
985
986 if (dib < distance) {
987 /* Found a wealthier entry. Go Robin Hood! */
988 bucket_set_dib(h, idx, distance);
989
990 /* swap the entries */
991 bucket_move_entry(h, swap, idx, IDX_TMP);
992 bucket_move_entry(h, swap, IDX_PUT, idx);
993 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
994
995 distance = dib;
996 }
997
998 idx = next_idx(h, idx);
999 }
1000 }
1001
1002 /*
1003 * Puts an entry into a hashmap, boldly - no check whether key already exists.
1004 * The caller must place the entry (only its key and value, not link indexes)
1005 * in swap slot IDX_PUT.
1006 * Caller must ensure: the key does not exist yet in the hashmap.
1007 * that resize is not needed if !may_resize.
1008 * Returns: 1 if entry was put successfully.
1009 * -ENOMEM if may_resize==true and resize failed with -ENOMEM.
1010 * Cannot return -ENOMEM if !may_resize.
1011 */
1012 static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
1013 struct swap_entries *swap, bool may_resize) {
1014 struct ordered_hashmap_entry *new_entry;
1015 int r;
1016
1017 assert(idx < n_buckets(h));
1018
1019 new_entry = bucket_at_swap(swap, IDX_PUT);
1020
1021 if (may_resize) {
1022 r = resize_buckets(h, 1);
1023 if (r < 0)
1024 return r;
1025 if (r > 0)
1026 idx = bucket_hash(h, new_entry->p.b.key);
1027 }
1028 assert(n_entries(h) < n_buckets(h));
1029
1030 if (h->type == HASHMAP_TYPE_ORDERED) {
1031 OrderedHashmap *lh = (OrderedHashmap*) h;
1032
1033 new_entry->iterate_next = IDX_NIL;
1034 new_entry->iterate_previous = lh->iterate_list_tail;
1035
1036 if (lh->iterate_list_tail != IDX_NIL) {
1037 struct ordered_hashmap_entry *old_tail;
1038
1039 old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
1040 assert(old_tail->iterate_next == IDX_NIL);
1041 old_tail->iterate_next = IDX_PUT;
1042 }
1043
1044 lh->iterate_list_tail = IDX_PUT;
1045 if (lh->iterate_list_head == IDX_NIL)
1046 lh->iterate_list_head = IDX_PUT;
1047 }
1048
1049 assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
1050
1051 n_entries_inc(h);
1052 #if ENABLE_DEBUG_HASHMAP
1053 h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1054 #endif
1055
1056 base_set_dirty(h);
1057
1058 return 1;
1059 }
1060 #define hashmap_put_boldly(h, idx, swap, may_resize) \
1061 hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1062
1063 /*
1064 * Returns 0 if resize is not needed.
1065 * 1 if successfully resized.
1066 * -ENOMEM on allocation failure.
1067 */
1068 static int resize_buckets(HashmapBase *h, unsigned entries_add) {
1069 struct swap_entries swap;
1070 void *new_storage;
1071 dib_raw_t *old_dibs, *new_dibs;
1072 const struct hashmap_type_info *hi;
1073 unsigned idx, optimal_idx;
1074 unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
1075 uint8_t new_shift;
1076 bool rehash_next;
1077
1078 assert(h);
1079
1080 hi = &hashmap_type_info[h->type];
1081 new_n_entries = n_entries(h) + entries_add;
1082
1083 /* overflow? */
1084 if (_unlikely_(new_n_entries < entries_add))
1085 return -ENOMEM;
1086
1087 /* For direct storage we allow 100% load, because it's tiny. */
1088 if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
1089 return 0;
1090
1091 /*
1092 * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1093 * From it follows: m = n + n/(INV_KEEP_FREE - 1)
1094 */
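        /* E.g. with INV_KEEP_FREE == 5: 100 entries need 100 + 100/4 = 125 buckets,
         * i.e. a load factor of 100/125 = 0.8. */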
1095 new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
1096 /* overflow? */
1097 if (_unlikely_(new_n_buckets < new_n_entries))
1098 return -ENOMEM;
1099
1100 if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
1101 return -ENOMEM;
1102
1103 old_n_buckets = n_buckets(h);
1104
1105 if (_likely_(new_n_buckets <= old_n_buckets))
1106 return 0;
1107
1108 new_shift = log2u_round_up(MAX(
1109 new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1110 2 * sizeof(struct direct_storage)));
1111
1112 /* Realloc storage (buckets and DIB array). */
1113 new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1114 1U << new_shift);
1115 if (!new_storage)
1116 return -ENOMEM;
1117
1118 /* Must upgrade direct to indirect storage. */
1119 if (!h->has_indirect) {
1120 memcpy(new_storage, h->direct.storage,
1121 old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1122 h->indirect.n_entries = h->n_direct_entries;
1123 h->indirect.idx_lowest_entry = 0;
1124 h->n_direct_entries = 0;
1125 }
1126
1127 /* Get a new hash key. If we've just upgraded to indirect storage,
1128 * allow reusing a previously generated key. It's still a different key
1129 * from the shared one that we used for direct storage. */
1130 get_hash_key(h->indirect.hash_key, !h->has_indirect);
1131
1132 h->has_indirect = true;
1133 h->indirect.storage = new_storage;
1134 h->indirect.n_buckets = (1U << new_shift) /
1135 (hi->entry_size + sizeof(dib_raw_t));
1136
1137 old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
1138 new_dibs = dib_raw_ptr(h);
1139
1140 /*
1141 * Move the DIB array to the new place, replacing valid DIB values with
1142 * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1143 * Note: Overlap is not possible, because we have at least doubled the
1144 * number of buckets and dib_raw_t is smaller than any entry type.
1145 */
1146 for (idx = 0; idx < old_n_buckets; idx++) {
1147 assert(old_dibs[idx] != DIB_RAW_REHASH);
1148 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
1149 : DIB_RAW_REHASH;
1150 }
1151
1152 /* Zero the area of newly added entries (including the old DIB area) */
1153 memzero(bucket_at(h, old_n_buckets),
1154 (n_buckets(h) - old_n_buckets) * hi->entry_size);
1155
1156 /* The upper half of the new DIB array needs initialization */
1157 memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
1158 (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
1159
1160 /* Rehash entries that need it */
1161 n_rehashed = 0;
1162 for (idx = 0; idx < old_n_buckets; idx++) {
1163 if (new_dibs[idx] != DIB_RAW_REHASH)
1164 continue;
1165
1166 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
1167
1168 /*
1169 * Not much to do if by luck the entry hashes to its current
1170 * location. Just set its DIB.
1171 */
1172 if (optimal_idx == idx) {
1173 new_dibs[idx] = 0;
1174 n_rehashed++;
1175 continue;
1176 }
1177
1178 new_dibs[idx] = DIB_RAW_FREE;
1179 bucket_move_entry(h, &swap, idx, IDX_PUT);
1180 /* bucket_move_entry does not clear the source */
1181 memzero(bucket_at(h, idx), hi->entry_size);
1182
1183 do {
1184 /*
1185 * Find the new bucket for the current entry. This may make
1186 * another entry homeless and load it into IDX_PUT.
1187 */
1188 rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
1189 n_rehashed++;
1190
1191 /* Did the current entry displace another one? */
1192 if (rehash_next)
1193 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
1194 } while (rehash_next);
1195 }
1196
1197 assert_se(n_rehashed == n_entries(h));
1198
1199 return 1;
1200 }
1201
1202 /*
1203 * Finds an entry with a matching key
1204 * Returns: index of the found entry, or IDX_NIL if not found.
1205 */
1206 static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1207 struct hashmap_base_entry *e;
1208 unsigned dib, distance;
1209 dib_raw_t *dibs = dib_raw_ptr(h);
1210
1211 assert(idx < n_buckets(h));
1212
1213 for (distance = 0; ; distance++) {
1214 if (dibs[idx] == DIB_RAW_FREE)
1215 return IDX_NIL;
1216
1217 dib = bucket_calculate_dib(h, idx, dibs[idx]);
1218
1219 if (dib < distance)
1220 return IDX_NIL;
1221 if (dib == distance) {
1222 e = bucket_at(h, idx);
1223 if (h->hash_ops->compare(e->key, key) == 0)
1224 return idx;
1225 }
1226
1227 idx = next_idx(h, idx);
1228 }
1229 }
1230 #define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1231
1232 int hashmap_put(Hashmap *h, const void *key, void *value) {
1233 struct swap_entries swap;
1234 struct plain_hashmap_entry *e;
1235 unsigned hash, idx;
1236
1237 assert(h);
1238
1239 hash = bucket_hash(h, key);
1240 idx = bucket_scan(h, hash, key);
1241 if (idx != IDX_NIL) {
1242 e = plain_bucket_at(h, idx);
1243 if (e->value == value)
1244 return 0;
1245 return -EEXIST;
1246 }
1247
1248 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1249 e->b.key = key;
1250 e->value = value;
1251 return hashmap_put_boldly(h, hash, &swap, true);
1252 }
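/*
 * Usage sketch (not part of this file, assuming the public wrappers from
 * hashmap.h, string_hash_ops from hash-funcs.h, and the INT_TO_PTR()/
 * PTR_TO_INT() helpers from macro.h): hashmap_put() returns 1 for a newly
 * added entry, 0 if the exact same key/value pair is already present, and
 * -EEXIST if the key is already mapped to a different value.
 */
#if 0
static int example_put_get(void) {
        Hashmap *m;

        m = hashmap_new(&string_hash_ops);
        if (!m)
                return -ENOMEM;

        assert_se(hashmap_put(m, "answer", INT_TO_PTR(42)) == 1);
        assert_se(PTR_TO_INT(hashmap_get(m, "answer")) == 42);
        assert_se(hashmap_put(m, "answer", INT_TO_PTR(42)) == 0);
        assert_se(hashmap_put(m, "answer", INT_TO_PTR(23)) == -EEXIST);

        m = hashmap_free(m);
        return 0;
}
#endif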
1253
1254 int set_put(Set *s, const void *key) {
1255 struct swap_entries swap;
1256 struct hashmap_base_entry *e;
1257 unsigned hash, idx;
1258
1259 assert(s);
1260
1261 hash = bucket_hash(s, key);
1262 idx = bucket_scan(s, hash, key);
1263 if (idx != IDX_NIL)
1264 return 0;
1265
1266 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1267 e->key = key;
1268 return hashmap_put_boldly(s, hash, &swap, true);
1269 }
1270
1271 int _set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key HASHMAP_DEBUG_PARAMS) {
1272 int r;
1273
1274 r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1275 if (r < 0)
1276 return r;
1277
1278 return set_put(*s, key);
1279 }
1280
1281 int _set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key HASHMAP_DEBUG_PARAMS) {
1282 int r;
1283
1284 r = _set_ensure_put(s, hash_ops, key HASHMAP_DEBUG_PASS_ARGS);
1285 if (r <= 0) {
1286 if (hash_ops && hash_ops->free_key)
1287 hash_ops->free_key(key);
1288 else
1289 free(key);
1290 }
1291
1292 return r;
1293 }
1294
1295 int hashmap_replace(Hashmap *h, const void *key, void *value) {
1296 struct swap_entries swap;
1297 struct plain_hashmap_entry *e;
1298 unsigned hash, idx;
1299
1300 assert(h);
1301
1302 hash = bucket_hash(h, key);
1303 idx = bucket_scan(h, hash, key);
1304 if (idx != IDX_NIL) {
1305 e = plain_bucket_at(h, idx);
1306 #if ENABLE_DEBUG_HASHMAP
1307 /* Although the key is equal, the key pointer may have changed,
1308 * and this would break our assumption for iterating. So count
1309 * this operation as incompatible with iteration. */
1310 if (e->b.key != key) {
1311 h->b.debug.put_count++;
1312 h->b.debug.rem_count++;
1313 h->b.debug.last_rem_idx = idx;
1314 }
1315 #endif
1316 e->b.key = key;
1317 e->value = value;
1318 hashmap_set_dirty(h);
1319
1320 return 0;
1321 }
1322
1323 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1324 e->b.key = key;
1325 e->value = value;
1326 return hashmap_put_boldly(h, hash, &swap, true);
1327 }
1328
1329 int hashmap_update(Hashmap *h, const void *key, void *value) {
1330 struct plain_hashmap_entry *e;
1331 unsigned hash, idx;
1332
1333 assert(h);
1334
1335 hash = bucket_hash(h, key);
1336 idx = bucket_scan(h, hash, key);
1337 if (idx == IDX_NIL)
1338 return -ENOENT;
1339
1340 e = plain_bucket_at(h, idx);
1341 e->value = value;
1342 hashmap_set_dirty(h);
1343
1344 return 0;
1345 }
1346
1347 void* _hashmap_get(HashmapBase *h, const void *key) {
1348 struct hashmap_base_entry *e;
1349 unsigned hash, idx;
1350
1351 if (!h)
1352 return NULL;
1353
1354 hash = bucket_hash(h, key);
1355 idx = bucket_scan(h, hash, key);
1356 if (idx == IDX_NIL)
1357 return NULL;
1358
1359 e = bucket_at(h, idx);
1360 return entry_value(h, e);
1361 }
1362
1363 void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
1364 struct plain_hashmap_entry *e;
1365 unsigned hash, idx;
1366
1367 if (!h)
1368 return NULL;
1369
1370 hash = bucket_hash(h, key);
1371 idx = bucket_scan(h, hash, key);
1372 if (idx == IDX_NIL)
1373 return NULL;
1374
1375 e = plain_bucket_at(h, idx);
1376 if (key2)
1377 *key2 = (void*) e->b.key;
1378
1379 return e->value;
1380 }
1381
1382 bool _hashmap_contains(HashmapBase *h, const void *key) {
1383 unsigned hash;
1384
1385 if (!h)
1386 return false;
1387
1388 hash = bucket_hash(h, key);
1389 return bucket_scan(h, hash, key) != IDX_NIL;
1390 }
1391
1392 void* _hashmap_remove(HashmapBase *h, const void *key) {
1393 struct hashmap_base_entry *e;
1394 unsigned hash, idx;
1395 void *data;
1396
1397 if (!h)
1398 return NULL;
1399
1400 hash = bucket_hash(h, key);
1401 idx = bucket_scan(h, hash, key);
1402 if (idx == IDX_NIL)
1403 return NULL;
1404
1405 e = bucket_at(h, idx);
1406 data = entry_value(h, e);
1407 remove_entry(h, idx);
1408
1409 return data;
1410 }
1411
1412 void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1413 struct plain_hashmap_entry *e;
1414 unsigned hash, idx;
1415 void *data;
1416
1417 if (!h) {
1418 if (rkey)
1419 *rkey = NULL;
1420 return NULL;
1421 }
1422
1423 hash = bucket_hash(h, key);
1424 idx = bucket_scan(h, hash, key);
1425 if (idx == IDX_NIL) {
1426 if (rkey)
1427 *rkey = NULL;
1428 return NULL;
1429 }
1430
1431 e = plain_bucket_at(h, idx);
1432 data = e->value;
1433 if (rkey)
1434 *rkey = (void*) e->b.key;
1435
1436 remove_entry(h, idx);
1437
1438 return data;
1439 }
1440
1441 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1442 struct swap_entries swap;
1443 struct plain_hashmap_entry *e;
1444 unsigned old_hash, new_hash, idx;
1445
1446 if (!h)
1447 return -ENOENT;
1448
1449 old_hash = bucket_hash(h, old_key);
1450 idx = bucket_scan(h, old_hash, old_key);
1451 if (idx == IDX_NIL)
1452 return -ENOENT;
1453
1454 new_hash = bucket_hash(h, new_key);
1455 if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
1456 return -EEXIST;
1457
1458 remove_entry(h, idx);
1459
1460 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1461 e->b.key = new_key;
1462 e->value = value;
1463 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1464
1465 return 0;
1466 }
1467
1468 int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1469 struct swap_entries swap;
1470 struct hashmap_base_entry *e;
1471 unsigned old_hash, new_hash, idx;
1472
1473 if (!s)
1474 return -ENOENT;
1475
1476 old_hash = bucket_hash(s, old_key);
1477 idx = bucket_scan(s, old_hash, old_key);
1478 if (idx == IDX_NIL)
1479 return -ENOENT;
1480
1481 new_hash = bucket_hash(s, new_key);
1482 if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1483 return -EEXIST;
1484
1485 remove_entry(s, idx);
1486
1487 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1488 e->key = new_key;
1489 assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
1490
1491 return 0;
1492 }
1493
1494 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1495 struct swap_entries swap;
1496 struct plain_hashmap_entry *e;
1497 unsigned old_hash, new_hash, idx_old, idx_new;
1498
1499 if (!h)
1500 return -ENOENT;
1501
1502 old_hash = bucket_hash(h, old_key);
1503 idx_old = bucket_scan(h, old_hash, old_key);
1504 if (idx_old == IDX_NIL)
1505 return -ENOENT;
1506
1507 old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
1508
1509 new_hash = bucket_hash(h, new_key);
1510 idx_new = bucket_scan(h, new_hash, new_key);
1511 if (idx_new != IDX_NIL)
1512 if (idx_old != idx_new) {
1513 remove_entry(h, idx_new);
1514 /* Compensate for a possible backward shift. */
1515 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1516 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1517 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1518 }
1519
1520 remove_entry(h, idx_old);
1521
1522 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1523 e->b.key = new_key;
1524 e->value = value;
1525 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1526
1527 return 0;
1528 }
1529
1530 void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
1531 struct hashmap_base_entry *e;
1532 unsigned hash, idx;
1533
1534 if (!h)
1535 return NULL;
1536
1537 hash = bucket_hash(h, key);
1538 idx = bucket_scan(h, hash, key);
1539 if (idx == IDX_NIL)
1540 return NULL;
1541
1542 e = bucket_at(h, idx);
1543 if (entry_value(h, e) != value)
1544 return NULL;
1545
1546 remove_entry(h, idx);
1547
1548 return value;
1549 }
1550
1551 static unsigned find_first_entry(HashmapBase *h) {
1552 Iterator i = ITERATOR_FIRST;
1553
1554 if (!h || !n_entries(h))
1555 return IDX_NIL;
1556
1557 return hashmap_iterate_entry(h, &i);
1558 }
1559
1560 void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
1561 struct hashmap_base_entry *e;
1562 void *key, *data;
1563 unsigned idx;
1564
1565 idx = find_first_entry(h);
1566 if (idx == IDX_NIL) {
1567 if (ret_key)
1568 *ret_key = NULL;
1569 return NULL;
1570 }
1571
1572 e = bucket_at(h, idx);
1573 key = (void*) e->key;
1574 data = entry_value(h, e);
1575
1576 if (remove)
1577 remove_entry(h, idx);
1578
1579 if (ret_key)
1580 *ret_key = key;
1581
1582 return data;
1583 }
1584
1585 unsigned _hashmap_size(HashmapBase *h) {
1586 if (!h)
1587 return 0;
1588
1589 return n_entries(h);
1590 }
1591
1592 unsigned _hashmap_buckets(HashmapBase *h) {
1593 if (!h)
1594 return 0;
1595
1596 return n_buckets(h);
1597 }
1598
1599 int _hashmap_merge(Hashmap *h, Hashmap *other) {
1600 Iterator i;
1601 unsigned idx;
1602
1603 assert(h);
1604
1605 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1606 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1607 int r;
1608
1609 r = hashmap_put(h, pe->b.key, pe->value);
1610 if (r < 0 && r != -EEXIST)
1611 return r;
1612 }
1613
1614 return 0;
1615 }
1616
1617 int set_merge(Set *s, Set *other) {
1618 Iterator i;
1619 unsigned idx;
1620
1621 assert(s);
1622
1623 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1624 struct set_entry *se = set_bucket_at(other, idx);
1625 int r;
1626
1627 r = set_put(s, se->b.key);
1628 if (r < 0)
1629 return r;
1630 }
1631
1632 return 0;
1633 }
1634
1635 int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
1636 int r;
1637
1638 assert(h);
1639
1640 r = resize_buckets(h, entries_add);
1641 if (r < 0)
1642 return r;
1643
1644 return 0;
1645 }
1646
1647 /*
1648 * The same as hashmap_merge(), but every new item from other is moved to h.
1649 * Keys already in h are skipped and stay in other.
1650 * Returns: 0 on success.
1651 * -ENOMEM on alloc failure, in which case no move has been done.
1652 */
1653 int _hashmap_move(HashmapBase *h, HashmapBase *other) {
1654 struct swap_entries swap;
1655 struct hashmap_base_entry *e, *n;
1656 Iterator i;
1657 unsigned idx;
1658 int r;
1659
1660 assert(h);
1661
1662 if (!other)
1663 return 0;
1664
1665 assert(other->type == h->type);
1666
1667 /*
1668 * This reserves buckets for the worst case, where none of other's
1669 * entries are yet present in h. This is preferable to risking
1670 * an allocation failure in the middle of the moving and having to
1671 * rollback or return a partial result.
1672 */
1673 r = resize_buckets(h, n_entries(other));
1674 if (r < 0)
1675 return r;
1676
1677 HASHMAP_FOREACH_IDX(idx, other, i) {
1678 unsigned h_hash;
1679
1680 e = bucket_at(other, idx);
1681 h_hash = bucket_hash(h, e->key);
1682 if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
1683 continue;
1684
1685 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1686 n->key = e->key;
1687 if (h->type != HASHMAP_TYPE_SET)
1688 ((struct plain_hashmap_entry*) n)->value =
1689 ((struct plain_hashmap_entry*) e)->value;
1690 assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);
1691
1692 remove_entry(other, idx);
1693 }
1694
1695 return 0;
1696 }
1697
1698 int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
1699 struct swap_entries swap;
1700 unsigned h_hash, other_hash, idx;
1701 struct hashmap_base_entry *e, *n;
1702 int r;
1703
1704 assert(h);
1705
1706 h_hash = bucket_hash(h, key);
1707 if (bucket_scan(h, h_hash, key) != IDX_NIL)
1708 return -EEXIST;
1709
1710 if (!other)
1711 return -ENOENT;
1712
1713 assert(other->type == h->type);
1714
1715 other_hash = bucket_hash(other, key);
1716 idx = bucket_scan(other, other_hash, key);
1717 if (idx == IDX_NIL)
1718 return -ENOENT;
1719
1720 e = bucket_at(other, idx);
1721
1722 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1723 n->key = e->key;
1724 if (h->type != HASHMAP_TYPE_SET)
1725 ((struct plain_hashmap_entry*) n)->value =
1726 ((struct plain_hashmap_entry*) e)->value;
1727 r = hashmap_put_boldly(h, h_hash, &swap, true);
1728 if (r < 0)
1729 return r;
1730
1731 remove_entry(other, idx);
1732 return 0;
1733 }
1734
1735 HashmapBase* _hashmap_copy(HashmapBase *h HASHMAP_DEBUG_PARAMS) {
1736 HashmapBase *copy;
1737 int r;
1738
1739 assert(h);
1740
1741 copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_PASS_ARGS);
1742 if (!copy)
1743 return NULL;
1744
1745 switch (h->type) {
1746 case HASHMAP_TYPE_PLAIN:
1747 case HASHMAP_TYPE_ORDERED:
1748 r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1749 break;
1750 case HASHMAP_TYPE_SET:
1751 r = set_merge((Set*)copy, (Set*)h);
1752 break;
1753 default:
1754 assert_not_reached();
1755 }
1756
1757 if (r < 0)
1758 return _hashmap_free(copy, NULL, NULL);
1759
1760 return copy;
1761 }
1762
1763 char** _hashmap_get_strv(HashmapBase *h) {
1764 char **sv;
1765 Iterator i;
1766 unsigned idx, n;
1767
1768 if (!h)
1769 return new0(char*, 1);
1770
1771 sv = new(char*, n_entries(h)+1);
1772 if (!sv)
1773 return NULL;
1774
1775 n = 0;
1776 HASHMAP_FOREACH_IDX(idx, h, i)
1777 sv[n++] = entry_value(h, bucket_at(h, idx));
1778 sv[n] = NULL;
1779
1780 return sv;
1781 }
1782
1783 void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1784 struct ordered_hashmap_entry *e;
1785 unsigned hash, idx;
1786
1787 if (!h)
1788 return NULL;
1789
1790 hash = bucket_hash(h, key);
1791 idx = bucket_scan(h, hash, key);
1792 if (idx == IDX_NIL)
1793 return NULL;
1794
1795 e = ordered_bucket_at(h, idx);
1796 if (e->iterate_next == IDX_NIL)
1797 return NULL;
1798 return ordered_bucket_at(h, e->iterate_next)->p.value;
1799 }
1800
1801 int set_consume(Set *s, void *value) {
1802 int r;
1803
1804 assert(s);
1805 assert(value);
1806
1807 r = set_put(s, value);
1808 if (r <= 0)
1809 free(value);
1810
1811 return r;
1812 }
1813
1814 int _hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v HASHMAP_DEBUG_PARAMS) {
1815 int r;
1816
1817 r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1818 if (r < 0)
1819 return r;
1820
1821 _cleanup_free_ char *kdup = NULL, *vdup = NULL;
1822
1823 kdup = strdup(k);
1824 if (!kdup)
1825 return -ENOMEM;
1826
1827 if (v) {
1828 vdup = strdup(v);
1829 if (!vdup)
1830 return -ENOMEM;
1831 }
1832
1833 r = hashmap_put(*h, kdup, vdup);
1834 if (r < 0) {
1835 if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
1836 return 0;
1837 return r;
1838 }
1839
1840 /* 0 with non-null vdup would mean vdup is already in the hashmap, which cannot be */
1841 assert(vdup == NULL || r > 0);
1842 if (r > 0)
1843 kdup = vdup = NULL;
1844
1845 return r;
1846 }
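/*
 * Usage sketch (not part of this file, assuming the hashmap_put_strdup()
 * convenience macro from hashmap.h, which passes string_hash_ops_free_free so
 * that the copied strings are freed together with the map):
 */
#if 0
static int example_put_strdup(Hashmap **props) {
        int r;

        r = hashmap_put_strdup(props, "ID", "fedora"); /* copies both strings */
        if (r < 0)
                return r;

        /* Re-inserting an identical key/value pair is a NOP returning 0, while a
         * different value for an existing key fails with -EEXIST, as implemented
         * above. */
        return hashmap_put_strdup(props, "ID", "fedora");
}
#endif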
1847
1848 int _set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n HASHMAP_DEBUG_PARAMS) {
1849 char *c;
1850 int r;
1851
1852 assert(s);
1853 assert(p);
1854
1855 r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1856 if (r < 0)
1857 return r;
1858
1859 if (n == SIZE_MAX) {
1860 if (set_contains(*s, (char*) p))
1861 return 0;
1862
1863 c = strdup(p);
1864 } else
1865 c = strndup(p, n);
1866 if (!c)
1867 return -ENOMEM;
1868
1869 return set_consume(*s, c);
1870 }
1871
1872 int _set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l HASHMAP_DEBUG_PARAMS) {
1873 int n = 0, r;
1874
1875 assert(s);
1876
1877 STRV_FOREACH(i, l) {
1878 r = _set_put_strndup_full(s, hash_ops, *i, SIZE_MAX HASHMAP_DEBUG_PASS_ARGS);
1879 if (r < 0)
1880 return r;
1881
1882 n += r;
1883 }
1884
1885 return n;
1886 }
1887
1888 int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
1889 const char *p = ASSERT_PTR(v);
1890 int r;
1891
1892 assert(s);
1893
1894 for (;;) {
1895 char *word;
1896
1897 r = extract_first_word(&p, &word, separators, flags);
1898 if (r <= 0)
1899 return r;
1900
1901 r = set_consume(s, word);
1902 if (r < 0)
1903 return r;
1904 }
1905 }
1906
1907 /* expand the cachemem if needed, return true if newly (re)activated. */
1908 static int cachemem_maintain(CacheMem *mem, size_t size) {
1909 assert(mem);
1910
1911 if (!GREEDY_REALLOC(mem->ptr, size)) {
1912 if (size > 0)
1913 return -ENOMEM;
1914 }
1915
1916 if (!mem->active) {
1917 mem->active = true;
1918 return true;
1919 }
1920
1921 return false;
1922 }
1923
1924 int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
1925 bool sync_keys = false, sync_values = false;
1926 size_t size;
1927 int r;
1928
1929 assert(cache);
1930 assert(cache->hashmap);
1931
1932 size = n_entries(cache->hashmap);
1933
1934 if (res_keys) {
1935 r = cachemem_maintain(&cache->keys, size);
1936 if (r < 0)
1937 return r;
1938
1939 sync_keys = r;
1940 } else
1941 cache->keys.active = false;
1942
1943 if (res_values) {
1944 r = cachemem_maintain(&cache->values, size);
1945 if (r < 0)
1946 return r;
1947
1948 sync_values = r;
1949 } else
1950 cache->values.active = false;
1951
1952 if (cache->hashmap->dirty) {
1953 if (cache->keys.active)
1954 sync_keys = true;
1955 if (cache->values.active)
1956 sync_values = true;
1957
1958 cache->hashmap->dirty = false;
1959 }
1960
1961 if (sync_keys || sync_values) {
1962 unsigned i, idx;
1963 Iterator iter;
1964
1965 i = 0;
1966 HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
1967 struct hashmap_base_entry *e;
1968
1969 e = bucket_at(cache->hashmap, idx);
1970
1971 if (sync_keys)
1972 cache->keys.ptr[i] = e->key;
1973 if (sync_values)
1974 cache->values.ptr[i] = entry_value(cache->hashmap, e);
1975 i++;
1976 }
1977 }
1978
1979 if (res_keys)
1980 *res_keys = cache->keys.ptr;
1981 if (res_values)
1982 *res_values = cache->values.ptr;
1983 if (res_n_entries)
1984 *res_n_entries = size;
1985
1986 return 0;
1987 }
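/*
 * Usage sketch (not part of this file; hashmap_iterated_cache_new() is
 * assumed to be the public wrapper in hashmap.h around
 * _hashmap_iterated_cache_new()): the cache keeps flat key/value arrays that
 * are only rebuilt when the hashmap was modified ("dirty") since the previous
 * iterated_cache_get() call.
 */
#if 0
static void example_cache(Hashmap *m) {
        IteratedCache *cache;
        const void **values;
        unsigned n;

        cache = hashmap_iterated_cache_new(m); /* only one cache per hashmap */
        if (!cache)
                return;

        if (iterated_cache_get(cache, NULL, &values, &n) >= 0)
                for (unsigned i = 0; i < n; i++)
                        consume(values[i]); /* hypothetical consumer */

        iterated_cache_free(cache);
}
#endif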
1988
1989 IteratedCache* iterated_cache_free(IteratedCache *cache) {
1990 if (cache) {
1991 free(cache->keys.ptr);
1992 free(cache->values.ptr);
1993 }
1994
1995 return mfree(cache);
1996 }
1997
1998 int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
1999 _cleanup_free_ char *str = NULL;
2000 size_t separator_len, len = 0;
2001 const char *value;
2002 bool first;
2003
2004 assert(ret);
2005
2006 if (set_isempty(s)) {
2007 *ret = NULL;
2008 return 0;
2009 }
2010
2011 separator_len = strlen_ptr(separator);
2012
2013 if (separator_len == 0)
2014 wrap_with_separator = false;
2015
2016 first = !wrap_with_separator;
2017
2018 SET_FOREACH(value, s) {
2019 size_t l = strlen_ptr(value);
2020
2021 if (l == 0)
2022 continue;
2023
2024 if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
2025 return -ENOMEM;
2026
2027 if (separator_len > 0 && !first) {
2028 memcpy(str + len, separator, separator_len);
2029 len += separator_len;
2030 }
2031
2032 memcpy(str + len, value, l);
2033 len += l;
2034 first = false;
2035 }
2036
2037 if (wrap_with_separator) {
2038 memcpy(str + len, separator, separator_len);
2039 len += separator_len;
2040 }
2041
2042 str[len] = '\0';
2043
2044 *ret = TAKE_PTR(str);
2045 return 0;
2046 }
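/* Example (illustrative): for a set containing "a" and "b" (iteration order
 * is unspecified), set_strjoin(s, ",", false, &x) yields "a,b" or "b,a",
 * while set_strjoin(s, ",", true, &x) wraps the result as ",a,b," or ",b,a,".
 * An empty set returns 0 and sets *ret to NULL. */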
2047
2048 bool set_equal(Set *a, Set *b) {
2049 void *p;
2050
2051 /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same
2052 * entries */
2053
2054 if (a == b)
2055 return true;
2056
2057 if (set_isempty(a) && set_isempty(b))
2058 return true;
2059
2060 if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
2061 * already */
2062 return false;
2063
2064 SET_FOREACH(p, a)
2065 if (!set_contains(b, p))
2066 return false;
2067
2068 /* If we have the same hashops, then we don't need to check things backwards given we compared the
2069 * size and that all of a is in b. */
2070 if (a->b.hash_ops == b->b.hash_ops)
2071 return true;
2072
2073 SET_FOREACH(p, b)
2074 if (!set_contains(a, p))
2075 return false;
2076
2077 return true;
2078 }
2079
2080 static bool set_fnmatch_one(Set *patterns, const char *needle) {
2081 const char *p;
2082
2083 assert(needle);
2084
2085 /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as non-matching pattern */
2086
2087 SET_FOREACH(p, patterns)
2088 if (fnmatch(p, needle, 0) == 0)
2089 return true;
2090
2091 return false;
2092 }
2093
2094 bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
2095 assert(needle);
2096
2097 if (set_fnmatch_one(exclude_patterns, needle))
2098 return false;
2099
2100 if (set_isempty(include_patterns))
2101 return true;
2102
2103 return set_fnmatch_one(include_patterns, needle);
2104 }
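/* Example (illustrative): with include_patterns = { "eth*" } and
 * exclude_patterns = { "eth1" }, set_fnmatch(include, exclude, "eth0")
 * returns true while set_fnmatch(include, exclude, "eth1") returns false;
 * with an empty or NULL include set, everything not excluded matches. */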