1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <fnmatch.h>
5 #include <pthread.h>
6 #include <stdint.h>
7 #include <stdlib.h>
8 #if HAVE_VALGRIND_VALGRIND_H
9 # include <valgrind/valgrind.h>
10 #endif
11
12 #include "alloc-util.h"
13 #include "fileio.h"
14 #include "hashmap.h"
15 #include "logarithm.h"
16 #include "macro.h"
17 #include "memory-util.h"
18 #include "mempool.h"
19 #include "missing_syscall.h"
20 #include "process-util.h"
21 #include "random-util.h"
22 #include "set.h"
23 #include "siphash24.h"
24 #include "string-util.h"
25 #include "strv.h"
26
27 #if ENABLE_DEBUG_HASHMAP
28 #include "list.h"
29 #endif
30
31 /*
32 * Implementation of hashmaps.
33 * Addressing: open
34 * - uses less RAM compared to closed addressing (chaining), because
35 * our entries are small (especially in Sets, which tend to contain
36 * the majority of entries in systemd).
37 * Collision resolution: Robin Hood
38 * - tends to equalize displacement of entries from their optimal buckets.
39 * Probe sequence: linear
40 * - though theoretically worse than random probing/uniform hashing/double
41 * hashing, it is good for cache locality.
42 *
43 * References:
44 * Celis, P. 1986. Robin Hood Hashing.
45  * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
46 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
47 * - The results are derived for random probing. Suggests deletion with
48 * tombstones and two mean-centered search methods. None of that works
49 * well for linear probing.
50 *
51 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
52 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
53 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
54 * http://www.math.uu.se/~svante/papers/sj157.pdf
55 * - Applies to Robin Hood with linear probing. Contains remarks on
56 * the unsuitability of mean-centered search with linear probing.
57 *
58 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
59 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
60 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
61 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
62 * in a successful search), and Janson writes about displacement. C = d + 1.
63 *
64 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
65 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
66 * - Explanation of backward shift deletion with pictures.
67 *
68 * Khuong, P. 2013. The Other Robin Hood Hashing.
69 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
70 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
71 */
72
73 /*
74 * XXX Ideas for improvement:
75 * For unordered hashmaps, randomize iteration order, similarly to Perl:
76 * http://blog.booking.com/hardening-perls-hash-function.html
77 */
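/*
 * Illustrative sketch of the public API that the machinery below implements
 * (exposed via hashmap.h). The function name is hypothetical and the function
 * is marked _unused_; it only documents the calling convention and is never
 * called.
 */
_unused_ static void example_basic_usage(void) {
        Hashmap *h;

        /* Create a hashmap keyed by NUL-terminated strings; string_hash_ops
         * hashes and compares key contents, not pointers. */
        h = hashmap_new(&string_hash_ops);
        if (!h)
                return;

        /* hashmap_put() returns 1 for a newly stored entry, 0 if the exact
         * same key/value pair is already present, and -EEXIST if the key is
         * mapped to a different value (see hashmap_put() below). */
        if (hashmap_put(h, "answer", INT_TO_PTR(42)) < 0) {
                hashmap_free(h);
                return;
        }

        /* Lookups return NULL for missing keys. */
        assert_se(PTR_TO_INT(hashmap_get(h, "answer")) == 42);

        h = hashmap_free(h);
}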
78
79 /* INV_KEEP_FREE = 1 / (1 - max_load_factor)
80 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
81 #define INV_KEEP_FREE 5U
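/*
 * Worked example: resize_buckets() below derives the bucket count from the
 * entry count as m = n + n/(INV_KEEP_FREE - 1), which is equivalent to the
 * load factor above. For instance, for 100 entries:
 */
assert_cc(100U + 100U / (INV_KEEP_FREE - 1) == 125U); /* 25 of 125 buckets, i.e. one fifth, stay free */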
82
83 /* Fields common to entries of all hashmap/set types */
84 struct hashmap_base_entry {
85 const void *key;
86 };
87
88 /* Entry types for specific hashmap/set types
89 * hashmap_base_entry must be at the beginning of each entry struct. */
90
91 struct plain_hashmap_entry {
92 struct hashmap_base_entry b;
93 void *value;
94 };
95
96 struct ordered_hashmap_entry {
97 struct plain_hashmap_entry p;
98 unsigned iterate_next, iterate_previous;
99 };
100
101 struct set_entry {
102 struct hashmap_base_entry b;
103 };
104
105 /* In several functions it is advantageous to have the hash table extended
106 * virtually by a couple of additional buckets. We reserve special index values
107 * for these "swap" buckets. */
108 #define _IDX_SWAP_BEGIN (UINT_MAX - 3)
109 #define IDX_PUT (_IDX_SWAP_BEGIN + 0)
110 #define IDX_TMP (_IDX_SWAP_BEGIN + 1)
111 #define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)
112
113 #define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
114 #define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */
115
116 assert_cc(IDX_FIRST == _IDX_SWAP_END);
117 assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
118
119 /* Storage space for the "swap" buckets.
120 * All entry types can fit into an ordered_hashmap_entry. */
121 struct swap_entries {
122 struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
123 };
124
125 /* Distance from Initial Bucket */
126 typedef uint8_t dib_raw_t;
127 #define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
128 #define DIB_RAW_REHASH ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
129 #define DIB_RAW_FREE ((dib_raw_t)0xffU) /* a free bucket */
130 #define DIB_RAW_INIT ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */
131
132 #define DIB_FREE UINT_MAX
133
134 #if ENABLE_DEBUG_HASHMAP
135 struct hashmap_debug_info {
136 LIST_FIELDS(struct hashmap_debug_info, debug_list);
137 unsigned max_entries; /* high watermark of n_entries */
138
139 /* who allocated this hashmap */
140 int line;
141 const char *file;
142 const char *func;
143
144 /* fields to detect modification while iterating */
145 unsigned put_count; /* counts puts into the hashmap */
146 unsigned rem_count; /* counts removals from hashmap */
147 unsigned last_rem_idx; /* remembers last removal index */
148 };
149
150 /* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
151 static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
152 static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
153 #endif
154
155 enum HashmapType {
156 HASHMAP_TYPE_PLAIN,
157 HASHMAP_TYPE_ORDERED,
158 HASHMAP_TYPE_SET,
159 _HASHMAP_TYPE_MAX
160 };
161
162 struct _packed_ indirect_storage {
163 void *storage; /* where buckets and DIBs are stored */
164 uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */
165
166 unsigned n_entries; /* number of stored entries */
167 unsigned n_buckets; /* number of buckets */
168
169 unsigned idx_lowest_entry; /* Index below which all buckets are free.
170 Makes "while (hashmap_steal_first())" loops
171 O(n) instead of O(n^2) for unordered hashmaps. */
172 uint8_t _pad[3]; /* padding for the whole HashmapBase */
173 /* The bitfields in HashmapBase complete the alignment of the whole thing. */
174 };
175
176 struct direct_storage {
177 /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
178 * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
179 * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
180 uint8_t storage[sizeof(struct indirect_storage)];
181 };
182
183 #define DIRECT_BUCKETS(entry_t) \
184 (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))
185
186 /* We should be able to store at least one entry directly. */
187 assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);
188
189 /* We have 3 bits for n_direct_entries. */
190 assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
191
192 /* Hashmaps with directly stored entries all use this shared hash key.
193 * It's no big deal if the key is guessed, because there can be only
194 * a handful of directly stored entries in a hashmap. When a hashmap
195 * outgrows direct storage, it gets its own key for indirect storage. */
196 static uint8_t shared_hash_key[HASH_KEY_SIZE];
197
198 /* Fields that all hashmap/set types must have */
199 struct HashmapBase {
200 const struct hash_ops *hash_ops; /* hash and compare ops to use */
201
202 union _packed_ {
203 struct indirect_storage indirect; /* if has_indirect */
204 struct direct_storage direct; /* if !has_indirect */
205 };
206
207 enum HashmapType type:2; /* HASHMAP_TYPE_* */
208 bool has_indirect:1; /* whether indirect storage is used */
209 unsigned n_direct_entries:3; /* Number of entries in direct storage.
210 * Only valid if !has_indirect. */
211 bool from_pool:1; /* whether was allocated from mempool */
212 bool dirty:1; /* whether dirtied since last iterated_cache_get() */
213 bool cached:1; /* whether this hashmap is being cached */
214
215 #if ENABLE_DEBUG_HASHMAP
216 struct hashmap_debug_info debug;
217 #endif
218 };
219
220 /* Specific hash types
221 * HashmapBase must be at the beginning of each hashmap struct. */
222
223 struct Hashmap {
224 struct HashmapBase b;
225 };
226
227 struct OrderedHashmap {
228 struct HashmapBase b;
229 unsigned iterate_list_head, iterate_list_tail;
230 };
231
232 struct Set {
233 struct HashmapBase b;
234 };
235
236 typedef struct CacheMem {
237 const void **ptr;
238 size_t n_populated;
239 bool active:1;
240 } CacheMem;
241
242 struct IteratedCache {
243 HashmapBase *hashmap;
244 CacheMem keys, values;
245 };
246
247 DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
248 DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
249 /* No need for a separate Set pool */
250 assert_cc(sizeof(Hashmap) == sizeof(Set));
251
252 struct hashmap_type_info {
253 size_t head_size;
254 size_t entry_size;
255 struct mempool *mempool;
256 unsigned n_direct_buckets;
257 };
258
259 static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
260 [HASHMAP_TYPE_PLAIN] = {
261 .head_size = sizeof(Hashmap),
262 .entry_size = sizeof(struct plain_hashmap_entry),
263 .mempool = &hashmap_pool,
264 .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
265 },
266 [HASHMAP_TYPE_ORDERED] = {
267 .head_size = sizeof(OrderedHashmap),
268 .entry_size = sizeof(struct ordered_hashmap_entry),
269 .mempool = &ordered_hashmap_pool,
270 .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
271 },
272 [HASHMAP_TYPE_SET] = {
273 .head_size = sizeof(Set),
274 .entry_size = sizeof(struct set_entry),
275 .mempool = &hashmap_pool,
276 .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
277 },
278 };
279
280 void hashmap_trim_pools(void) {
281 int r;
282
283 /* The pool is only allocated by the main thread, but the memory can be passed to other
284 * threads. Let's clean up if we are the main thread and no other threads are live. */
285
286 /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
287 * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
288 if (getpid() != gettid())
289 return (void) log_debug("Not cleaning up memory pools, not in main thread.");
290
291 r = get_process_threads(0);
292 if (r < 0)
293 return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
294 if (r != 1)
295 return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");
296
297 mempool_trim(&hashmap_pool);
298 mempool_trim(&ordered_hashmap_pool);
299 }
300
301 #if HAVE_VALGRIND_VALGRIND_H
302 _destructor_ static void cleanup_pools(void) {
303 /* Be nice to valgrind */
304 if (RUNNING_ON_VALGRIND)
305 hashmap_trim_pools();
306 }
307 #endif
308
309 static unsigned n_buckets(HashmapBase *h) {
310 return h->has_indirect ? h->indirect.n_buckets
311 : hashmap_type_info[h->type].n_direct_buckets;
312 }
313
314 static unsigned n_entries(HashmapBase *h) {
315 return h->has_indirect ? h->indirect.n_entries
316 : h->n_direct_entries;
317 }
318
319 static void n_entries_inc(HashmapBase *h) {
320 if (h->has_indirect)
321 h->indirect.n_entries++;
322 else
323 h->n_direct_entries++;
324 }
325
326 static void n_entries_dec(HashmapBase *h) {
327 if (h->has_indirect)
328 h->indirect.n_entries--;
329 else
330 h->n_direct_entries--;
331 }
332
333 static void* storage_ptr(HashmapBase *h) {
334 return h->has_indirect ? h->indirect.storage
335 : h->direct.storage;
336 }
337
338 static uint8_t* hash_key(HashmapBase *h) {
339 return h->has_indirect ? h->indirect.hash_key
340 : shared_hash_key;
341 }
342
343 static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
344 struct siphash state;
345 uint64_t hash;
346
347 siphash24_init(&state, hash_key(h));
348
349 h->hash_ops->hash(p, &state);
350
351 hash = siphash24_finalize(&state);
352
353 return (unsigned) (hash % n_buckets(h));
354 }
355 #define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
356
357 static void base_set_dirty(HashmapBase *h) {
358 h->dirty = true;
359 }
360 #define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
361
362 static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
363 static uint8_t current[HASH_KEY_SIZE];
364 static bool current_initialized = false;
365
366 /* Returns a hash function key to use. In order to keep things
367 * fast we will not generate a new key each time we allocate a
368 * new hash table. Instead, we'll just reuse the most recently
369 * generated one, except if we never generated one or when we
370  * are rehashing an entire hash table because we reached a certain
371  * fill level. */
372
373 if (!current_initialized || !reuse_is_ok) {
374 random_bytes(current, sizeof(current));
375 current_initialized = true;
376 }
377
378 memcpy(hash_key, current, sizeof(current));
379 }
380
381 static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
382 return CAST_ALIGN_PTR(
383 struct hashmap_base_entry,
384 (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
385 }
386
387 static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
388 return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
389 }
390
391 static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
392 return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
393 }
394
395 static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
396 return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
397 }
398
399 static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
400 return &swap->e[idx - _IDX_SWAP_BEGIN];
401 }
402
403 /* Returns a pointer to the bucket at index idx.
404 * Understands real indexes and swap indexes, hence "_virtual". */
405 static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
406 unsigned idx) {
407 if (idx < _IDX_SWAP_BEGIN)
408 return bucket_at(h, idx);
409
410 if (idx < _IDX_SWAP_END)
411 return &bucket_at_swap(swap, idx)->p.b;
412
413 assert_not_reached();
414 }
415
416 static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
417 return (dib_raw_t*)
418 ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
419 }
420
421 static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
422 return idx >= from ? idx - from
423 : n_buckets(h) + idx - from;
424 }
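/* Worked example: with n_buckets(h) == 8, an entry whose initial (optimal)
 * bucket is 6 but which ended up in bucket 1 has wrapped around the end of
 * the array, so its distance from the initial bucket is 8 + 1 - 6 = 3. */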
425
426 static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
427 unsigned initial_bucket;
428
429 if (raw_dib == DIB_RAW_FREE)
430 return DIB_FREE;
431
432 if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
433 return raw_dib;
434
435 /*
436 * Having an overflow DIB value is very unlikely. The hash function
437 * would have to be bad. For example, in a table of size 2^24 filled
438 * to load factor 0.9 the maximum observed DIB is only about 60.
439 * In theory (assuming I used Maxima correctly), for an infinite size
440 * hash table with load factor 0.8 the probability of a given entry
441 * having DIB > 40 is 1.9e-8.
442 * This returns the correct DIB value by recomputing the hash value in
443 * the unlikely case. XXX Hitting this case could be a hint to rehash.
444 */
445 initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
446 return bucket_distance(h, idx, initial_bucket);
447 }
448
449 static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
450 dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
451 }
452
453 static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
454 dib_raw_t *dibs;
455
456 dibs = dib_raw_ptr(h);
457
458 for ( ; idx < n_buckets(h); idx++)
459 if (dibs[idx] != DIB_RAW_FREE)
460 return idx;
461
462 return IDX_NIL;
463 }
464
465 static void bucket_mark_free(HashmapBase *h, unsigned idx) {
466 memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
467 bucket_set_dib(h, idx, DIB_FREE);
468 }
469
470 static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
471 unsigned from, unsigned to) {
472 struct hashmap_base_entry *e_from, *e_to;
473
474 assert(from != to);
475
476 e_from = bucket_at_virtual(h, swap, from);
477 e_to = bucket_at_virtual(h, swap, to);
478
479 memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);
480
481 if (h->type == HASHMAP_TYPE_ORDERED) {
482 OrderedHashmap *lh = (OrderedHashmap*) h;
483 struct ordered_hashmap_entry *le, *le_to;
484
485 le_to = (struct ordered_hashmap_entry*) e_to;
486
487 if (le_to->iterate_next != IDX_NIL) {
488 le = (struct ordered_hashmap_entry*)
489 bucket_at_virtual(h, swap, le_to->iterate_next);
490 le->iterate_previous = to;
491 }
492
493 if (le_to->iterate_previous != IDX_NIL) {
494 le = (struct ordered_hashmap_entry*)
495 bucket_at_virtual(h, swap, le_to->iterate_previous);
496 le->iterate_next = to;
497 }
498
499 if (lh->iterate_list_head == from)
500 lh->iterate_list_head = to;
501 if (lh->iterate_list_tail == from)
502 lh->iterate_list_tail = to;
503 }
504 }
505
506 static unsigned next_idx(HashmapBase *h, unsigned idx) {
507 return (idx + 1U) % n_buckets(h);
508 }
509
510 static unsigned prev_idx(HashmapBase *h, unsigned idx) {
511 return (n_buckets(h) + idx - 1U) % n_buckets(h);
512 }
513
514 static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
515 switch (h->type) {
516
517 case HASHMAP_TYPE_PLAIN:
518 case HASHMAP_TYPE_ORDERED:
519 return ((struct plain_hashmap_entry*)e)->value;
520
521 case HASHMAP_TYPE_SET:
522 return (void*) e->key;
523
524 default:
525 assert_not_reached();
526 }
527 }
528
529 static void base_remove_entry(HashmapBase *h, unsigned idx) {
530 unsigned left, right, prev, dib;
531 dib_raw_t raw_dib, *dibs;
532
533 dibs = dib_raw_ptr(h);
534 assert(dibs[idx] != DIB_RAW_FREE);
535
536 #if ENABLE_DEBUG_HASHMAP
537 h->debug.rem_count++;
538 h->debug.last_rem_idx = idx;
539 #endif
540
541 left = idx;
542 /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
543 for (right = next_idx(h, left); ; right = next_idx(h, right)) {
544 raw_dib = dibs[right];
545 if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
546 break;
547
548 /* The buckets are not supposed to be all occupied and with DIB > 0.
549 * That would mean we could make everyone better off by shifting them
550 * backward. This scenario is impossible. */
551 assert(left != right);
552 }
553
554 if (h->type == HASHMAP_TYPE_ORDERED) {
555 OrderedHashmap *lh = (OrderedHashmap*) h;
556 struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);
557
558 if (le->iterate_next != IDX_NIL)
559 ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
560 else
561 lh->iterate_list_tail = le->iterate_previous;
562
563 if (le->iterate_previous != IDX_NIL)
564 ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
565 else
566 lh->iterate_list_head = le->iterate_next;
567 }
568
569 /* Now shift all buckets in the interval (left, right) one step backwards */
570 for (prev = left, left = next_idx(h, left); left != right;
571 prev = left, left = next_idx(h, left)) {
572 dib = bucket_calculate_dib(h, left, dibs[left]);
573 assert(dib != 0);
574 bucket_move_entry(h, NULL, left, prev);
575 bucket_set_dib(h, prev, dib - 1);
576 }
577
578 bucket_mark_free(h, prev);
579 n_entries_dec(h);
580 base_set_dirty(h);
581 }
582 #define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
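/*
 * Worked example of the backward shift above: assume four consecutive buckets
 * holding A (DIB 0), B (DIB 1), C (DIB 1), D (DIB 0). Removing A stops the
 * scan at D (the first bucket with DIB 0), shifts B and C one bucket to the
 * left with their DIBs decremented to 0, and marks the bucket just before D
 * as free. No tombstones are ever created, which is why lookups may stop as
 * soon as they encounter a DIB smaller than the current probe distance.
 */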
583
584 static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
585 struct ordered_hashmap_entry *e;
586 unsigned idx;
587
588 assert(h);
589 assert(i);
590
591 if (i->idx == IDX_NIL)
592 goto at_end;
593
594 if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
595 goto at_end;
596
597 if (i->idx == IDX_FIRST) {
598 idx = h->iterate_list_head;
599 e = ordered_bucket_at(h, idx);
600 } else {
601 idx = i->idx;
602 e = ordered_bucket_at(h, idx);
603 /*
604 * We allow removing the current entry while iterating, but removal may cause
605 * a backward shift. The next entry may thus move one bucket to the left.
606 * To detect when it happens, we remember the key pointer of the entry we were
607 * going to iterate next. If it does not match, there was a backward shift.
608 */
609 if (e->p.b.key != i->next_key) {
610 idx = prev_idx(HASHMAP_BASE(h), idx);
611 e = ordered_bucket_at(h, idx);
612 }
613 assert(e->p.b.key == i->next_key);
614 }
615
616 #if ENABLE_DEBUG_HASHMAP
617 i->prev_idx = idx;
618 #endif
619
620 if (e->iterate_next != IDX_NIL) {
621 struct ordered_hashmap_entry *n;
622 i->idx = e->iterate_next;
623 n = ordered_bucket_at(h, i->idx);
624 i->next_key = n->p.b.key;
625 } else
626 i->idx = IDX_NIL;
627
628 return idx;
629
630 at_end:
631 i->idx = IDX_NIL;
632 return IDX_NIL;
633 }
634
635 static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
636 unsigned idx;
637
638 assert(h);
639 assert(i);
640
641 if (i->idx == IDX_NIL)
642 goto at_end;
643
644 if (i->idx == IDX_FIRST) {
645 /* fast forward to the first occupied bucket */
646 if (h->has_indirect) {
647 i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
648 h->indirect.idx_lowest_entry = i->idx;
649 } else
650 i->idx = skip_free_buckets(h, 0);
651
652 if (i->idx == IDX_NIL)
653 goto at_end;
654 } else {
655 struct hashmap_base_entry *e;
656
657 assert(i->idx > 0);
658
659 e = bucket_at(h, i->idx);
660 /*
661 * We allow removing the current entry while iterating, but removal may cause
662 * a backward shift. The next entry may thus move one bucket to the left.
663 * To detect when it happens, we remember the key pointer of the entry we were
664 * going to iterate next. If it does not match, there was a backward shift.
665 */
666 if (e->key != i->next_key)
667 e = bucket_at(h, --i->idx);
668
669 assert(e->key == i->next_key);
670 }
671
672 idx = i->idx;
673 #if ENABLE_DEBUG_HASHMAP
674 i->prev_idx = idx;
675 #endif
676
677 i->idx = skip_free_buckets(h, i->idx + 1);
678 if (i->idx != IDX_NIL)
679 i->next_key = bucket_at(h, i->idx)->key;
680 else
681 i->idx = IDX_NIL;
682
683 return idx;
684
685 at_end:
686 i->idx = IDX_NIL;
687 return IDX_NIL;
688 }
689
690 static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
691 if (!h) {
692 i->idx = IDX_NIL;
693 return IDX_NIL;
694 }
695
696 #if ENABLE_DEBUG_HASHMAP
697 if (i->idx == IDX_FIRST) {
698 i->put_count = h->debug.put_count;
699 i->rem_count = h->debug.rem_count;
700 } else {
701 /* While iterating, must not add any new entries */
702 assert(i->put_count == h->debug.put_count);
703 /* ... or remove entries other than the current one */
704 assert(i->rem_count == h->debug.rem_count ||
705 (i->rem_count == h->debug.rem_count - 1 &&
706 i->prev_idx == h->debug.last_rem_idx));
707 /* Reset our removals counter */
708 i->rem_count = h->debug.rem_count;
709 }
710 #endif
711
712 return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
713 : hashmap_iterate_in_internal_order(h, i);
714 }
715
716 bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
717 struct hashmap_base_entry *e;
718 void *data;
719 unsigned idx;
720
721 idx = hashmap_iterate_entry(h, i);
722 if (idx == IDX_NIL) {
723 if (value)
724 *value = NULL;
725 if (key)
726 *key = NULL;
727
728 return false;
729 }
730
731 e = bucket_at(h, idx);
732 data = entry_value(h, e);
733 if (value)
734 *value = data;
735 if (key)
736 *key = e->key;
737
738 return true;
739 }
740
741 #define HASHMAP_FOREACH_IDX(idx, h, i) \
742 for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
743 (idx != IDX_NIL); \
744 (idx) = hashmap_iterate_entry((h), &(i)))
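/*
 * Illustrative sketch: outside callers consume _hashmap_iterate() through the
 * HASHMAP_FOREACH()/HASHMAP_FOREACH_KEY() macros from hashmap.h. The function
 * below is hypothetical (it assumes string keys) and marked _unused_; it only
 * shows the pattern. Removing the *current* entry while iterating is fine,
 * any other modification trips the ENABLE_DEBUG_HASHMAP assertions in
 * hashmap_iterate_entry() above.
 */
_unused_ static void example_iteration(Hashmap *h) {
        const char *key;
        void *value;

        HASHMAP_FOREACH_KEY(value, key, h)
                log_debug("%s -> %p", key, value);
}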
745
746 IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
747 IteratedCache *cache;
748
749 assert(h);
750 assert(!h->cached);
751
752 if (h->cached)
753 return NULL;
754
755 cache = new0(IteratedCache, 1);
756 if (!cache)
757 return NULL;
758
759 cache->hashmap = h;
760 h->cached = true;
761
762 return cache;
763 }
764
765 static void reset_direct_storage(HashmapBase *h) {
766 const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
767 void *p;
768
769 assert(!h->has_indirect);
770
771 p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
772 memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
773 }
774
775 static void shared_hash_key_initialize(void) {
776 random_bytes(shared_hash_key, sizeof(shared_hash_key));
777 }
778
779 static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
780 HashmapBase *h;
781 const struct hashmap_type_info *hi = &hashmap_type_info[type];
782
783 bool use_pool = mempool_enabled && mempool_enabled(); /* mempool_enabled is a weak symbol */
784
785 h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
786 if (!h)
787 return NULL;
788
789 h->type = type;
790 h->from_pool = use_pool;
791 h->hash_ops = hash_ops ?: &trivial_hash_ops;
792
793 if (type == HASHMAP_TYPE_ORDERED) {
794 OrderedHashmap *lh = (OrderedHashmap*)h;
795 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
796 }
797
798 reset_direct_storage(h);
799
800 static pthread_once_t once = PTHREAD_ONCE_INIT;
801 assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);
802
803 #if ENABLE_DEBUG_HASHMAP
804 h->debug.func = func;
805 h->debug.file = file;
806 h->debug.line = line;
807 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
808 LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
809 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
810 #endif
811
812 return h;
813 }
814
815 Hashmap *_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
816 return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
817 }
818
819 OrderedHashmap *_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
820 return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
821 }
822
823 Set *_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
824 return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
825 }
826
827 static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
828 enum HashmapType type HASHMAP_DEBUG_PARAMS) {
829 HashmapBase *q;
830
831 assert(h);
832
833 if (*h)
834 return 0;
835
836 q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
837 if (!q)
838 return -ENOMEM;
839
840 *h = q;
841 return 1;
842 }
843
844 int _hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
845 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
846 }
847
848 int _ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
849 return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
850 }
851
852 int _set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
853 return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
854 }
855
856 int _hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
857 int r;
858
859 r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
860 if (r < 0)
861 return r;
862
863 return hashmap_put(*h, key, value);
864 }
865
866 int _ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
867 int r;
868
869 r = _ordered_hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
870 if (r < 0)
871 return r;
872
873 return ordered_hashmap_put(*h, key, value);
874 }
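/*
 * Sketch of the lazy-allocation pattern these _ensure_put() helpers enable
 * (exposed via the hashmap_ensure_put()/ordered_hashmap_ensure_put() macros
 * in hashmap.h); the variable names are hypothetical:
 *
 *         Hashmap *h = NULL;                              // nothing allocated yet
 *         r = hashmap_ensure_put(&h, &string_hash_ops, key, value);
 *         if (r < 0)
 *                 return r;                               // allocation or insertion failed
 *
 * On success the map is allocated on first use and r carries hashmap_put()'s
 * return value.
 */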
875
876 static void hashmap_free_no_clear(HashmapBase *h) {
877 assert(!h->has_indirect);
878 assert(h->n_direct_entries == 0);
879
880 #if ENABLE_DEBUG_HASHMAP
881 assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
882 LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
883 assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
884 #endif
885
886 if (h->from_pool) {
887 /* Ensure that the object didn't get migrated between threads. */
888 assert_se(is_main_thread());
889 mempool_free_tile(hashmap_type_info[h->type].mempool, h);
890 } else
891 free(h);
892 }
893
894 HashmapBase* _hashmap_free(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
895 if (h) {
896 _hashmap_clear(h, default_free_key, default_free_value);
897 hashmap_free_no_clear(h);
898 }
899
900 return NULL;
901 }
902
903 void _hashmap_clear(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
904 free_func_t free_key, free_value;
905 if (!h)
906 return;
907
908 free_key = h->hash_ops->free_key ?: default_free_key;
909 free_value = h->hash_ops->free_value ?: default_free_value;
910
911 if (free_key || free_value) {
912
913 /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
914 * hash table, and only then call the destructor functions. If these destructors then try to unregister
915 * themselves from our hash table a second time, the entry is already gone. */
916
917 while (_hashmap_size(h) > 0) {
918 void *k = NULL;
919 void *v;
920
921 v = _hashmap_first_key_and_value(h, true, &k);
922
923 if (free_key)
924 free_key(k);
925
926 if (free_value)
927 free_value(v);
928 }
929 }
930
931 if (h->has_indirect) {
932 free(h->indirect.storage);
933 h->has_indirect = false;
934 }
935
936 h->n_direct_entries = 0;
937 reset_direct_storage(h);
938
939 if (h->type == HASHMAP_TYPE_ORDERED) {
940 OrderedHashmap *lh = (OrderedHashmap*) h;
941 lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
942 }
943
944 base_set_dirty(h);
945 }
946
947 static int resize_buckets(HashmapBase *h, unsigned entries_add);
948
949 /*
950 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
951 * Performs Robin Hood swaps as it goes. The entry to put must be placed
952 * by the caller into swap slot IDX_PUT.
953 * If used for in-place resizing, may leave a displaced entry in swap slot
954 * IDX_PUT. Caller must rehash it next.
955 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
956 * false otherwise.
957 */
958 static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
959 struct swap_entries *swap) {
960 dib_raw_t raw_dib, *dibs;
961 unsigned dib, distance;
962
963 #if ENABLE_DEBUG_HASHMAP
964 h->debug.put_count++;
965 #endif
966
967 dibs = dib_raw_ptr(h);
968
969 for (distance = 0; ; distance++) {
970 raw_dib = dibs[idx];
971 if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
972 if (raw_dib == DIB_RAW_REHASH)
973 bucket_move_entry(h, swap, idx, IDX_TMP);
974
975 if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
976 h->indirect.idx_lowest_entry = idx;
977
978 bucket_set_dib(h, idx, distance);
979 bucket_move_entry(h, swap, IDX_PUT, idx);
980 if (raw_dib == DIB_RAW_REHASH) {
981 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
982 return true;
983 }
984
985 return false;
986 }
987
988 dib = bucket_calculate_dib(h, idx, raw_dib);
989
990 if (dib < distance) {
991 /* Found a wealthier entry. Go Robin Hood! */
992 bucket_set_dib(h, idx, distance);
993
994 /* swap the entries */
995 bucket_move_entry(h, swap, idx, IDX_TMP);
996 bucket_move_entry(h, swap, IDX_PUT, idx);
997 bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
998
999 distance = dib;
1000 }
1001
1002 idx = next_idx(h, idx);
1003 }
1004 }
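/*
 * Worked example of the Robin Hood swap above: suppose the entry being placed
 * has already probed 3 buckets (distance == 3) and the bucket it now inspects
 * holds a resident with DIB == 1. The resident is "richer" (closer to its
 * optimal bucket), so the new entry takes the bucket with DIB 3 and the
 * displaced resident continues probing from the next bucket with distance 2,
 * 3, ... This equalization is what keeps displacement variance low.
 */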
1005
1006 /*
1007 * Puts an entry into a hashmap, boldly - no check whether key already exists.
1008 * The caller must place the entry (only its key and value, not link indexes)
1009 * in swap slot IDX_PUT.
1010  * Caller must ensure that the key does not exist yet in the hashmap,
1011  * and that resize is not needed if !may_resize.
1012 * Returns: 1 if entry was put successfully.
1013 * -ENOMEM if may_resize==true and resize failed with -ENOMEM.
1014 * Cannot return -ENOMEM if !may_resize.
1015 */
1016 static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
1017 struct swap_entries *swap, bool may_resize) {
1018 struct ordered_hashmap_entry *new_entry;
1019 int r;
1020
1021 assert(idx < n_buckets(h));
1022
1023 new_entry = bucket_at_swap(swap, IDX_PUT);
1024
1025 if (may_resize) {
1026 r = resize_buckets(h, 1);
1027 if (r < 0)
1028 return r;
1029 if (r > 0)
1030 idx = bucket_hash(h, new_entry->p.b.key);
1031 }
1032 assert(n_entries(h) < n_buckets(h));
1033
1034 if (h->type == HASHMAP_TYPE_ORDERED) {
1035 OrderedHashmap *lh = (OrderedHashmap*) h;
1036
1037 new_entry->iterate_next = IDX_NIL;
1038 new_entry->iterate_previous = lh->iterate_list_tail;
1039
1040 if (lh->iterate_list_tail != IDX_NIL) {
1041 struct ordered_hashmap_entry *old_tail;
1042
1043 old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
1044 assert(old_tail->iterate_next == IDX_NIL);
1045 old_tail->iterate_next = IDX_PUT;
1046 }
1047
1048 lh->iterate_list_tail = IDX_PUT;
1049 if (lh->iterate_list_head == IDX_NIL)
1050 lh->iterate_list_head = IDX_PUT;
1051 }
1052
1053 assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
1054
1055 n_entries_inc(h);
1056 #if ENABLE_DEBUG_HASHMAP
1057 h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
1058 #endif
1059
1060 base_set_dirty(h);
1061
1062 return 1;
1063 }
1064 #define hashmap_put_boldly(h, idx, swap, may_resize) \
1065 hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1066
1067 /*
1068 * Returns 0 if resize is not needed.
1069 * 1 if successfully resized.
1070 * -ENOMEM on allocation failure.
1071 */
1072 static int resize_buckets(HashmapBase *h, unsigned entries_add) {
1073 struct swap_entries swap;
1074 void *new_storage;
1075 dib_raw_t *old_dibs, *new_dibs;
1076 const struct hashmap_type_info *hi;
1077 unsigned idx, optimal_idx;
1078 unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
1079 uint8_t new_shift;
1080 bool rehash_next;
1081
1082 assert(h);
1083
1084 hi = &hashmap_type_info[h->type];
1085 new_n_entries = n_entries(h) + entries_add;
1086
1087 /* overflow? */
1088 if (_unlikely_(new_n_entries < entries_add))
1089 return -ENOMEM;
1090
1091 /* For direct storage we allow 100% load, because it's tiny. */
1092 if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
1093 return 0;
1094
1095 /*
1096 * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
1097 * From it follows: m = n + n/(INV_KEEP_FREE - 1)
1098 */
1099 new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
1100 /* overflow? */
1101 if (_unlikely_(new_n_buckets < new_n_entries))
1102 return -ENOMEM;
1103
1104 if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
1105 return -ENOMEM;
1106
1107 old_n_buckets = n_buckets(h);
1108
1109 if (_likely_(new_n_buckets <= old_n_buckets))
1110 return 0;
1111
1112 new_shift = log2u_round_up(MAX(
1113 new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
1114 2 * sizeof(struct direct_storage)));
1115
1116 /* Realloc storage (buckets and DIB array). */
1117 new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
1118 1U << new_shift);
1119 if (!new_storage)
1120 return -ENOMEM;
1121
1122 /* Must upgrade direct to indirect storage. */
1123 if (!h->has_indirect) {
1124 memcpy(new_storage, h->direct.storage,
1125 old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
1126 h->indirect.n_entries = h->n_direct_entries;
1127 h->indirect.idx_lowest_entry = 0;
1128 h->n_direct_entries = 0;
1129 }
1130
1131 /* Get a new hash key. If we've just upgraded to indirect storage,
1132 * allow reusing a previously generated key. It's still a different key
1133 * from the shared one that we used for direct storage. */
1134 get_hash_key(h->indirect.hash_key, !h->has_indirect);
1135
1136 h->has_indirect = true;
1137 h->indirect.storage = new_storage;
1138 h->indirect.n_buckets = (1U << new_shift) /
1139 (hi->entry_size + sizeof(dib_raw_t));
1140
1141 old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
1142 new_dibs = dib_raw_ptr(h);
1143
1144 /*
1145 * Move the DIB array to the new place, replacing valid DIB values with
1146 * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
1147 * Note: Overlap is not possible, because we have at least doubled the
1148 * number of buckets and dib_raw_t is smaller than any entry type.
1149 */
1150 for (idx = 0; idx < old_n_buckets; idx++) {
1151 assert(old_dibs[idx] != DIB_RAW_REHASH);
1152 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
1153 : DIB_RAW_REHASH;
1154 }
1155
1156 /* Zero the area of newly added entries (including the old DIB area) */
1157 memzero(bucket_at(h, old_n_buckets),
1158 (n_buckets(h) - old_n_buckets) * hi->entry_size);
1159
1160 /* The upper half of the new DIB array needs initialization */
1161 memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
1162 (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
1163
1164 /* Rehash entries that need it */
1165 n_rehashed = 0;
1166 for (idx = 0; idx < old_n_buckets; idx++) {
1167 if (new_dibs[idx] != DIB_RAW_REHASH)
1168 continue;
1169
1170 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
1171
1172 /*
1173 * Not much to do if by luck the entry hashes to its current
1174 * location. Just set its DIB.
1175 */
1176 if (optimal_idx == idx) {
1177 new_dibs[idx] = 0;
1178 n_rehashed++;
1179 continue;
1180 }
1181
1182 new_dibs[idx] = DIB_RAW_FREE;
1183 bucket_move_entry(h, &swap, idx, IDX_PUT);
1184 /* bucket_move_entry does not clear the source */
1185 memzero(bucket_at(h, idx), hi->entry_size);
1186
1187 do {
1188 /*
1189 * Find the new bucket for the current entry. This may make
1190 * another entry homeless and load it into IDX_PUT.
1191 */
1192 rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
1193 n_rehashed++;
1194
1195 /* Did the current entry displace another one? */
1196 if (rehash_next)
1197 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
1198 } while (rehash_next);
1199 }
1200
1201 assert_se(n_rehashed == n_entries(h));
1202
1203 return 1;
1204 }
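/*
 * Worked sizing example (64-bit, Set entries): growing past the 4 direct
 * buckets to hold 5 entries gives new_n_buckets = 5 + 5/4 = 6. Six buckets
 * need 6 * (8 + 1) = 54 bytes for entries plus DIBs, which is below the
 * 2 * sizeof(struct direct_storage) = 78 byte floor, so the allocation is
 * rounded up to the next power of two, 128 bytes, and n_buckets ends up as
 * 128 / 9 = 14.
 */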
1205
1206 /*
1207 * Finds an entry with a matching key
1208 * Returns: index of the found entry, or IDX_NIL if not found.
1209 */
1210 static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
1211 struct hashmap_base_entry *e;
1212 unsigned dib, distance;
1213 dib_raw_t *dibs = dib_raw_ptr(h);
1214
1215 assert(idx < n_buckets(h));
1216
1217 for (distance = 0; ; distance++) {
1218 if (dibs[idx] == DIB_RAW_FREE)
1219 return IDX_NIL;
1220
1221 dib = bucket_calculate_dib(h, idx, dibs[idx]);
1222
1223 if (dib < distance)
1224 return IDX_NIL;
1225 if (dib == distance) {
1226 e = bucket_at(h, idx);
1227 if (h->hash_ops->compare(e->key, key) == 0)
1228 return idx;
1229 }
1230
1231 idx = next_idx(h, idx);
1232 }
1233 }
1234 #define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1235
1236 int hashmap_put(Hashmap *h, const void *key, void *value) {
1237 struct swap_entries swap;
1238 struct plain_hashmap_entry *e;
1239 unsigned hash, idx;
1240
1241 assert(h);
1242
1243 hash = bucket_hash(h, key);
1244 idx = bucket_scan(h, hash, key);
1245 if (idx != IDX_NIL) {
1246 e = plain_bucket_at(h, idx);
1247 if (e->value == value)
1248 return 0;
1249 return -EEXIST;
1250 }
1251
1252 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1253 e->b.key = key;
1254 e->value = value;
1255 return hashmap_put_boldly(h, hash, &swap, true);
1256 }
1257
1258 int set_put(Set *s, const void *key) {
1259 struct swap_entries swap;
1260 struct hashmap_base_entry *e;
1261 unsigned hash, idx;
1262
1263 assert(s);
1264
1265 hash = bucket_hash(s, key);
1266 idx = bucket_scan(s, hash, key);
1267 if (idx != IDX_NIL)
1268 return 0;
1269
1270 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1271 e->key = key;
1272 return hashmap_put_boldly(s, hash, &swap, true);
1273 }
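/*
 * Note the asymmetry with hashmap_put() above: set_put() treats an
 * already-present key as success and returns 0, whereas hashmap_put() returns
 * 0 only if the key is already mapped to the very same value and -EEXIST if
 * it is mapped to a different one.
 */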
1274
1275 int _set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key HASHMAP_DEBUG_PARAMS) {
1276 int r;
1277
1278 r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1279 if (r < 0)
1280 return r;
1281
1282 return set_put(*s, key);
1283 }
1284
1285 int _set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key HASHMAP_DEBUG_PARAMS) {
1286 int r;
1287
1288 r = _set_ensure_put(s, hash_ops, key HASHMAP_DEBUG_PASS_ARGS);
1289 if (r <= 0) {
1290 if (hash_ops && hash_ops->free_key)
1291 hash_ops->free_key(key);
1292 else
1293 free(key);
1294 }
1295
1296 return r;
1297 }
1298
1299 int hashmap_replace(Hashmap *h, const void *key, void *value) {
1300 struct swap_entries swap;
1301 struct plain_hashmap_entry *e;
1302 unsigned hash, idx;
1303
1304 assert(h);
1305
1306 hash = bucket_hash(h, key);
1307 idx = bucket_scan(h, hash, key);
1308 if (idx != IDX_NIL) {
1309 e = plain_bucket_at(h, idx);
1310 #if ENABLE_DEBUG_HASHMAP
1311 /* Although the key is equal, the key pointer may have changed,
1312 * and this would break our assumption for iterating. So count
1313 * this operation as incompatible with iteration. */
1314 if (e->b.key != key) {
1315 h->b.debug.put_count++;
1316 h->b.debug.rem_count++;
1317 h->b.debug.last_rem_idx = idx;
1318 }
1319 #endif
1320 e->b.key = key;
1321 e->value = value;
1322 hashmap_set_dirty(h);
1323
1324 return 0;
1325 }
1326
1327 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1328 e->b.key = key;
1329 e->value = value;
1330 return hashmap_put_boldly(h, hash, &swap, true);
1331 }
1332
1333 int hashmap_update(Hashmap *h, const void *key, void *value) {
1334 struct plain_hashmap_entry *e;
1335 unsigned hash, idx;
1336
1337 assert(h);
1338
1339 hash = bucket_hash(h, key);
1340 idx = bucket_scan(h, hash, key);
1341 if (idx == IDX_NIL)
1342 return -ENOENT;
1343
1344 e = plain_bucket_at(h, idx);
1345 e->value = value;
1346 hashmap_set_dirty(h);
1347
1348 return 0;
1349 }
1350
1351 void* _hashmap_get(HashmapBase *h, const void *key) {
1352 struct hashmap_base_entry *e;
1353 unsigned hash, idx;
1354
1355 if (!h)
1356 return NULL;
1357
1358 hash = bucket_hash(h, key);
1359 idx = bucket_scan(h, hash, key);
1360 if (idx == IDX_NIL)
1361 return NULL;
1362
1363 e = bucket_at(h, idx);
1364 return entry_value(h, e);
1365 }
1366
1367 void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
1368 struct plain_hashmap_entry *e;
1369 unsigned hash, idx;
1370
1371 if (!h)
1372 return NULL;
1373
1374 hash = bucket_hash(h, key);
1375 idx = bucket_scan(h, hash, key);
1376 if (idx == IDX_NIL)
1377 return NULL;
1378
1379 e = plain_bucket_at(h, idx);
1380 if (key2)
1381 *key2 = (void*) e->b.key;
1382
1383 return e->value;
1384 }
1385
1386 bool _hashmap_contains(HashmapBase *h, const void *key) {
1387 unsigned hash;
1388
1389 if (!h)
1390 return false;
1391
1392 hash = bucket_hash(h, key);
1393 return bucket_scan(h, hash, key) != IDX_NIL;
1394 }
1395
1396 void* _hashmap_remove(HashmapBase *h, const void *key) {
1397 struct hashmap_base_entry *e;
1398 unsigned hash, idx;
1399 void *data;
1400
1401 if (!h)
1402 return NULL;
1403
1404 hash = bucket_hash(h, key);
1405 idx = bucket_scan(h, hash, key);
1406 if (idx == IDX_NIL)
1407 return NULL;
1408
1409 e = bucket_at(h, idx);
1410 data = entry_value(h, e);
1411 remove_entry(h, idx);
1412
1413 return data;
1414 }
1415
1416 void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1417 struct plain_hashmap_entry *e;
1418 unsigned hash, idx;
1419 void *data;
1420
1421 if (!h) {
1422 if (rkey)
1423 *rkey = NULL;
1424 return NULL;
1425 }
1426
1427 hash = bucket_hash(h, key);
1428 idx = bucket_scan(h, hash, key);
1429 if (idx == IDX_NIL) {
1430 if (rkey)
1431 *rkey = NULL;
1432 return NULL;
1433 }
1434
1435 e = plain_bucket_at(h, idx);
1436 data = e->value;
1437 if (rkey)
1438 *rkey = (void*) e->b.key;
1439
1440 remove_entry(h, idx);
1441
1442 return data;
1443 }
1444
1445 int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1446 struct swap_entries swap;
1447 struct plain_hashmap_entry *e;
1448 unsigned old_hash, new_hash, idx;
1449
1450 if (!h)
1451 return -ENOENT;
1452
1453 old_hash = bucket_hash(h, old_key);
1454 idx = bucket_scan(h, old_hash, old_key);
1455 if (idx == IDX_NIL)
1456 return -ENOENT;
1457
1458 new_hash = bucket_hash(h, new_key);
1459 if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
1460 return -EEXIST;
1461
1462 remove_entry(h, idx);
1463
1464 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1465 e->b.key = new_key;
1466 e->value = value;
1467 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1468
1469 return 0;
1470 }
1471
1472 int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
1473 struct swap_entries swap;
1474 struct hashmap_base_entry *e;
1475 unsigned old_hash, new_hash, idx;
1476
1477 if (!s)
1478 return -ENOENT;
1479
1480 old_hash = bucket_hash(s, old_key);
1481 idx = bucket_scan(s, old_hash, old_key);
1482 if (idx == IDX_NIL)
1483 return -ENOENT;
1484
1485 new_hash = bucket_hash(s, new_key);
1486 if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
1487 return -EEXIST;
1488
1489 remove_entry(s, idx);
1490
1491 e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1492 e->key = new_key;
1493 assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
1494
1495 return 0;
1496 }
1497
1498 int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
1499 struct swap_entries swap;
1500 struct plain_hashmap_entry *e;
1501 unsigned old_hash, new_hash, idx_old, idx_new;
1502
1503 if (!h)
1504 return -ENOENT;
1505
1506 old_hash = bucket_hash(h, old_key);
1507 idx_old = bucket_scan(h, old_hash, old_key);
1508 if (idx_old == IDX_NIL)
1509 return -ENOENT;
1510
1511 old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
1512
1513 new_hash = bucket_hash(h, new_key);
1514 idx_new = bucket_scan(h, new_hash, new_key);
1515 if (idx_new != IDX_NIL)
1516 if (idx_old != idx_new) {
1517 remove_entry(h, idx_new);
1518 /* Compensate for a possible backward shift. */
1519 if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
1520 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
1521 assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
1522 }
1523
1524 remove_entry(h, idx_old);
1525
1526 e = &bucket_at_swap(&swap, IDX_PUT)->p;
1527 e->b.key = new_key;
1528 e->value = value;
1529 assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
1530
1531 return 0;
1532 }
1533
1534 void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) {
1535 struct hashmap_base_entry *e;
1536 unsigned hash, idx;
1537
1538 if (!h)
1539 return NULL;
1540
1541 hash = bucket_hash(h, key);
1542 idx = bucket_scan(h, hash, key);
1543 if (idx == IDX_NIL)
1544 return NULL;
1545
1546 e = bucket_at(h, idx);
1547 if (entry_value(h, e) != value)
1548 return NULL;
1549
1550 remove_entry(h, idx);
1551
1552 return value;
1553 }
1554
1555 static unsigned find_first_entry(HashmapBase *h) {
1556 Iterator i = ITERATOR_FIRST;
1557
1558 if (!h || !n_entries(h))
1559 return IDX_NIL;
1560
1561 return hashmap_iterate_entry(h, &i);
1562 }
1563
1564 void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) {
1565 struct hashmap_base_entry *e;
1566 void *key, *data;
1567 unsigned idx;
1568
1569 idx = find_first_entry(h);
1570 if (idx == IDX_NIL) {
1571 if (ret_key)
1572 *ret_key = NULL;
1573 return NULL;
1574 }
1575
1576 e = bucket_at(h, idx);
1577 key = (void*) e->key;
1578 data = entry_value(h, e);
1579
1580 if (remove)
1581 remove_entry(h, idx);
1582
1583 if (ret_key)
1584 *ret_key = key;
1585
1586 return data;
1587 }
1588
1589 unsigned _hashmap_size(HashmapBase *h) {
1590 if (!h)
1591 return 0;
1592
1593 return n_entries(h);
1594 }
1595
1596 unsigned _hashmap_buckets(HashmapBase *h) {
1597 if (!h)
1598 return 0;
1599
1600 return n_buckets(h);
1601 }
1602
1603 int _hashmap_merge(Hashmap *h, Hashmap *other) {
1604 Iterator i;
1605 unsigned idx;
1606
1607 assert(h);
1608
1609 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1610 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1611 int r;
1612
1613 r = hashmap_put(h, pe->b.key, pe->value);
1614 if (r < 0 && r != -EEXIST)
1615 return r;
1616 }
1617
1618 return 0;
1619 }
1620
1621 int set_merge(Set *s, Set *other) {
1622 Iterator i;
1623 unsigned idx;
1624
1625 assert(s);
1626
1627 HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1628 struct set_entry *se = set_bucket_at(other, idx);
1629 int r;
1630
1631 r = set_put(s, se->b.key);
1632 if (r < 0)
1633 return r;
1634 }
1635
1636 return 0;
1637 }
1638
1639 int _hashmap_reserve(HashmapBase *h, unsigned entries_add) {
1640 int r;
1641
1642 assert(h);
1643
1644 r = resize_buckets(h, entries_add);
1645 if (r < 0)
1646 return r;
1647
1648 return 0;
1649 }
1650
1651 /*
1652 * The same as hashmap_merge(), but every new item from other is moved to h.
1653 * Keys already in h are skipped and stay in other.
1654 * Returns: 0 on success.
1655 * -ENOMEM on alloc failure, in which case no move has been done.
1656 */
1657 int _hashmap_move(HashmapBase *h, HashmapBase *other) {
1658 struct swap_entries swap;
1659 struct hashmap_base_entry *e, *n;
1660 Iterator i;
1661 unsigned idx;
1662 int r;
1663
1664 assert(h);
1665
1666 if (!other)
1667 return 0;
1668
1669 assert(other->type == h->type);
1670
1671 /*
1672 * This reserves buckets for the worst case, where none of other's
1673 * entries are yet present in h. This is preferable to risking
1674 * an allocation failure in the middle of the moving and having to
1675 * rollback or return a partial result.
1676 */
1677 r = resize_buckets(h, n_entries(other));
1678 if (r < 0)
1679 return r;
1680
1681 HASHMAP_FOREACH_IDX(idx, other, i) {
1682 unsigned h_hash;
1683
1684 e = bucket_at(other, idx);
1685 h_hash = bucket_hash(h, e->key);
1686 if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
1687 continue;
1688
1689 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1690 n->key = e->key;
1691 if (h->type != HASHMAP_TYPE_SET)
1692 ((struct plain_hashmap_entry*) n)->value =
1693 ((struct plain_hashmap_entry*) e)->value;
1694 assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);
1695
1696 remove_entry(other, idx);
1697 }
1698
1699 return 0;
1700 }
1701
1702 int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
1703 struct swap_entries swap;
1704 unsigned h_hash, other_hash, idx;
1705 struct hashmap_base_entry *e, *n;
1706 int r;
1707
1708 assert(h);
1709
1710 h_hash = bucket_hash(h, key);
1711 if (bucket_scan(h, h_hash, key) != IDX_NIL)
1712 return -EEXIST;
1713
1714 if (!other)
1715 return -ENOENT;
1716
1717 assert(other->type == h->type);
1718
1719 other_hash = bucket_hash(other, key);
1720 idx = bucket_scan(other, other_hash, key);
1721 if (idx == IDX_NIL)
1722 return -ENOENT;
1723
1724 e = bucket_at(other, idx);
1725
1726 n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1727 n->key = e->key;
1728 if (h->type != HASHMAP_TYPE_SET)
1729 ((struct plain_hashmap_entry*) n)->value =
1730 ((struct plain_hashmap_entry*) e)->value;
1731 r = hashmap_put_boldly(h, h_hash, &swap, true);
1732 if (r < 0)
1733 return r;
1734
1735 remove_entry(other, idx);
1736 return 0;
1737 }
1738
1739 HashmapBase* _hashmap_copy(HashmapBase *h HASHMAP_DEBUG_PARAMS) {
1740 HashmapBase *copy;
1741 int r;
1742
1743 assert(h);
1744
1745 copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_PASS_ARGS);
1746 if (!copy)
1747 return NULL;
1748
1749 switch (h->type) {
1750 case HASHMAP_TYPE_PLAIN:
1751 case HASHMAP_TYPE_ORDERED:
1752 r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1753 break;
1754 case HASHMAP_TYPE_SET:
1755 r = set_merge((Set*)copy, (Set*)h);
1756 break;
1757 default:
1758 assert_not_reached();
1759 }
1760
1761 if (r < 0)
1762 return _hashmap_free(copy, NULL, NULL);
1763
1764 return copy;
1765 }
1766
1767 char** _hashmap_get_strv(HashmapBase *h) {
1768 char **sv;
1769 Iterator i;
1770 unsigned idx, n;
1771
1772 if (!h)
1773 return new0(char*, 1);
1774
1775 sv = new(char*, n_entries(h)+1);
1776 if (!sv)
1777 return NULL;
1778
1779 n = 0;
1780 HASHMAP_FOREACH_IDX(idx, h, i)
1781 sv[n++] = entry_value(h, bucket_at(h, idx));
1782 sv[n] = NULL;
1783
1784 return sv;
1785 }
1786
1787 void* ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1788 struct ordered_hashmap_entry *e;
1789 unsigned hash, idx;
1790
1791 if (!h)
1792 return NULL;
1793
1794 hash = bucket_hash(h, key);
1795 idx = bucket_scan(h, hash, key);
1796 if (idx == IDX_NIL)
1797 return NULL;
1798
1799 e = ordered_bucket_at(h, idx);
1800 if (e->iterate_next == IDX_NIL)
1801 return NULL;
1802 return ordered_bucket_at(h, e->iterate_next)->p.value;
1803 }
1804
1805 int set_consume(Set *s, void *value) {
1806 int r;
1807
1808 assert(s);
1809 assert(value);
1810
1811 r = set_put(s, value);
1812 if (r <= 0)
1813 free(value);
1814
1815 return r;
1816 }
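/*
 * Sketch of the ownership convention of set_consume(): it takes ownership of
 * the allocation in every case, freeing it both on error and when an equal
 * entry is already present, so callers need no separate cleanup path for
 * duplicates. Hypothetical use (set_put_strsplit() below follows the same
 * pattern):
 *
 *         char *c = strdup(word);
 *         if (!c)
 *                 return -ENOMEM;
 *         r = set_consume(s, c);          // frees c unless it was newly stored
 *         if (r < 0)
 *                 return r;
 */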
1817
1818 int _hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v HASHMAP_DEBUG_PARAMS) {
1819 int r;
1820
1821 r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1822 if (r < 0)
1823 return r;
1824
1825 _cleanup_free_ char *kdup = NULL, *vdup = NULL;
1826
1827 kdup = strdup(k);
1828 if (!kdup)
1829 return -ENOMEM;
1830
1831 if (v) {
1832 vdup = strdup(v);
1833 if (!vdup)
1834 return -ENOMEM;
1835 }
1836
1837 r = hashmap_put(*h, kdup, vdup);
1838 if (r < 0) {
1839 if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup)))
1840 return 0;
1841 return r;
1842 }
1843
1844         /* r == 0 with non-NULL vdup would mean vdup is already stored in the hashmap, which cannot be the case here */
1845 assert(vdup == NULL || r > 0);
1846 if (r > 0)
1847 kdup = vdup = NULL;
1848
1849 return r;
1850 }
1851
1852 int _set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n HASHMAP_DEBUG_PARAMS) {
1853 char *c;
1854 int r;
1855
1856 assert(s);
1857 assert(p);
1858
1859 r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
1860 if (r < 0)
1861 return r;
1862
1863 if (n == SIZE_MAX) {
1864 if (set_contains(*s, (char*) p))
1865 return 0;
1866
1867 c = strdup(p);
1868 } else
1869 c = strndup(p, n);
1870 if (!c)
1871 return -ENOMEM;
1872
1873 return set_consume(*s, c);
1874 }
1875
1876 int _set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l HASHMAP_DEBUG_PARAMS) {
1877 int n = 0, r;
1878
1879 assert(s);
1880
1881 STRV_FOREACH(i, l) {
1882 r = _set_put_strndup_full(s, hash_ops, *i, SIZE_MAX HASHMAP_DEBUG_PASS_ARGS);
1883 if (r < 0)
1884 return r;
1885
1886 n += r;
1887 }
1888
1889 return n;
1890 }
1891
1892 int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
1893 const char *p = ASSERT_PTR(v);
1894 int r;
1895
1896 assert(s);
1897
1898 for (;;) {
1899 char *word;
1900
1901 r = extract_first_word(&p, &word, separators, flags);
1902 if (r <= 0)
1903 return r;
1904
1905 r = set_consume(s, word);
1906 if (r < 0)
1907 return r;
1908 }
1909 }
1910
1911 /* Expand the cachemem if needed. Returns true if newly (re)activated, false if it was already active, or -ENOMEM on allocation failure. */
1912 static int cachemem_maintain(CacheMem *mem, size_t size) {
1913 assert(mem);
1914
1915 if (!GREEDY_REALLOC(mem->ptr, size)) {
1916 if (size > 0)
1917 return -ENOMEM;
1918 }
1919
1920 if (!mem->active) {
1921 mem->active = true;
1922 return true;
1923 }
1924
1925 return false;
1926 }
1927
1928 int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
1929 bool sync_keys = false, sync_values = false;
1930 size_t size;
1931 int r;
1932
1933 assert(cache);
1934 assert(cache->hashmap);
1935
1936 size = n_entries(cache->hashmap);
1937
1938 if (res_keys) {
1939 r = cachemem_maintain(&cache->keys, size);
1940 if (r < 0)
1941 return r;
1942
1943 sync_keys = r;
1944 } else
1945 cache->keys.active = false;
1946
1947 if (res_values) {
1948 r = cachemem_maintain(&cache->values, size);
1949 if (r < 0)
1950 return r;
1951
1952 sync_values = r;
1953 } else
1954 cache->values.active = false;
1955
1956 if (cache->hashmap->dirty) {
1957 if (cache->keys.active)
1958 sync_keys = true;
1959 if (cache->values.active)
1960 sync_values = true;
1961
1962 cache->hashmap->dirty = false;
1963 }
1964
1965 if (sync_keys || sync_values) {
1966 unsigned i, idx;
1967 Iterator iter;
1968
1969 i = 0;
1970 HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
1971 struct hashmap_base_entry *e;
1972
1973 e = bucket_at(cache->hashmap, idx);
1974
1975 if (sync_keys)
1976 cache->keys.ptr[i] = e->key;
1977 if (sync_values)
1978 cache->values.ptr[i] = entry_value(cache->hashmap, e);
1979 i++;
1980 }
1981 }
1982
1983 if (res_keys)
1984 *res_keys = cache->keys.ptr;
1985 if (res_values)
1986 *res_values = cache->values.ptr;
1987 if (res_n_entries)
1988 *res_n_entries = size;
1989
1990 return 0;
1991 }
1992
1993 IteratedCache* iterated_cache_free(IteratedCache *cache) {
1994 if (cache) {
1995 free(cache->keys.ptr);
1996 free(cache->values.ptr);
1997 }
1998
1999 return mfree(cache);
2000 }
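/*
 * Sketch of how an IteratedCache is meant to be used: create it once, then
 * call iterated_cache_get() repeatedly; the flat key/value arrays are only
 * rebuilt when the cache was just (re)activated or the hashmap was dirtied
 * since the last call. Using the internal helpers from this file (hashmap.h
 * presumably provides type-safe wrappers):
 *
 *         IteratedCache *c = _hashmap_iterated_cache_new(HASHMAP_BASE(h));
 *         const void **keys, **values;
 *         unsigned n;
 *         r = iterated_cache_get(c, &keys, &values, &n);
 *         ...
 *         c = iterated_cache_free(c);
 */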
2001
2002 int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) {
2003 _cleanup_free_ char *str = NULL;
2004 size_t separator_len, len = 0;
2005 const char *value;
2006 bool first;
2007
2008 assert(ret);
2009
2010 if (set_isempty(s)) {
2011 *ret = NULL;
2012 return 0;
2013 }
2014
2015 separator_len = strlen_ptr(separator);
2016
2017 if (separator_len == 0)
2018 wrap_with_separator = false;
2019
2020 first = !wrap_with_separator;
2021
2022 SET_FOREACH(value, s) {
2023 size_t l = strlen_ptr(value);
2024
2025 if (l == 0)
2026 continue;
2027
2028 if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1))
2029 return -ENOMEM;
2030
2031 if (separator_len > 0 && !first) {
2032 memcpy(str + len, separator, separator_len);
2033 len += separator_len;
2034 }
2035
2036 memcpy(str + len, value, l);
2037 len += l;
2038 first = false;
2039 }
2040
2041 if (wrap_with_separator) {
2042 memcpy(str + len, separator, separator_len);
2043 len += separator_len;
2044 }
2045
2046 str[len] = '\0';
2047
2048 *ret = TAKE_PTR(str);
2049 return 0;
2050 }
2051
2052 bool set_equal(Set *a, Set *b) {
2053 void *p;
2054
2055 /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same
2056 * entries */
2057
2058 if (a == b)
2059 return true;
2060
2061 if (set_isempty(a) && set_isempty(b))
2062 return true;
2063
2064 if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases
2065 * already */
2066 return false;
2067
2068 SET_FOREACH(p, a)
2069 if (!set_contains(b, p))
2070 return false;
2071
2072 /* If we have the same hashops, then we don't need to check things backwards given we compared the
2073 * size and that all of a is in b. */
2074 if (a->b.hash_ops == b->b.hash_ops)
2075 return true;
2076
2077 SET_FOREACH(p, b)
2078 if (!set_contains(a, p))
2079 return false;
2080
2081 return true;
2082 }
2083
2084 static bool set_fnmatch_one(Set *patterns, const char *needle) {
2085 const char *p;
2086
2087 assert(needle);
2088
2089 /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as non-matching pattern */
2090
2091 SET_FOREACH(p, patterns)
2092 if (fnmatch(p, needle, 0) == 0)
2093 return true;
2094
2095 return false;
2096 }
2097
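/*
 * Worked example for set_fnmatch() below: with include_patterns = { "eth*" }
 * and exclude_patterns = { "eth1" }, the needle "eth0" matches (included, not
 * excluded), "eth1" does not (excludes win), and "lo" does not (matches no
 * include pattern). An empty or NULL include set means "everything not
 * explicitly excluded matches".
 */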
2098 bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) {
2099 assert(needle);
2100
2101 if (set_fnmatch_one(exclude_patterns, needle))
2102 return false;
2103
2104 if (set_isempty(include_patterns))
2105 return true;
2106
2107 return set_fnmatch_one(include_patterns, needle);
2108 }