/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <errno.h>
#include <fnmatch.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#if HAVE_VALGRIND_VALGRIND_H
#  include <valgrind/valgrind.h>
#endif

#include "alloc-util.h"
#include "fileio.h"
#include "hashmap.h"
#include "logarithm.h"
#include "macro.h"
#include "memory-util.h"
#include "mempool.h"
#include "missing_syscall.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"

#if ENABLE_DEBUG_HASHMAP
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
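
/* A worked illustration of the Robin Hood rule (hypothetical 8-bucket table,
 * not taken from the papers above): bucket 1 holds P (home bucket 1, DIB 0),
 * bucket 2 holds A (home bucket 2, DIB 0), bucket 3 is free. Inserting Q,
 * whose home bucket is 1:
 *   - probe bucket 1: Q's distance is 0, P's DIB is 0; 0 < 0 is false, move on;
 *   - probe bucket 2: Q's distance is 1, A's DIB is 0; 0 < 1, so A is the
 *     "wealthier" entry: Q takes bucket 2 with DIB 1 and A is displaced;
 *   - A probes bucket 3: free, so A lands there with DIB 1.
 * Plain linear probing would have produced displacements (0, 0, 2); Robin Hood
 * yields (0, 1, 1), i.e. the displacements are equalized. */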

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);
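
/* Putting the special values together, the index space looks like this,
 * counting down from the top of the unsigned range:
 *   UINT_MAX     = IDX_NIL   ("none"/"end")
 *   UINT_MAX - 1 = IDX_FIRST (freshly initialized iterator)
 *   UINT_MAX - 2 = IDX_TMP   (swap bucket)
 *   UINT_MAX - 3 = IDX_PUT   (swap bucket)
 * Everything below _IDX_SWAP_BEGIN is a real bucket index. */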

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                     /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while (hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64-bit, or 35 bytes on 32-bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64-bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32-bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
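
/* A quick check of the numbers above, assuming a 64-bit arch with 8-byte
 * pointers and HASH_KEY_SIZE == 16: sizeof(struct indirect_storage) =
 * 8 (storage) + 16 (hash_key) + 3*4 (counters) + 3 (_pad) = 39 bytes.
 * A set_entry is a single pointer, so DIRECT_BUCKETS(struct set_entry) =
 * 39 / (8 + 1) = 4, and an ordered_hashmap_entry is 8 + 8 + 2*4 = 24 bytes,
 * so DIRECT_BUCKETS(struct ordered_hashmap_entry) = 39 / 25 = 1 -
 * satisfying both assertions. */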

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect;  /* if  has_indirect */
                struct direct_storage direct;      /* if !has_indirect */
        };

        enum HashmapType type:2;      /* HASHMAP_TYPE_* */
        bool has_indirect:1;          /* whether indirect storage is used */
        unsigned n_direct_entries:3;  /* Number of entries in direct storage.
                                       * Only valid if !has_indirect. */
        bool from_pool:1;             /* whether was allocated from mempool */
        bool dirty:1;                 /* whether dirtied since last iterated_cache_get() */
        bool cached:1;                /* whether this hashmap is being cached */

#if ENABLE_DEBUG_HASHMAP
        struct hashmap_debug_info debug;
#endif
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static _used_ const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

void hashmap_trim_pools(void) {
        int r;

        /* The pool is only allocated by the main thread, but the memory can be passed to other
         * threads. Let's clean up if we are the main thread and no other threads are live. */

        /* We build our own is_main_thread() here, which doesn't use C11 TLS based caching of the
         * result. That's because valgrind apparently doesn't like TLS to be used from a GCC destructor. */
        if (getpid() != gettid())
                return (void) log_debug("Not cleaning up memory pools, not in main thread.");

        r = get_process_threads(0);
        if (r < 0)
                return (void) log_debug_errno(r, "Failed to determine number of threads, not cleaning up memory pools: %m");
        if (r != 1)
                return (void) log_debug("Not cleaning up memory pools, running in multi-threaded process.");

        mempool_trim(&hashmap_pool);
        mempool_trim(&ordered_hashmap_pool);
}

#if HAVE_VALGRIND_VALGRIND_H
_destructor_ static void cleanup_pools(void) {
        /* Be nice to valgrind */
        if (RUNNING_ON_VALGRIND)
                hashmap_trim_pools();
}
#endif

static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void* storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t* hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry* bucket_at(HashmapBase *h, unsigned idx) {
        return CAST_ALIGN_PTR(
                        struct hashmap_base_entry,
                        (uint8_t *) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry* plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry* bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry* bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached();
}

static dib_raw_t* dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}
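
/* E.g. (hypothetical numbers): with n_buckets(h) == 8, an entry sitting at
 * idx == 1 whose initial bucket is 6 has wrapped around the end of the table,
 * so its distance is 8 + 1 - 6 = 3. */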

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                                bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                                bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

static void* entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached();
        }
}

static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
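
/* A worked sketch of the backward shift (hypothetical layout): let buckets
 * 2, 3, 4, 5 hold entries with DIBs 0, 1, 1, 2 and let bucket 6 be free.
 * Removing the entry at bucket 3 finds the stop bucket right == 6. The
 * entries in buckets 4 and 5 are then shifted one step back: bucket 3
 * receives the entry from bucket 4 (DIB 1 -> 0), bucket 4 receives the entry
 * from bucket 5 (DIB 2 -> 1), and bucket 5 is marked free. Unlike tombstone
 * schemes, no marker is left behind, so lookups can keep terminating at the
 * first free bucket or DIB mismatch. */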

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool _hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
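
/* For illustration, a caller's view of the iteration API (a sketch; the
 * public HASHMAP_FOREACH macro in hashmap.h reduces to a loop of this shape,
 * with `h`, `value` and use() being placeholders):
 *
 *         Iterator i = ITERATOR_FIRST;
 *         void *value;
 *         while (hashmap_iterate(h, &i, &value, NULL))
 *                 use(value);
 */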

IteratedCache* _hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

static void shared_hash_key_initialize(void) {
        random_bytes(shared_hash_key, sizeof(shared_hash_key));
}

static struct HashmapBase* hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];

        bool use_pool = mempool_enabled && mempool_enabled();  /* mempool_enabled is a weak symbol */

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);
        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        h->hash_ops = hash_ops ?: &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        static pthread_once_t once = PTHREAD_ONCE_INIT;
        assert_se(pthread_once(&once, shared_hash_key_initialize) == 0);

#if ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}

Hashmap *_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
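
/* Caller's view (a sketch): the public hashmap_new() macro in hashmap.h
 * expands to _hashmap_new() with the debug location arguments filled in:
 *
 *         Hashmap *h = hashmap_new(&string_hash_ops);
 *         if (!h)
 *                 return -ENOMEM;
 */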

static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 1;
}

int _hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int _ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int _set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

int _hashmap_ensure_put(Hashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return hashmap_put(*h, key, value);
}

int _ordered_hashmap_ensure_put(OrderedHashmap **h, const struct hash_ops *hash_ops, const void *key, void *value HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _ordered_hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return ordered_hashmap_put(*h, key, value);
}

static void hashmap_free_no_clear(HashmapBase *h) {
        assert(!h->has_indirect);
        assert(h->n_direct_entries == 0);

#if ENABLE_DEBUG_HASHMAP
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        if (h->from_pool) {
                /* Ensure that the object didn't get migrated between threads. */
                assert_se(is_main_thread());
                mempool_free_tile(hashmap_type_info[h->type].mempool, h);
        } else
                free(h);
}

HashmapBase* _hashmap_free(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
        if (h) {
                _hashmap_clear(h, default_free_key, default_free_value);
                hashmap_free_no_clear(h);
        }

        return NULL;
}

void _hashmap_clear(HashmapBase *h, free_func_t default_free_key, free_func_t default_free_value) {
        free_func_t free_key, free_value;
        if (!h)
                return;

        free_key = h->hash_ops->free_key ?: default_free_key;
        free_value = h->hash_ops->free_value ?: default_free_value;

        if (free_key || free_value) {

                /* If destructor calls are defined, let's destroy things defensively: let's take the item out of the
                 * hash table, and only then call the destructor functions. If these destructors then try to unregister
                 * themselves from our hash table a second time, the entry is already gone. */

                while (_hashmap_size(h) > 0) {
                        void *k = NULL;
                        void *v;

                        v = _hashmap_first_key_and_value(h, true, &k);

                        if (free_key)
                                free_key(k);

                        if (free_value)
                                free_value(v);
                }
        }

        if (h->has_indirect) {
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        base_set_dirty(h);
}

static int resize_buckets(HashmapBase *h, unsigned entries_add);

/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 *          false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#if ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
                        if (raw_dib == DIB_RAW_REHASH)
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}

/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#if ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        base_set_dirty(h);

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)

/*
 * Returns 0 if resize is not needed.
 *         1 if successfully resized.
 *         -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        void *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
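        /* E.g. with INV_KEEP_FREE == 5 and new_n_entries == 100 this gives
         * new_n_buckets = 100 + 100/4 = 125, i.e. a load factor of
         * 100/125 = 0.8 = 1 - 1/5, matching the formula above. */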
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
                (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        assert_se(n_rehashed == n_entries(h));

        return 1;
}

/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
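
/* Note on the early returns above: a free bucket obviously means the key is
 * absent, but so does dib < distance - by the Robin Hood invariant, had the
 * key been inserted it could not reside further from its home bucket than
 * `distance` probes at this point, so the scan can stop without walking all
 * the way to the next free bucket. */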

int hashmap_put(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
                if (e->value == value)
                        return 0;
                return -EEXIST;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int set_put(Set *s, const void *key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        assert(s);

        hash = bucket_hash(s, key);
        idx = bucket_scan(s, hash, key);
        if (idx != IDX_NIL)
                return 0;

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = key;
        return hashmap_put_boldly(s, hash, &swap, true);
}
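
/* The caller's contract, in short (a sketch; `h`, `key`, `value` are
 * placeholders): hashmap_put() returns 1 if the entry was newly inserted,
 * 0 if the key was already present with the same value, and -EEXIST if the
 * key is mapped to a different value:
 *
 *         r = hashmap_put(h, key, value);
 *         if (r == -EEXIST)
 *                 return log_error_errno(r, "Key already taken.");
 *         if (r < 0)
 *                 return r;
 */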

int _set_ensure_put(Set **s, const struct hash_ops *hash_ops, const void *key HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS);
        if (r < 0)
                return r;

        return set_put(*s, key);
}

int _set_ensure_consume(Set **s, const struct hash_ops *hash_ops, void *key HASHMAP_DEBUG_PARAMS) {
        int r;

        r = _set_ensure_put(s, hash_ops, key HASHMAP_DEBUG_PASS_ARGS);
        if (r <= 0) {
                if (hash_ops && hash_ops->free_key)
                        hash_ops->free_key(key);
                else
                        free(key);
        }

        return r;
}
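
/* Ownership sketch for the two helpers above: _set_ensure_put() leaves the
 * key owned by the caller, while _set_ensure_consume() always takes ownership
 * of it - on successful insertion (r > 0) the set holds the key, and in every
 * other case (duplicate or error, r <= 0) the key is freed here, so the
 * caller must not free it again. */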

int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#if ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                e->b.key = key;
                e->value = value;
                hashmap_set_dirty(h);

                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}

int hashmap_update(Hashmap *h, const void *key, void *value) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = plain_bucket_at(h, idx);
        e->value = value;
        hashmap_set_dirty(h);

        return 0;
}

void* _hashmap_get(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        return entry_value(h, e);
}

void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = plain_bucket_at(h, idx);
        if (key2)
                *key2 = (void*) e->b.key;

        return e->value;
}

bool _hashmap_contains(HashmapBase *h, const void *key) {
        unsigned hash;

        if (!h)
                return false;

        hash = bucket_hash(h, key);
        return bucket_scan(h, hash, key) != IDX_NIL;
}

void* _hashmap_remove(HashmapBase *h, const void *key) {
        struct hashmap_base_entry *e;
        unsigned hash, idx;
        void *data;

        if (!h)
                return NULL;

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx == IDX_NIL)
                return NULL;

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        remove_entry(h, idx);

        return data;
}
8a35af80 | 1416 | void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) { |
89439d4f MS |
1417 | struct plain_hashmap_entry *e; |
1418 | unsigned hash, idx; | |
c582a3b3 LP |
1419 | void *data; |
1420 | ||
1421 | if (!h) { | |
1422 | if (rkey) | |
1423 | *rkey = NULL; | |
1424 | return NULL; | |
1425 | } | |
1426 | ||
1427 | hash = bucket_hash(h, key); | |
89439d4f MS |
1428 | idx = bucket_scan(h, hash, key); |
1429 | if (idx == IDX_NIL) { | |
c582a3b3 LP |
1430 | if (rkey) |
1431 | *rkey = NULL; | |
1432 | return NULL; | |
1433 | } | |
1434 | ||
89439d4f | 1435 | e = plain_bucket_at(h, idx); |
c582a3b3 LP |
1436 | data = e->value; |
1437 | if (rkey) | |
89439d4f | 1438 | *rkey = (void*) e->b.key; |
c582a3b3 | 1439 | |
89439d4f | 1440 | remove_entry(h, idx); |
c582a3b3 LP |
1441 | |
1442 | return data; | |
1443 | } | |
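/* Illustrative sketch (assumed caller code): when both key and value were
 * allocated at insertion time, hashmap_remove2() hands both pointers back so
 * that nothing leaks. */
static void example_remove_and_free(Hashmap *h, const char *name) {
        void *key = NULL, *value;

        value = hashmap_remove2(h, name, &key);
        free(key);
        free(value);
}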
1444 | ||
101d8e63 | 1445 | int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) { |
89439d4f MS |
1446 | struct swap_entries swap; |
1447 | struct plain_hashmap_entry *e; | |
1448 | unsigned old_hash, new_hash, idx; | |
101d8e63 LP |
1449 | |
1450 | if (!h) | |
1451 | return -ENOENT; | |
1452 | ||
a3b6fafe | 1453 | old_hash = bucket_hash(h, old_key); |
89439d4f MS |
1454 | idx = bucket_scan(h, old_hash, old_key); |
1455 | if (idx == IDX_NIL) | |
101d8e63 LP |
1456 | return -ENOENT; |
1457 | ||
a3b6fafe | 1458 | new_hash = bucket_hash(h, new_key); |
89439d4f | 1459 | if (bucket_scan(h, new_hash, new_key) != IDX_NIL) |
101d8e63 LP |
1460 | return -EEXIST; |
1461 | ||
89439d4f | 1462 | remove_entry(h, idx); |
101d8e63 | 1463 | |
89439d4f MS |
1464 | e = &bucket_at_swap(&swap, IDX_PUT)->p; |
1465 | e->b.key = new_key; | |
101d8e63 | 1466 | e->value = value; |
89439d4f MS |
1467 | assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1); |
1468 | ||
1469 | return 0; | |
1470 | } | |
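/* Illustrative sketch (assumed caller code): hashmap_remove_and_put() re-keys
 * an entry in one step, failing with -EEXIST if the new key is already taken
 * and -ENOENT if the old key is missing, so the map never ends up with both or
 * neither key present. */
static int example_rekey(Hashmap *h, const char *old_name, const char *new_name) {
        void *value;

        value = hashmap_get(h, old_name);
        if (!value)
                return -ENOENT;

        return hashmap_remove_and_put(h, old_name, new_name, value);
}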
1471 | ||
1472 | int set_remove_and_put(Set *s, const void *old_key, const void *new_key) { | |
1473 | struct swap_entries swap; | |
1474 | struct hashmap_base_entry *e; | |
1475 | unsigned old_hash, new_hash, idx; | |
101d8e63 | 1476 | |
89439d4f MS |
1477 | if (!s) |
1478 | return -ENOENT; | |
1479 | ||
1480 | old_hash = bucket_hash(s, old_key); | |
1481 | idx = bucket_scan(s, old_hash, old_key); | |
1482 | if (idx == IDX_NIL) | |
1483 | return -ENOENT; | |
1484 | ||
1485 | new_hash = bucket_hash(s, new_key); | |
1486 | if (bucket_scan(s, new_hash, new_key) != IDX_NIL) | |
1487 | return -EEXIST; | |
1488 | ||
1489 | remove_entry(s, idx); | |
1490 | ||
1491 | e = &bucket_at_swap(&swap, IDX_PUT)->p.b; | |
1492 | e->key = new_key; | |
1493 | assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1); | |
101d8e63 LP |
1494 | |
1495 | return 0; | |
1496 | } | |
1497 | ||
8fe914ec | 1498 | int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) { |
89439d4f MS |
1499 | struct swap_entries swap; |
1500 | struct plain_hashmap_entry *e; | |
1501 | unsigned old_hash, new_hash, idx_old, idx_new; | |
8fe914ec LP |
1502 | |
1503 | if (!h) | |
1504 | return -ENOENT; | |
1505 | ||
a3b6fafe | 1506 | old_hash = bucket_hash(h, old_key); |
89439d4f MS |
1507 | idx_old = bucket_scan(h, old_hash, old_key); |
1508 | if (idx_old == IDX_NIL) | |
8fe914ec LP |
1509 | return -ENOENT; |
1510 | ||
89439d4f | 1511 | old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key; |
8fe914ec | 1512 | |
89439d4f MS |
1513 | new_hash = bucket_hash(h, new_key); |
1514 | idx_new = bucket_scan(h, new_hash, new_key); | |
1515 | if (idx_new != IDX_NIL) | |
1516 | if (idx_old != idx_new) { | |
1517 | remove_entry(h, idx_new); | |
1518 | /* Compensate for a possible backward shift. */ | |
1519 | if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key) | |
1520 | idx_old = prev_idx(HASHMAP_BASE(h), idx_old); | |
1521 | assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key); | |
1522 | } | |
1523 | ||
1524 | remove_entry(h, idx_old); | |
1525 | ||
1526 | e = &bucket_at_swap(&swap, IDX_PUT)->p; | |
1527 | e->b.key = new_key; | |
8fe914ec | 1528 | e->value = value; |
89439d4f | 1529 | assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1); |
8fe914ec LP |
1530 | |
1531 | return 0; | |
1532 | } | |
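/* Illustrative contrast (assumed caller code): unlike hashmap_remove_and_put(),
 * hashmap_remove_and_replace() succeeds even when the new key is already
 * present, by dropping the entry currently stored under it. */
static int example_force_rekey(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        int r;

        r = hashmap_remove_and_put(h, old_key, new_key, value);
        if (r == -EEXIST)
                r = hashmap_remove_and_replace(h, old_key, new_key, value);

        return r;
}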
1533 | ||
8a35af80 | 1534 | void* _hashmap_remove_value(HashmapBase *h, const void *key, void *value) { |
c380b84d | 1535 | struct hashmap_base_entry *e; |
89439d4f | 1536 | unsigned hash, idx; |
3158713e LP |
1537 | |
1538 | if (!h) | |
1539 | return NULL; | |
1540 | ||
a3b6fafe | 1541 | hash = bucket_hash(h, key); |
89439d4f MS |
1542 | idx = bucket_scan(h, hash, key); |
1543 | if (idx == IDX_NIL) | |
3158713e LP |
1544 | return NULL; |
1545 | ||
c380b84d LP |
1546 | e = bucket_at(h, idx); |
1547 | if (entry_value(h, e) != value) | |
3158713e LP |
1548 | return NULL; |
1549 | ||
89439d4f | 1550 | remove_entry(h, idx); |
3158713e LP |
1551 | |
1552 | return value; | |
1553 | } | |
1554 | ||
89439d4f MS |
1555 | static unsigned find_first_entry(HashmapBase *h) { |
1556 | Iterator i = ITERATOR_FIRST; | |
60918275 | 1557 | |
89439d4f MS |
1558 | if (!h || !n_entries(h)) |
1559 | return IDX_NIL; | |
60918275 | 1560 | |
89439d4f | 1561 | return hashmap_iterate_entry(h, &i); |
60918275 LP |
1562 | } |
1563 | ||
8a35af80 | 1564 | void* _hashmap_first_key_and_value(HashmapBase *h, bool remove, void **ret_key) { |
89439d4f | 1565 | struct hashmap_base_entry *e; |
7ef670c3 | 1566 | void *key, *data; |
89439d4f | 1567 | unsigned idx; |
60918275 | 1568 | |
89439d4f | 1569 | idx = find_first_entry(h); |
51c682df TH |
1570 | if (idx == IDX_NIL) { |
1571 | if (ret_key) | |
1572 | *ret_key = NULL; | |
60918275 | 1573 | return NULL; |
51c682df | 1574 | } |
60918275 | 1575 | |
89439d4f | 1576 | e = bucket_at(h, idx); |
7ef670c3 | 1577 | key = (void*) e->key; |
89439d4f | 1578 | data = entry_value(h, e); |
60918275 | 1579 | |
7ef670c3 YW |
1580 | if (remove) |
1581 | remove_entry(h, idx); | |
60918275 | 1582 | |
7ef670c3 YW |
1583 | if (ret_key) |
1584 | *ret_key = key; | |
22be093f | 1585 | |
7ef670c3 | 1586 | return data; |
22be093f LP |
1587 | } |
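/* Illustrative drain loop (assumed caller code): repeatedly stealing the first
 * key/value pair is a common way to empty a map while freeing both
 * allocations. hashmap_steal_first_key_and_value() is assumed to be the public
 * wrapper that calls the helper above with remove=true; the loop also assumes
 * no NULL values are stored. */
static void example_drain(Hashmap *h) {
        void *key, *value;

        while ((value = hashmap_steal_first_key_and_value(h, &key))) {
                free(key);
                free(value);
        }
}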
1588 | ||
138f49e4 | 1589 | unsigned _hashmap_size(HashmapBase *h) { |
60918275 LP |
1590 | if (!h) |
1591 | return 0; | |
1592 | ||
89439d4f | 1593 | return n_entries(h); |
60918275 LP |
1594 | } |
1595 | ||
138f49e4 | 1596 | unsigned _hashmap_buckets(HashmapBase *h) { |
45fa9e29 LP |
1597 | if (!h) |
1598 | return 0; | |
1599 | ||
89439d4f | 1600 | return n_buckets(h); |
45fa9e29 LP |
1601 | } |
1602 | ||
138f49e4 | 1603 | int _hashmap_merge(Hashmap *h, Hashmap *other) { |
89439d4f MS |
1604 | Iterator i; |
1605 | unsigned idx; | |
60918275 | 1606 | |
89439d4f | 1607 | assert(h); |
60918275 | 1608 | |
89439d4f MS |
1609 | HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) { |
1610 | struct plain_hashmap_entry *pe = plain_bucket_at(other, idx); | |
1611 | int r; | |
91cdde8a | 1612 | |
89439d4f MS |
1613 | r = hashmap_put(h, pe->b.key, pe->value); |
1614 | if (r < 0 && r != -EEXIST) | |
1615 | return r; | |
1616 | } | |
91cdde8a | 1617 | |
89439d4f MS |
1618 | return 0; |
1619 | } | |
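/* Illustrative sketch (assumed caller code): merging copies entries of 'other'
 * into 'h' while leaving 'other' untouched; keys already present in 'h' win,
 * because -EEXIST from hashmap_put() is deliberately ignored above.
 * hashmap_merge() is the public wrapper also used by _hashmap_copy() below. */
static int example_apply_defaults(Hashmap *config, Hashmap *defaults) {
        /* Entries already in 'config' take precedence over 'defaults'. */
        return hashmap_merge(config, defaults);
}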
91cdde8a | 1620 | |
89439d4f MS |
1621 | int set_merge(Set *s, Set *other) { |
1622 | Iterator i; | |
1623 | unsigned idx; | |
91cdde8a | 1624 | |
89439d4f MS |
1625 | assert(s); |
1626 | ||
1627 | HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) { | |
1628 | struct set_entry *se = set_bucket_at(other, idx); | |
91cdde8a LP |
1629 | int r; |
1630 | ||
89439d4f MS |
1631 | r = set_put(s, se->b.key); |
1632 | if (r < 0) | |
a3b6fafe | 1633 | return r; |
91cdde8a LP |
1634 | } |
1635 | ||
1636 | return 0; | |
1637 | } | |
1638 | ||
138f49e4 | 1639 | int _hashmap_reserve(HashmapBase *h, unsigned entries_add) { |
e4c691b5 MS |
1640 | int r; |
1641 | ||
1642 | assert(h); | |
1643 | ||
1644 | r = resize_buckets(h, entries_add); | |
1645 | if (r < 0) | |
1646 | return r; | |
1647 | ||
1648 | return 0; | |
1649 | } | |
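/* Illustrative sketch (assumed caller code): reserving buckets for a known
 * number of insertions up front means the puts below will not trigger any
 * further bucket resizing. hashmap_reserve() is assumed to be the public
 * wrapper of the helper above. */
static int example_prealloc(Hashmap *h, char **names) {
        int r;

        r = hashmap_reserve(h, strv_length(names));
        if (r < 0)
                return r;

        STRV_FOREACH(i, names) {
                r = hashmap_put(h, *i, *i);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}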
1650 | ||
89439d4f MS |
1651 | /* |
1652 | * The same as hashmap_merge(), but every new item from other is moved to h. | |
1653 | * Keys already in h are skipped and stay in other. | |
1654 | * Returns: 0 on success. | |
1655 | * -ENOMEM on alloc failure, in which case no move has been done. | |
1656 | */ | |
138f49e4 | 1657 | int _hashmap_move(HashmapBase *h, HashmapBase *other) { |
89439d4f MS |
1658 | struct swap_entries swap; |
1659 | struct hashmap_base_entry *e, *n; | |
1660 | Iterator i; | |
1661 | unsigned idx; | |
1662 | int r; | |
101d8e63 LP |
1663 | |
1664 | assert(h); | |
1665 | ||
101d8e63 | 1666 | if (!other) |
7ad63f57 | 1667 | return 0; |
101d8e63 | 1668 | |
89439d4f MS |
1669 | assert(other->type == h->type); |
1670 | ||
1671 | /* | |
1672 | * This reserves buckets for the worst case, where none of other's | |
1673 | * entries are yet present in h. This is preferable to risking | |
1674 | * an allocation failure in the middle of the move and having to |
1675 | * roll back or return a partial result. |
1676 | */ | |
1677 | r = resize_buckets(h, n_entries(other)); | |
1678 | if (r < 0) | |
1679 | return r; | |
101d8e63 | 1680 | |
89439d4f MS |
1681 | HASHMAP_FOREACH_IDX(idx, other, i) { |
1682 | unsigned h_hash; | |
101d8e63 | 1683 | |
89439d4f | 1684 | e = bucket_at(other, idx); |
a3b6fafe | 1685 | h_hash = bucket_hash(h, e->key); |
89439d4f | 1686 | if (bucket_scan(h, h_hash, e->key) != IDX_NIL) |
101d8e63 LP |
1687 | continue; |
1688 | ||
89439d4f MS |
1689 | n = &bucket_at_swap(&swap, IDX_PUT)->p.b; |
1690 | n->key = e->key; | |
1691 | if (h->type != HASHMAP_TYPE_SET) | |
1692 | ((struct plain_hashmap_entry*) n)->value = | |
1693 | ((struct plain_hashmap_entry*) e)->value; | |
1694 | assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1); | |
1695 | ||
1696 | remove_entry(other, idx); | |
101d8e63 | 1697 | } |
7ad63f57 MS |
1698 | |
1699 | return 0; | |
101d8e63 LP |
1700 | } |
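/* Illustrative sketch (assumed caller code): after hashmap_move() (assumed to
 * be the public wrapper of the helper above), entries unique to 'other' have
 * moved into 'h', while keys that already existed in 'h' stay behind in
 * 'other' for the caller to inspect or free. */
static int example_adopt(Hashmap *h, Hashmap *other) {
        int r;

        r = hashmap_move(h, other);
        if (r < 0)
                return r; /* allocation failed, nothing was moved */

        return (int) hashmap_size(other); /* number of colliding leftovers */
}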
1701 | ||
138f49e4 | 1702 | int _hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) { |
89439d4f MS |
1703 | struct swap_entries swap; |
1704 | unsigned h_hash, other_hash, idx; | |
1705 | struct hashmap_base_entry *e, *n; | |
1706 | int r; | |
101d8e63 | 1707 | |
101d8e63 LP |
1708 | assert(h); |
1709 | ||
a3b6fafe | 1710 | h_hash = bucket_hash(h, key); |
89439d4f | 1711 | if (bucket_scan(h, h_hash, key) != IDX_NIL) |
101d8e63 LP |
1712 | return -EEXIST; |
1713 | ||
bf3d3e2b MS |
1714 | if (!other) |
1715 | return -ENOENT; | |
1716 | ||
89439d4f MS |
1717 | assert(other->type == h->type); |
1718 | ||
a3b6fafe | 1719 | other_hash = bucket_hash(other, key); |
89439d4f MS |
1720 | idx = bucket_scan(other, other_hash, key); |
1721 | if (idx == IDX_NIL) | |
101d8e63 LP |
1722 | return -ENOENT; |
1723 | ||
89439d4f MS |
1724 | e = bucket_at(other, idx); |
1725 | ||
1726 | n = &bucket_at_swap(&swap, IDX_PUT)->p.b; | |
1727 | n->key = e->key; | |
1728 | if (h->type != HASHMAP_TYPE_SET) | |
1729 | ((struct plain_hashmap_entry*) n)->value = | |
1730 | ((struct plain_hashmap_entry*) e)->value; | |
1731 | r = hashmap_put_boldly(h, h_hash, &swap, true); | |
1732 | if (r < 0) | |
1733 | return r; | |
101d8e63 | 1734 | |
89439d4f | 1735 | remove_entry(other, idx); |
101d8e63 LP |
1736 | return 0; |
1737 | } | |
1738 | ||
8a35af80 | 1739 | HashmapBase* _hashmap_copy(HashmapBase *h HASHMAP_DEBUG_PARAMS) { |
89439d4f MS |
1740 | HashmapBase *copy; |
1741 | int r; | |
91cdde8a LP |
1742 | |
1743 | assert(h); | |
1744 | ||
add74e89 | 1745 | copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_PASS_ARGS); |
45fa9e29 | 1746 | if (!copy) |
91cdde8a LP |
1747 | return NULL; |
1748 | ||
89439d4f MS |
1749 | switch (h->type) { |
1750 | case HASHMAP_TYPE_PLAIN: | |
1751 | case HASHMAP_TYPE_ORDERED: | |
1752 | r = hashmap_merge((Hashmap*)copy, (Hashmap*)h); | |
1753 | break; | |
1754 | case HASHMAP_TYPE_SET: | |
1755 | r = set_merge((Set*)copy, (Set*)h); | |
1756 | break; | |
1757 | default: | |
04499a70 | 1758 | assert_not_reached(); |
89439d4f MS |
1759 | } |
1760 | ||
add74e89 | 1761 | if (r < 0) |
a4a1569f | 1762 | return _hashmap_free(copy, NULL, NULL); |
91cdde8a LP |
1763 | |
1764 | return copy; | |
1765 | } | |
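/* Illustrative note (assumed caller code): the copy is shallow, i.e. the new
 * map references the same key and value pointers as the original, so only one
 * of the two maps may own and free them. hashmap_copy() is assumed to be the
 * public wrapper of the helper above. */
static Hashmap* example_snapshot(Hashmap *h) {
        return hashmap_copy(h);
}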
db1413d7 | 1766 | |
8a35af80 | 1767 | char** _hashmap_get_strv(HashmapBase *h) { |
db1413d7 | 1768 | char **sv; |
89439d4f MS |
1769 | Iterator i; |
1770 | unsigned idx, n; | |
db1413d7 | 1771 | |
107e2163 LP |
1772 | if (!h) |
1773 | return new0(char*, 1); | |
1774 | ||
89439d4f | 1775 | sv = new(char*, n_entries(h)+1); |
729e3769 | 1776 | if (!sv) |
db1413d7 KS |
1777 | return NULL; |
1778 | ||
1779 | n = 0; | |
89439d4f MS |
1780 | HASHMAP_FOREACH_IDX(idx, h, i) |
1781 | sv[n++] = entry_value(h, bucket_at(h, idx)); | |
db1413d7 KS |
1782 | sv[n] = NULL; |
1783 | ||
1784 | return sv; | |
1785 | } | |
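/* Illustrative sketch (assumed caller code): hashmap_get_strv() (assumed
 * public wrapper of the helper above) returns a NULL-terminated array of the
 * stored values; the array itself must be freed by the caller, but the strings
 * remain owned by the map, hence plain free() via _cleanup_free_ rather than
 * strv_free(). */
static int example_count_values(Hashmap *h) {
        _cleanup_free_ char **values = hashmap_get_strv(h);

        if (!values)
                return -ENOMEM;

        return (int) strv_length(values);
}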
3c1668da | 1786 | |
8a35af80 | 1787 | void* ordered_hashmap_next(OrderedHashmap *h, const void *key) { |
89439d4f MS |
1788 | struct ordered_hashmap_entry *e; |
1789 | unsigned hash, idx; | |
3c1668da | 1790 | |
3c1668da LP |
1791 | if (!h) |
1792 | return NULL; | |
1793 | ||
a3b6fafe | 1794 | hash = bucket_hash(h, key); |
89439d4f MS |
1795 | idx = bucket_scan(h, hash, key); |
1796 | if (idx == IDX_NIL) | |
3c1668da LP |
1797 | return NULL; |
1798 | ||
89439d4f MS |
1799 | e = ordered_bucket_at(h, idx); |
1800 | if (e->iterate_next == IDX_NIL) | |
3c1668da | 1801 | return NULL; |
89439d4f MS |
1802 | return ordered_bucket_at(h, e->iterate_next)->p.value; |
1803 | } | |
3c1668da | 1804 | |
89439d4f MS |
1805 | int set_consume(Set *s, void *value) { |
1806 | int r; | |
1807 | ||
d97c5aea LP |
1808 | assert(s); |
1809 | assert(value); | |
1810 | ||
89439d4f | 1811 | r = set_put(s, value); |
575ccc1b | 1812 | if (r <= 0) |
89439d4f MS |
1813 | free(value); |
1814 | ||
1815 | return r; | |
1816 | } | |
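/* Illustrative sketch (assumed caller code): set_consume() takes ownership of
 * its argument unconditionally, freeing it when insertion fails or when an
 * equal entry already exists (r == 0), which keeps the common
 * strdup-then-insert pattern free of extra cleanup paths. */
static int example_intern(Set *s, const char *name) {
        char *copy;

        copy = strdup(name);
        if (!copy)
                return -ENOMEM;

        return set_consume(s, copy);
}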
1817 | ||
11e9fec2 | 1818 | int _hashmap_put_strdup_full(Hashmap **h, const struct hash_ops *hash_ops, const char *k, const char *v HASHMAP_DEBUG_PARAMS) { |
87da8784 ZJS |
1819 | int r; |
1820 | ||
11e9fec2 | 1821 | r = _hashmap_ensure_allocated(h, hash_ops HASHMAP_DEBUG_PASS_ARGS); |
87da8784 ZJS |
1822 | if (r < 0) |
1823 | return r; | |
1824 | ||
1825 | _cleanup_free_ char *kdup = NULL, *vdup = NULL; | |
25b3e2a8 | 1826 | |
87da8784 | 1827 | kdup = strdup(k); |
25b3e2a8 | 1828 | if (!kdup) |
87da8784 ZJS |
1829 | return -ENOMEM; |
1830 | ||
25b3e2a8 ZJS |
1831 | if (v) { |
1832 | vdup = strdup(v); | |
1833 | if (!vdup) | |
1834 | return -ENOMEM; | |
1835 | } | |
1836 | ||
87da8784 ZJS |
1837 | r = hashmap_put(*h, kdup, vdup); |
1838 | if (r < 0) { | |
25b3e2a8 | 1839 | if (r == -EEXIST && streq_ptr(v, hashmap_get(*h, kdup))) |
87da8784 ZJS |
1840 | return 0; |
1841 | return r; | |
1842 | } | |
1843 | ||
25b3e2a8 ZJS |
1844 | /* A return of 0 with non-NULL vdup would mean vdup is already stored in the hashmap, which cannot happen for the freshly allocated copy above */ |
1845 | assert(vdup == NULL || r > 0); | |
1846 | if (r > 0) | |
1847 | kdup = vdup = NULL; | |
87da8784 | 1848 | |
25b3e2a8 | 1849 | return r; |
87da8784 ZJS |
1850 | } |
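/* Illustrative sketch: hashmap_put_strdup() (assumed public wrapper of the
 * helper above, using hash_ops that free both strings) copies key and value
 * into the map, so the caller may pass transient buffers and keeps ownership
 * of its own copies. */
static int example_store_pair(Hashmap **h, const char *name, const char *value) {
        return hashmap_put_strdup(h, name, value);
}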
1851 | ||
cb649d12 | 1852 | int _set_put_strndup_full(Set **s, const struct hash_ops *hash_ops, const char *p, size_t n HASHMAP_DEBUG_PARAMS) { |
89439d4f | 1853 | char *c; |
be327321 | 1854 | int r; |
89439d4f MS |
1855 | |
1856 | assert(s); | |
1857 | assert(p); | |
1858 | ||
11e9fec2 | 1859 | r = _set_ensure_allocated(s, hash_ops HASHMAP_DEBUG_PASS_ARGS); |
be327321 ZJS |
1860 | if (r < 0) |
1861 | return r; | |
1862 | ||
cb649d12 YW |
1863 | if (n == SIZE_MAX) { |
1864 | if (set_contains(*s, (char*) p)) | |
1865 | return 0; | |
454f0f86 | 1866 | |
cb649d12 YW |
1867 | c = strdup(p); |
1868 | } else | |
1869 | c = strndup(p, n); | |
89439d4f MS |
1870 | if (!c) |
1871 | return -ENOMEM; | |
1872 | ||
be327321 | 1873 | return set_consume(*s, c); |
89439d4f MS |
1874 | } |
1875 | ||
11e9fec2 | 1876 | int _set_put_strdupv_full(Set **s, const struct hash_ops *hash_ops, char **l HASHMAP_DEBUG_PARAMS) { |
89439d4f | 1877 | int n = 0, r; |
89439d4f | 1878 | |
d97c5aea LP |
1879 | assert(s); |
1880 | ||
89439d4f | 1881 | STRV_FOREACH(i, l) { |
cb649d12 | 1882 | r = _set_put_strndup_full(s, hash_ops, *i, SIZE_MAX HASHMAP_DEBUG_PASS_ARGS); |
89439d4f MS |
1883 | if (r < 0) |
1884 | return r; | |
1885 | ||
1886 | n += r; | |
1887 | } | |
1888 | ||
1889 | return n; | |
3c1668da | 1890 | } |
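/* Illustrative sketch: set_put_strdupv() (assumed public wrapper of the helper
 * above) copies every string of a strv into the set and returns how many were
 * newly added, so duplicates in the input are absorbed silently. */
static int example_collect_names(Set **s) {
        char *names[] = { "foo", "bar", "foo", NULL };

        return set_put_strdupv(s, names); /* returns 2: the second "foo" is a duplicate */
}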
d97c5aea LP |
1891 | |
1892 | int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) { | |
99534007 | 1893 | const char *p = ASSERT_PTR(v); |
d97c5aea LP |
1894 | int r; |
1895 | ||
1896 | assert(s); | |
d97c5aea LP |
1897 | |
1898 | for (;;) { | |
1899 | char *word; | |
1900 | ||
1901 | r = extract_first_word(&p, &word, separators, flags); | |
1902 | if (r <= 0) | |
1903 | return r; | |
1904 | ||
1905 | r = set_consume(s, word); | |
1906 | if (r < 0) | |
1907 | return r; | |
1908 | } | |
1909 | } | |
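/* Illustrative sketch (assumed caller code): splitting a whitespace-separated
 * option string straight into a set; every word extracted by
 * extract_first_word() is handed to set_consume(), so nothing leaks on error. */
static int example_parse_words(Set *s, const char *line) {
        return set_put_strsplit(s, line, WHITESPACE, 0);
}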
45ea84d8 VC |
1910 | |
1911 | /* Expand the cachemem if needed. Returns true if it was newly (re)activated, false if it was already active, negative on allocation failure. */ |
319a4f4b | 1912 | static int cachemem_maintain(CacheMem *mem, size_t size) { |
45ea84d8 VC |
1913 | assert(mem); |
1914 | ||
319a4f4b | 1915 | if (!GREEDY_REALLOC(mem->ptr, size)) { |
45ea84d8 VC |
1916 | if (size > 0) |
1917 | return -ENOMEM; | |
1918 | } | |
1919 | ||
afbbc068 ZJS |
1920 | if (!mem->active) { |
1921 | mem->active = true; | |
1922 | return true; | |
1923 | } | |
45ea84d8 | 1924 | |
afbbc068 | 1925 | return false; |
45ea84d8 VC |
1926 | } |
1927 | ||
1928 | int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) { | |
1929 | bool sync_keys = false, sync_values = false; | |
319a4f4b | 1930 | size_t size; |
45ea84d8 VC |
1931 | int r; |
1932 | ||
1933 | assert(cache); | |
1934 | assert(cache->hashmap); | |
1935 | ||
1936 | size = n_entries(cache->hashmap); | |
1937 | ||
1938 | if (res_keys) { | |
1939 | r = cachemem_maintain(&cache->keys, size); | |
1940 | if (r < 0) | |
1941 | return r; | |
1942 | ||
1943 | sync_keys = r; | |
1944 | } else | |
1945 | cache->keys.active = false; | |
1946 | ||
1947 | if (res_values) { | |
1948 | r = cachemem_maintain(&cache->values, size); | |
1949 | if (r < 0) | |
1950 | return r; | |
1951 | ||
1952 | sync_values = r; | |
1953 | } else | |
1954 | cache->values.active = false; | |
1955 | ||
1956 | if (cache->hashmap->dirty) { | |
1957 | if (cache->keys.active) | |
1958 | sync_keys = true; | |
1959 | if (cache->values.active) | |
1960 | sync_values = true; | |
1961 | ||
1962 | cache->hashmap->dirty = false; | |
1963 | } | |
1964 | ||
1965 | if (sync_keys || sync_values) { | |
1966 | unsigned i, idx; | |
1967 | Iterator iter; | |
1968 | ||
1969 | i = 0; | |
1970 | HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) { | |
1971 | struct hashmap_base_entry *e; | |
1972 | ||
1973 | e = bucket_at(cache->hashmap, idx); | |
1974 | ||
1975 | if (sync_keys) | |
1976 | cache->keys.ptr[i] = e->key; | |
1977 | if (sync_values) | |
1978 | cache->values.ptr[i] = entry_value(cache->hashmap, e); | |
1979 | i++; | |
1980 | } | |
1981 | } | |
1982 | ||
1983 | if (res_keys) | |
1984 | *res_keys = cache->keys.ptr; | |
1985 | if (res_values) | |
1986 | *res_values = cache->values.ptr; | |
1987 | if (res_n_entries) | |
1988 | *res_n_entries = size; | |
1989 | ||
1990 | return 0; | |
1991 | } | |
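/* Illustrative sketch (assumed caller code): the iterated cache trades memory
 * for cheap repeated iteration; as long as the hashmap is not marked dirty,
 * subsequent calls return the same arrays without rescanning the buckets. */
static int example_walk_values(IteratedCache *cache) {
        const void **values;
        unsigned n, n_set = 0;
        int r;

        /* Passing NULL for the keys deactivates the key cache. */
        r = iterated_cache_get(cache, NULL, &values, &n);
        if (r < 0)
                return r;

        for (unsigned i = 0; i < n; i++)
                if (values[i]) /* 'values' stays valid until the next refresh */
                        n_set++;

        return (int) n_set;
}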
1992 | ||
8a35af80 | 1993 | IteratedCache* iterated_cache_free(IteratedCache *cache) { |
45ea84d8 VC |
1994 | if (cache) { |
1995 | free(cache->keys.ptr); | |
1996 | free(cache->values.ptr); | |
45ea84d8 VC |
1997 | } |
1998 | ||
b61658fd | 1999 | return mfree(cache); |
45ea84d8 | 2000 | } |
4dbce717 | 2001 | |
8d80f275 | 2002 | int set_strjoin(Set *s, const char *separator, bool wrap_with_separator, char **ret) { |
4dbce717 | 2003 | _cleanup_free_ char *str = NULL; |
319a4f4b | 2004 | size_t separator_len, len = 0; |
4dbce717 | 2005 | const char *value; |
8d80f275 | 2006 | bool first; |
4dbce717 YW |
2007 | |
2008 | assert(ret); | |
2009 | ||
8d80f275 YW |
2010 | if (set_isempty(s)) { |
2011 | *ret = NULL; | |
2012 | return 0; | |
2013 | } | |
2014 | ||
4dbce717 YW |
2015 | separator_len = strlen_ptr(separator); |
2016 | ||
8d80f275 YW |
2017 | if (separator_len == 0) |
2018 | wrap_with_separator = false; | |
2019 | ||
2020 | first = !wrap_with_separator; | |
2021 | ||
4dbce717 YW |
2022 | SET_FOREACH(value, s) { |
2023 | size_t l = strlen_ptr(value); | |
2024 | ||
2025 | if (l == 0) | |
2026 | continue; | |
2027 | ||
319a4f4b | 2028 | if (!GREEDY_REALLOC(str, len + l + (first ? 0 : separator_len) + (wrap_with_separator ? separator_len : 0) + 1)) |
4dbce717 YW |
2029 | return -ENOMEM; |
2030 | ||
2031 | if (separator_len > 0 && !first) { | |
2032 | memcpy(str + len, separator, separator_len); | |
2033 | len += separator_len; | |
2034 | } | |
2035 | ||
2036 | memcpy(str + len, value, l); | |
2037 | len += l; | |
2038 | first = false; | |
2039 | } | |
8d80f275 YW |
2040 | |
2041 | if (wrap_with_separator) { | |
2042 | memcpy(str + len, separator, separator_len); | |
2043 | len += separator_len; | |
2044 | } | |
2045 | ||
2046 | str[len] = '\0'; | |
4dbce717 YW |
2047 | |
2048 | *ret = TAKE_PTR(str); | |
2049 | return 0; | |
2050 | } | |
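/* Illustrative sketch (assumed caller code): joining a set of option strings
 * into one comma-separated string; with wrap_with_separator=false the result
 * carries no leading or trailing comma, and an empty set yields *ret == NULL. */
static int example_format_options(Set *options, char **ret) {
        return set_strjoin(options, ",", /* wrap_with_separator= */ false, ret);
}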
e4304fb8 LP |
2051 | |
2052 | bool set_equal(Set *a, Set *b) { | |
2053 | void *p; | |
2054 | ||
2055 | /* Checks whether each entry of 'a' is also in 'b' and vice versa, i.e. the two sets contain the same | |
2056 | * entries */ | |
2057 | ||
2058 | if (a == b) | |
2059 | return true; | |
2060 | ||
2061 | if (set_isempty(a) && set_isempty(b)) | |
2062 | return true; | |
2063 | ||
2064 | if (set_size(a) != set_size(b)) /* Cheap check that hopefully catches a lot of inequality cases | |
2065 | * already */ | |
2066 | return false; | |
2067 | ||
2068 | SET_FOREACH(p, a) | |
2069 | if (!set_contains(b, p)) | |
2070 | return false; | |
2071 | ||
2072 | /* If we have the same hashops, then we don't need to check things backwards given we compared the | |
2073 | * size and that all of a is in b. */ | |
2074 | if (a->b.hash_ops == b->b.hash_ops) | |
2075 | return true; | |
2076 | ||
2077 | SET_FOREACH(p, b) | |
2078 | if (!set_contains(a, p)) | |
2079 | return false; | |
2080 | ||
2081 | return true; | |
2082 | } | |
d25d4f18 YW |
2083 | |
2084 | static bool set_fnmatch_one(Set *patterns, const char *needle) { | |
2085 | const char *p; | |
2086 | ||
2087 | assert(needle); | |
2088 | ||
8812f8fc LP |
2089 | /* Any failure of fnmatch() is treated as equivalent to FNM_NOMATCH, i.e. as a non-matching pattern */ |
2090 | ||
d25d4f18 YW |
2091 | SET_FOREACH(p, patterns) |
2092 | if (fnmatch(p, needle, 0) == 0) | |
2093 | return true; | |
2094 | ||
2095 | return false; | |
2096 | } | |
2097 | ||
2098 | bool set_fnmatch(Set *include_patterns, Set *exclude_patterns, const char *needle) { | |
2099 | assert(needle); | |
2100 | ||
2101 | if (set_fnmatch_one(exclude_patterns, needle)) | |
2102 | return false; | |
2103 | ||
2104 | if (set_isempty(include_patterns)) | |
2105 | return true; | |
2106 | ||
2107 | return set_fnmatch_one(include_patterns, needle); | |
2108 | } |
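/* Illustrative sketch (assumed caller code): typical allow/deny filtering.
 * Exclusions win over inclusions, and an empty include set means "everything
 * that is not excluded matches". */
static bool example_name_matches(Set *include, Set *exclude, const char *name) {
        return set_fnmatch(include, exclude, name);
}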