+/* SPDX-License-Identifier: LGPL-2.1+ */
/***
- This file is part of systemd.
-
Copyright 2010 Lennart Poettering
Copyright 2014 Michal Schmidt
-
- systemd is free software; you can redistribute it and/or modify it
- under the terms of the GNU Lesser General Public License as published by
- the Free Software Foundation; either version 2.1 of the License, or
- (at your option) any later version.
-
- systemd is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public License
- along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <errno.h>
#include "alloc-util.h"
#include "hashmap.h"
+#include "fileio.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
+#include "string-util.h"
#include "strv.h"
#include "util.h"
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif
#define DIB_FREE UINT_MAX
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
LIST_FIELDS(struct hashmap_debug_info, debug_list);
unsigned max_entries; /* high watermark of n_entries */
unsigned n_direct_entries:3; /* Number of entries in direct storage.
* Only valid if !has_indirect. */
bool from_pool:1; /* whether it was allocated from the mempool */
+ bool dirty:1; /* whether dirtied since last iterated_cache_get() */
+ bool cached:1; /* whether this hashmap is being cached */
HASHMAP_DEBUG_FIELDS /* optional hashmap_debug_info */
};
struct HashmapBase b;
};
+typedef struct CacheMem {
+        const void **ptr;                /* contiguous array of cached entries */
+        size_t n_populated, n_allocated;
+        bool active:1;                   /* whether this array is to be kept in sync */
+} CacheMem;
+
+struct IteratedCache {
+ HashmapBase *hashmap;
+ CacheMem keys, values;
+};
+
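+/* An IteratedCache hands out the keys and/or values of a hashmap as flat,
+ * contiguous arrays. The arrays are rebuilt lazily: every modification marks
+ * the hashmap dirty (see base_set_dirty() below), and the next
+ * iterated_cache_get() call resynchronizes whichever arrays are active. */
+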
DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
},
};
+#if VALGRIND
+__attribute__((destructor)) static void cleanup_pools(void) {
+ _cleanup_free_ char *t = NULL;
+ int r;
+
+ /* Be nice to valgrind */
+
+ /* The pool is only allocated by the main thread, but the memory can
+ * be passed to other threads. Let's clean up if we are the main thread
+ * and no other threads are live. */
+ if (!is_main_thread())
+ return;
+
+ r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
+ if (r < 0 || !streq(t, "1"))
+ return;
+
+ mempool_drop(&hashmap_pool);
+ mempool_drop(&ordered_hashmap_pool);
+}
+#endif
+
static unsigned n_buckets(HashmapBase *h) {
return h->has_indirect ? h->indirect.n_buckets
: hashmap_type_info[h->type].n_direct_buckets;
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)
+static inline void base_set_dirty(HashmapBase *h) {
+ h->dirty = true;
+}
+#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))
+
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
static uint8_t current[HASH_KEY_SIZE];
static bool current_initialized = false;
dibs = dib_raw_ptr(h);
assert(dibs[idx] != DIB_RAW_FREE);
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
h->debug.rem_count++;
h->debug.last_rem_idx = idx;
#endif
/* Find the stop bucket ("right"). It is either free or has DIB == 0. */
for (right = next_idx(h, left); ; right = next_idx(h, right)) {
raw_dib = dibs[right];
- if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
+ if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
break;
/* The buckets are not supposed to be all occupied and with DIB > 0.
bucket_mark_free(h, prev);
n_entries_dec(h);
+ base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
assert(e->p.b.key == i->next_key);
}
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
i->prev_idx = idx;
#endif
}
idx = i->idx;
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
i->prev_idx = idx;
#endif
return IDX_NIL;
}
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
if (i->idx == IDX_FIRST) {
i->put_count = h->debug.put_count;
i->rem_count = h->debug.rem_count;
(idx != IDX_NIL); \
(idx) = hashmap_iterate_entry((h), &(i)))
+IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
+ IteratedCache *cache;
+
+ assert(h);
+ assert(!h->cached);
+
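+        /* Only one IteratedCache may be attached to a given hashmap at a
+         * time: the assert catches misuse in debug builds, the runtime check
+         * degrades gracefully otherwise. */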
+ if (h->cached)
+ return NULL;
+
+ cache = new0(IteratedCache, 1);
+ if (!cache)
+ return NULL;
+
+ cache->hashmap = h;
+ h->cached = true;
+
+ return cache;
+}
+
static void reset_direct_storage(HashmapBase *h) {
const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
void *p;
shared_hash_key_initialized = true;
}
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
h->debug.func = func;
h->debug.file = file;
h->debug.line = line;
assert(!h->has_indirect);
assert(!h->n_direct_entries);
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
OrderedHashmap *lh = (OrderedHashmap*) h;
lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
}
+
+ base_set_dirty(h);
}
void internal_hashmap_clear_free(HashmapBase *h) {
dib_raw_t raw_dib, *dibs;
unsigned dib, distance;
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
h->debug.put_count++;
#endif
for (distance = 0; ; distance++) {
raw_dib = dibs[idx];
- if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
+ if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)) {
if (raw_dib == DIB_RAW_REHASH)
bucket_move_entry(h, swap, idx, IDX_TMP);
assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
n_entries_inc(h);
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif
+ base_set_dirty(h);
+
return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
idx = bucket_scan(h, hash, key);
if (idx != IDX_NIL) {
e = plain_bucket_at(h, idx);
-#ifdef ENABLE_DEBUG_HASHMAP
+#if ENABLE_DEBUG_HASHMAP
/* Although the key is equal, the key pointer may have changed,
* and this would break our assumption for iterating. So count
* this operation as incompatible with iteration. */
#endif
e->b.key = key;
e->value = value;
+ hashmap_set_dirty(h);
+
return 0;
}
e = plain_bucket_at(h, idx);
e->value = value;
+ hashmap_set_dirty(h);
+
return 0;
}
int set_consume(Set *s, void *value) {
int r;
+ assert(s);
+ assert(value);
+
r = set_put(s, value);
if (r <= 0)
free(value);
int n = 0, r;
char **i;
+ assert(s);
+
STRV_FOREACH(i, l) {
r = set_put_strdup(s, *i);
if (r < 0)
return n;
}
+
+int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) {
+ const char *p = v;
+ int r;
+
+ assert(s);
+ assert(v);
+
+ for (;;) {
+ char *word;
+
+ r = extract_first_word(&p, &word, separators, flags);
+ if (r <= 0)
+ return r;
+
+ r = set_consume(s, word);
+ if (r < 0)
+ return r;
+ }
+}
+
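+/* A minimal usage sketch for set_put_strsplit(), assuming a set allocated
+ * with string_hash_ops (the caller below is hypothetical):
+ *
+ *         _cleanup_set_free_free_ Set *s = NULL;
+ *         int r;
+ *
+ *         s = set_new(&string_hash_ops);
+ *         if (!s)
+ *                 return -ENOMEM;
+ *
+ *         r = set_put_strsplit(s, "one two three", WHITESPACE, 0);
+ *         if (r < 0)
+ *                 return r;
+ *
+ * Each word is allocated by extract_first_word() and ownership passes to the
+ * set via set_consume(), which also frees duplicates. */
+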
+/* Expand the cache memory if needed; returns > 0 if the array was newly
+ * (re)activated (and thus needs a full resync), 0 if it was already active,
+ * and a negative errno on allocation failure. */
+static int cachemem_maintain(CacheMem *mem, unsigned size) {
+ assert(mem);
+
+ if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
+ if (size > 0)
+ return -ENOMEM;
+ }
+
+ if (!mem->active) {
+ mem->active = true;
+ return true;
+ }
+
+ return false;
+}
+
+int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
+ bool sync_keys = false, sync_values = false;
+ unsigned size;
+ int r;
+
+ assert(cache);
+ assert(cache->hashmap);
+
+ size = n_entries(cache->hashmap);
+
+ if (res_keys) {
+ r = cachemem_maintain(&cache->keys, size);
+ if (r < 0)
+ return r;
+
+ sync_keys = r;
+ } else
+ cache->keys.active = false;
+
+ if (res_values) {
+ r = cachemem_maintain(&cache->values, size);
+ if (r < 0)
+ return r;
+
+ sync_values = r;
+ } else
+ cache->values.active = false;
+
+ if (cache->hashmap->dirty) {
+ if (cache->keys.active)
+ sync_keys = true;
+ if (cache->values.active)
+ sync_values = true;
+
+ cache->hashmap->dirty = false;
+ }
+
+ if (sync_keys || sync_values) {
+ unsigned i, idx;
+ Iterator iter;
+
+ i = 0;
+ HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
+ struct hashmap_base_entry *e;
+
+ e = bucket_at(cache->hashmap, idx);
+
+ if (sync_keys)
+ cache->keys.ptr[i] = e->key;
+ if (sync_values)
+ cache->values.ptr[i] = entry_value(cache->hashmap, e);
+ i++;
+ }
+ }
+
+ if (res_keys)
+ *res_keys = cache->keys.ptr;
+ if (res_values)
+ *res_values = cache->values.ptr;
+ if (res_n_entries)
+ *res_n_entries = size;
+
+ return 0;
+}
+
+IteratedCache *iterated_cache_free(IteratedCache *cache) {
+ if (cache) {
+ free(cache->keys.ptr);
+ free(cache->values.ptr);
+ free(cache);
+ }
+
+ return NULL;
+}
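+
+/* A minimal lifecycle sketch for the IteratedCache API above. The
+ * hashmap_iterated_cache_new() wrapper is assumed here, in the style of the
+ * other internal_*() entry points; only iterated_cache_get() and
+ * iterated_cache_free() appear in this file:
+ *
+ *         IteratedCache *c;
+ *         const void **keys;
+ *         unsigned n;
+ *         int r;
+ *
+ *         c = hashmap_iterated_cache_new(h);
+ *         if (!c)
+ *                 return -ENOMEM;
+ *
+ *         r = iterated_cache_get(c, &keys, NULL, &n);
+ *         if (r < 0)
+ *                 return r;
+ *
+ *         (keys[0] .. keys[n-1] stay valid until the hashmap is modified and
+ *         iterated_cache_get() is called again.)
+ *
+ *         c = iterated_cache_free(c);
+ */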