cbd->periodic->cur_backend = 0;
/* Reset cache, old cached data will be cleaned on timeout */
g_atomic_int_set(&data->cache->available, 0);
+ g_atomic_int_set(&map->shared->loaded, 0);
data->cur_cache_cbd = NULL;
rspamd_map_process_periodic(cbd->periodic);
* We know that a map is in the locked state
*/
g_atomic_int_set(&data->cache->available, 1);
+ g_atomic_int_set(&map->shared->loaded, 1);
/* Store cached data */
rspamd_strlcpy(data->cache->shmem_name, cbd->shmem_data->shm_name,
sizeof(data->cache->shmem_name));
map->read_callback(NULL, 0, &periodic->cbdata, TRUE);
}
+ g_atomic_int_set(&map->shared->loaded, 1);
+
return TRUE;
}
}
data->processed = TRUE;
+ g_atomic_int_set(&map->shared->loaded, 1);
return TRUE;
}
}
if (periodic->locked) {
- g_atomic_int_set(periodic->map->locked, 0);
+ g_atomic_int_set(&periodic->map->shared->locked, 0);
msg_debug_map("unlocked map %s", periodic->map->name);
if (periodic->map->wrk->state == rspamd_worker_state_running) {
map->read_callback(in, len, &periodic->cbdata, TRUE);
}
+ g_atomic_int_set(&map->shared->loaded, 1);
+ g_atomic_int_set(&map->shared->cached, 1);
+
munmap(in, mmap_len);
return TRUE;
map->scheduled_check = NULL;
if (!map->file_only && !cbd->locked) {
- if (!g_atomic_int_compare_and_exchange(cbd->map->locked,
+ if (!g_atomic_int_compare_and_exchange(&cbd->map->shared->locked,
0, 1)) {
msg_debug_map(
"don't try to reread map %s as it is locked by other process, "
rspamd_map_schedule_periodic(cbd->map, RSPAMD_MAP_SCHEDULE_ERROR);
if (cbd->locked) {
- g_atomic_int_set(cbd->map->locked, 0);
+ g_atomic_int_set(&cbd->map->shared->locked, 0);
cbd->locked = FALSE;
}
map->user_data = user_data;
map->cfg = cfg;
map->id = rspamd_random_uint64_fast();
- map->locked =
- rspamd_mempool_alloc0_shared(cfg->cfg_pool, sizeof(int));
+ map->shared =
+ rspamd_mempool_alloc0_shared(cfg->cfg_pool, sizeof(struct rspamd_map_shared_data));
map->backends = g_ptr_array_sized_new(1);
map->wrk = worker;
rspamd_mempool_add_destructor(cfg->cfg_pool, rspamd_ptr_array_free_hard,
map->user_data = user_data;
map->cfg = cfg;
map->id = rspamd_random_uint64_fast();
- map->locked =
- rspamd_mempool_alloc0_shared(cfg->cfg_pool, sizeof(int));
+ map->shared =
+ rspamd_mempool_alloc0_shared(cfg->cfg_pool, sizeof(struct rspamd_map_shared_data));
map->backends = g_ptr_array_new();
map->wrk = worker;
map->no_file_read = (flags & RSPAMD_MAP_FILE_NO_READ);
/*
- * Copyright 2024 Vsevolod Stakhov
+ * Copyright 2025 Vsevolod Stakhov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
time_t last_checked;
};
-struct rspamd_map_cachepoint {
+struct rspamd_http_map_cache {
int available;
gsize len;
time_t last_modified;
*/
struct http_map_data {
/* Shared cache data */
- struct rspamd_map_cachepoint *cache;
+ struct rspamd_http_map_cache *cache;
/* Non-shared for cache owner, used to cleanup cache */
struct rspamd_http_map_cached_cbdata *cur_cache_cbd;
char *userinfo;
struct map_periodic_cbdata;
+/*
+ * Shared between workers
+ */
+struct rspamd_map_shared_data {
+ int locked; /* Atomic lock flag: taken via g_atomic_int_compare_and_exchange(0 -> 1) to stop other processes from re-reading the map concurrently; released with g_atomic_int_set(..., 0) */
+ int loaded; /* Atomic flag: set to 1 once the map data has been read and processed */
+ int cached; /* Atomic flag: set alongside `loaded` on the cached/mmapped read path — NOTE(review): presumably means "data served from shared cache"; confirm against callers */
+};
+
struct rspamd_map {
struct rspamd_dns_resolver *r;
struct rspamd_config *cfg;
bool no_file_read; /* Do not read files */
bool seen; /* This map has already been watched or pre-loaded */
/* Shared lock for temporary disabling of map reading (e.g. when this map is written by UI) */
- int *locked;
+ struct rspamd_map_shared_data *shared;
char tag[MEMPOOL_UID_LEN];
};