Manual revert of 4c98aab6f670c659dff4c7e0cf392576f7850732
author    Vsevolod Stakhov <vsevolod@rspamd.com>
          Fri, 13 Jun 2025 08:00:01 +0000 (09:00 +0100)
committer Vsevolod Stakhov <vsevolod@rspamd.com>
          Fri, 13 Jun 2025 08:00:01 +0000 (09:00 +0100)
The problem is that we are trying to lock maps per backend, but periodics
are scheduled per map. This means that the locks are not consistent with what
we schedule, and it does not match the current map processing logic.

Hence, we revert that change here.
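
For reference, here is a minimal sketch of the per-map locking that this revert restores (simplified; the struct and the GLib atomics mirror the diff below, while the map_sketch type and the helper functions are purely illustrative). The lock lives in a single shared structure per map, so the per-map periodic event takes and releases exactly one lock regardless of how many backends the map has; locking per backend, as the reverted commit did, left the lock and unlock sites disagreeing about which object they guard.

/* Sketch of the per-map shared lock restored by this commit. The struct
 * matches map_private.h below; map_sketch and the helpers are illustrative. */
#include <glib.h>

struct rspamd_map_shared_data {
	int locked; /* 1 while some process is refreshing this map */
	int loaded;
	int cached;
};

struct map_sketch {
	const char *name;
	struct rspamd_map_shared_data *shared; /* lives in shared memory */
};

/* One lock attempt per map, as in rspamd_map_process_periodic() */
static gboolean
map_try_lock(struct map_sketch *map)
{
	return g_atomic_int_compare_and_exchange(&map->shared->locked, 0, 1);
}

/* A single release, as in rspamd_map_unlock_current_backend() and the
 * periodic dtor; no per-backend unlock loop is needed any more. */
static void
map_unlock(struct map_sketch *map)
{
	g_atomic_int_set(&map->shared->locked, 0);
}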

src/controller.c
src/libserver/maps/map.c
src/libserver/maps/map_private.h

diff --git a/src/controller.c b/src/controller.c
index ae2098282cb92bb325d52ebf32c59b48abcdb12a..8df12e2abce748ad2a80549c684af7b227cb2f2f 100644
--- a/src/controller.c
+++ b/src/controller.c
@@ -992,9 +992,9 @@ rspamd_controller_handle_maps(struct rspamd_http_connection_entry *conn_ent,
                                                                          "type", 0, false);
                                ucl_object_insert_key(obj, ucl_object_frombool(editable),
                                                                          "editable", 0, false);
-                               ucl_object_insert_key(obj, ucl_object_frombool(bk->shared->loaded),
+                               ucl_object_insert_key(obj, ucl_object_frombool(map->shared->loaded),
                                                                          "loaded", 0, false);
-                               ucl_object_insert_key(obj, ucl_object_frombool(bk->shared->cached),
+                               ucl_object_insert_key(obj, ucl_object_frombool(map->shared->cached),
                                                                          "cached", 0, false);
                                ucl_array_append(top, obj);
                        }
@@ -1012,9 +1012,9 @@ rspamd_controller_handle_maps(struct rspamd_http_connection_entry *conn_ent,
                                                                          "type", 0, false);
                                ucl_object_insert_key(obj, ucl_object_frombool(false),
                                                                          "editable", 0, false);
-                               ucl_object_insert_key(obj, ucl_object_frombool(bk->shared->loaded),
+                               ucl_object_insert_key(obj, ucl_object_frombool(map->shared->loaded),
                                                                          "loaded", 0, false);
-                               ucl_object_insert_key(obj, ucl_object_frombool(bk->shared->cached),
+                               ucl_object_insert_key(obj, ucl_object_frombool(map->shared->cached),
                                                                          "cached", 0, false);
                                ucl_array_append(top, obj);
                        }
@@ -1141,7 +1141,7 @@ rspamd_controller_handle_get_map(struct rspamd_http_connection_entry *conn_ent,
                rspamd_map_traverse(bk->map, rspamd_controller_map_traverse_callback, &map_body, FALSE);
                rspamd_http_message_set_body_from_fstring_steal(reply, map_body);
        }
-       else if (bk->shared->loaded) {
+       else if (map->shared->loaded) {
                reply = rspamd_http_new_message(HTTP_RESPONSE);
                reply->code = 200;
                rspamd_fstring_t *map_body = rspamd_fstring_new();
diff --git a/src/libserver/maps/map.c b/src/libserver/maps/map.c
index 78d1c1ccde1c151aaabe62e04db33a639c414250..92e09928403adaefcf76e30a1d79c6e3e84e4891 100644
--- a/src/libserver/maps/map.c
+++ b/src/libserver/maps/map.c
@@ -301,14 +301,12 @@ rspamd_map_cache_cb(struct ev_loop *loop, ev_timer *w, int revents)
 static void
 rspamd_map_unlock_current_backend(struct map_periodic_cbdata *cbd)
 {
-       struct rspamd_map_backend *bk;
        struct rspamd_map *map = cbd->map;
 
        if (cbd->owned_lock && cbd->cur_backend < cbd->map->backends->len) {
-               bk = g_ptr_array_index(cbd->map->backends, cbd->cur_backend);
-               g_atomic_int_set(&bk->shared->locked, 0);
+               g_atomic_int_set(&map->shared->locked, 0);
                cbd->owned_lock = FALSE;
-               msg_debug_map("unlocked current backend %s before switching", bk->uri);
+               msg_debug_map("unlocked map %s before switching", map->name);
        }
 }
 
@@ -358,7 +356,7 @@ http_map_finish(struct rspamd_http_connection *conn,
                        cbd->periodic->cur_backend = 0;
                        /* Reset cache, old cached data will be cleaned on timeout */
                        g_atomic_int_set(&data->cache->available, 0);
-                       g_atomic_int_set(&bk->shared->loaded, 0);
+                       g_atomic_int_set(&map->shared->loaded, 0);
                        data->cur_cache_cbd = NULL;
 
                        rspamd_map_process_periodic(cbd->periodic);
@@ -444,8 +442,8 @@ http_map_finish(struct rspamd_http_connection *conn,
                 * We know that a map is in the locked state
                 */
                g_atomic_int_set(&data->cache->available, 1);
-               g_atomic_int_set(&bk->shared->loaded, 1);
-               g_atomic_int_set(&bk->shared->cached, 0);
+               g_atomic_int_set(&map->shared->loaded, 1);
+               g_atomic_int_set(&map->shared->cached, 0);
                /* Store cached data */
                rspamd_strlcpy(data->cache->shmem_name, cbd->shmem_data->shm_name,
                                           sizeof(data->cache->shmem_name));
@@ -945,7 +943,7 @@ read_map_file(struct rspamd_map *map, struct file_map_data *data,
                map->read_callback(NULL, 0, &periodic->cbdata, TRUE);
        }
 
-       g_atomic_int_set(&bk->shared->loaded, 1);
+       g_atomic_int_set(&map->shared->loaded, 1);
 
        return TRUE;
 }
@@ -1031,7 +1029,7 @@ read_map_static(struct rspamd_map *map, struct static_map_data *data,
        }
 
        data->processed = TRUE;
-       g_atomic_int_set(&bk->shared->loaded, 1);
+       g_atomic_int_set(&map->shared->loaded, 1);
 
        return TRUE;
 }
@@ -1039,10 +1037,7 @@ read_map_static(struct rspamd_map *map, struct static_map_data *data,
 static void
 rspamd_map_periodic_dtor(struct map_periodic_cbdata *periodic)
 {
-       struct rspamd_map *map;
-       struct rspamd_map_backend *bk;
-
-       map = periodic->map;
+       struct rspamd_map *map = periodic->map;
        msg_debug_map("periodic dtor %p; need_modify=%d", periodic, periodic->need_modify);
 
        if (periodic->need_modify || periodic->cbdata.errored) {
@@ -1059,8 +1054,7 @@ rspamd_map_periodic_dtor(struct map_periodic_cbdata *periodic)
 
        if (periodic->owned_lock) {
                if (periodic->cur_backend < map->backends->len) {
-                       bk = (struct rspamd_map_backend *) g_ptr_array_index(map->backends, periodic->cur_backend);
-                       g_atomic_int_set(&bk->shared->locked, 0);
+                       g_atomic_int_set(&map->shared->locked, 0);
                        msg_debug_map("unlocked map %s", map->name);
                }
 
@@ -1471,8 +1465,8 @@ rspamd_map_read_cached(struct rspamd_map *map, struct rspamd_map_backend *bk,
                map->read_callback(in, len, &periodic->cbdata, TRUE);
        }
 
-       g_atomic_int_set(&bk->shared->loaded, 1);
-       g_atomic_int_set(&bk->shared->cached, 1);
+       g_atomic_int_set(&map->shared->loaded, 1);
+       g_atomic_int_set(&map->shared->cached, 1);
 
        munmap(in, mmap_len);
 
@@ -1763,8 +1757,8 @@ rspamd_map_read_http_cached_file(struct rspamd_map *map,
        struct tm tm;
        char ncheck_buf[32], lm_buf[32];
 
-       g_atomic_int_set(&bk->shared->loaded, 1);
-       g_atomic_int_set(&bk->shared->cached, 1);
+       g_atomic_int_set(&map->shared->loaded, 1);
+       g_atomic_int_set(&map->shared->cached, 1);
        rspamd_localtime(map->next_check, &tm);
        strftime(ncheck_buf, sizeof(ncheck_buf) - 1, "%Y-%m-%d %H:%M:%S", &tm);
        rspamd_localtime(htdata->last_modified, &tm);
@@ -1808,7 +1802,7 @@ rspamd_map_common_http_callback(struct rspamd_map *map,
                                                         (int) data->cache->last_modified);
                                periodic->need_modify = TRUE;
                                /* Reset the whole chain */
-                               g_atomic_int_set(&bk->shared->locked, 0);
+                               g_atomic_int_set(&map->shared->locked, 0);
                                periodic->cur_backend = 0;
                                rspamd_map_process_periodic(periodic);
                        }
@@ -1819,7 +1813,7 @@ rspamd_map_common_http_callback(struct rspamd_map *map,
                                }
                                else {
                                        /* Switch to the next backend */
-                                       g_atomic_int_set(&bk->shared->locked, 0);
+                                       g_atomic_int_set(&map->shared->locked, 0);
                                        periodic->cur_backend++;
                                        rspamd_map_process_periodic(periodic);
                                }
@@ -2069,15 +2063,7 @@ rspamd_map_process_periodic(struct map_periodic_cbdata *cbd)
 
        /* For each backend we need to check for modifications */
        if (cbd->cur_backend >= cbd->map->backends->len) {
-               /* Last backend - unlock current backend if needed */
-               if (cbd->owned_lock) {
-                       /* Unlock all backends */
-                       for (unsigned int i = 0; i < cbd->map->backends->len; i++) {
-                               bk = g_ptr_array_index(cbd->map->backends, i);
-                               g_atomic_int_set(&bk->shared->locked, 0);
-                       }
-                       cbd->owned_lock = FALSE;
-               }
+               /* Last backend */
                msg_debug_map("finished map: %d of %d", cbd->cur_backend,
                                          cbd->map->backends->len);
                MAP_RELEASE(cbd, "periodic");
@@ -2088,7 +2074,7 @@ rspamd_map_process_periodic(struct map_periodic_cbdata *cbd)
        bk = g_ptr_array_index(map->backends, cbd->cur_backend);
 
        if (!map->file_only && !cbd->owned_lock) {
-               if (!g_atomic_int_compare_and_exchange(&bk->shared->locked,
+               if (!g_atomic_int_compare_and_exchange(&map->shared->locked,
                                                                                           0, 1)) {
                        msg_debug_map(
                                "don't try to reread map %s as it is locked by other process, "
@@ -2100,7 +2086,7 @@ rspamd_map_process_periodic(struct map_periodic_cbdata *cbd)
                        return;
                }
                else {
-                       msg_debug_map("locked map %s (backend: %s)", map->name, bk->uri);
+                       msg_debug_map("locked map %s", map->name);
                        cbd->owned_lock = TRUE;
                }
        }
@@ -2110,10 +2096,7 @@ rspamd_map_process_periodic(struct map_periodic_cbdata *cbd)
                rspamd_map_schedule_periodic(cbd->map, RSPAMD_MAP_SCHEDULE_ERROR);
 
                if (cbd->owned_lock) {
-                       for (unsigned int i = 0; i < cbd->map->backends->len; i++) {
-                               bk = g_ptr_array_index(cbd->map->backends, i);
-                               g_atomic_int_set(&bk->shared->locked, 0);
-                       }
+                       g_atomic_int_set(&map->shared->locked, 0);
                        cbd->owned_lock = FALSE;
                }
 
@@ -2832,9 +2815,6 @@ rspamd_map_parse_backend(struct rspamd_config *cfg, const char *map_line)
                bk->data.sd = sdata;
        }
 
-       bk->shared = rspamd_mempool_alloc0_shared(cfg->cfg_pool,
-                                                                                         sizeof(struct rspamd_map_shared_backend_data));
-
        return bk;
 
 err:
@@ -2965,6 +2945,8 @@ rspamd_map_add(struct rspamd_config *cfg,
        map->user_data = user_data;
        map->cfg = cfg;
        map->id = rspamd_random_uint64_fast();
+       map->shared =
+               rspamd_mempool_alloc0_shared(cfg->cfg_pool, sizeof(struct rspamd_map_shared_data));
        map->backends = g_ptr_array_sized_new(1);
        map->wrk = worker;
        rspamd_mempool_add_destructor(cfg->cfg_pool, rspamd_ptr_array_free_hard,
@@ -3063,6 +3045,8 @@ rspamd_map_add_from_ucl(struct rspamd_config *cfg,
        map->user_data = user_data;
        map->cfg = cfg;
        map->id = rspamd_random_uint64_fast();
+       map->shared =
+               rspamd_mempool_alloc0_shared(cfg->cfg_pool, sizeof(struct rspamd_map_shared_data));
        map->backends = g_ptr_array_new();
        map->wrk = worker;
        map->no_file_read = (flags & RSPAMD_MAP_FILE_NO_READ);
@@ -3244,7 +3228,7 @@ rspamd_map_add_from_ucl(struct rspamd_config *cfg,
 
        if (all_loaded) {
                /* Static map */
-               g_atomic_int_set(&bk->shared->loaded, 1);
+               g_atomic_int_set(&map->shared->loaded, 1);
        }
 
        rspamd_map_calculate_hash(map);
diff --git a/src/libserver/maps/map_private.h b/src/libserver/maps/map_private.h
index 7f98170bc45753c6e4653567931a4bf4177efe0e..0ea614425e8094885817f16b91bbbb9e88dd33c8 100644
--- a/src/libserver/maps/map_private.h
+++ b/src/libserver/maps/map_private.h
@@ -134,20 +134,12 @@ union rspamd_map_backend_data {
 
 
 struct rspamd_map;
-/*
- * Shared between workers
- */
-struct rspamd_map_shared_backend_data {
-       int locked;
-       int loaded;
-       int cached;
-};
+
 struct rspamd_map_backend {
        enum fetch_proto protocol;
        gboolean is_signed;
        gboolean is_compressed;
        gboolean is_fallback;
-       struct rspamd_map_shared_backend_data *shared;
        struct rspamd_map *map;
        struct ev_loop *event_loop;
        uint64_t id;
@@ -159,6 +151,15 @@ struct rspamd_map_backend {
 
 struct map_periodic_cbdata;
 
+/*
+ * Shared between workers
+ */
+struct rspamd_map_shared_data {
+       int locked;
+       int loaded;
+       int cached;
+};
+
 struct rspamd_map {
        struct rspamd_dns_resolver *r;
        struct rspamd_config *cfg;
@@ -193,6 +194,8 @@ struct rspamd_map {
        bool static_only;  /* No need to check */
        bool no_file_read; /* Do not read files */
        bool seen;         /* This map has already been watched or pre-loaded */
+       /* Shared lock for temporary disabling of map reading (e.g. when this map is written by UI) */
+       struct rspamd_map_shared_data *shared;
        char tag[MEMPOOL_UID_LEN];
 };
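
As a usage note, a small standalone sketch of why this shared data has to live in shared memory (the diff allocates map->shared with rspamd_mempool_alloc0_shared()): a flag set in one process must be visible in every forked worker, which is what lets the controller temporarily disable rereads of a map while the UI rewrites it. The fork/mmap scaffolding below is illustrative only and stands in for rspamd's shared mempool.

/* Illustrative only: an anonymous shared mapping gives the same
 * cross-process visibility and zero initialization that
 * rspamd_mempool_alloc0_shared() provides for map->shared. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <glib.h>

struct rspamd_map_shared_data {
	int locked;
	int loaded;
	int cached;
};

int
main(void)
{
	struct rspamd_map_shared_data *shared =
		mmap(NULL, sizeof(*shared), PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (shared == MAP_FAILED) {
		return 1;
	}

	if (fork() == 0) {
		/* "Controller" process: hold the lock while rewriting the map */
		if (g_atomic_int_compare_and_exchange(&shared->locked, 0, 1)) {
			/* ... write the new map contents ... */
			g_atomic_int_set(&shared->locked, 0);
		}
		_exit(0);
	}

	wait(NULL);

	/* "Worker" process: while the child held the lock, this same CAS would
	 * have failed and a periodic reread would have been skipped. */
	return g_atomic_int_compare_and_exchange(&shared->locked, 0, 1) ? 0 : 1;
}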