From: Alberto Leiva Popper
Date: Thu, 2 Feb 2023 00:56:09 +0000 (-0600)
Subject: Convert vrps.table_lock into a mutex
X-Git-Tag: 1.5.4~8
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=48868d8167084aaa8d081849810f9fd3ba8942b4;p=thirdparty%2FFORT-validator.git

Convert vrps.table_lock into a mutex

There are no readers, so there's no point in this being a reader-writer lock.

Still not meant to be a fix for #83/#89. I'm mostly just trying to force
myself to interact with the code in hopes of finding the bug.
---

diff --git a/src/common.c b/src/common.c
index c19389e0..9935bc7f 100644
--- a/src/common.c
+++ b/src/common.c
@@ -12,6 +12,26 @@
 #include "config.h"
 #include "log.h"
 
+void
+panic_on_fail(int error, char const *function_name)
+{
+	if (error)
+		pr_crit("%s() returned error code %d. This is too critical for a graceful recovery; I must die now.",
+		    function_name, error);
+}
+
+void
+mutex_lock(pthread_mutex_t *lock)
+{
+	panic_on_fail(pthread_mutex_lock(lock), "pthread_mutex_lock");
+}
+
+void
+mutex_unlock(pthread_mutex_t *lock)
+{
+	panic_on_fail(pthread_mutex_unlock(lock), "pthread_mutex_unlock");
+}
+
 int
 rwlock_read_lock(pthread_rwlock_t *lock)
 {
diff --git a/src/common.h b/src/common.h
index 47551a48..1a0771ac 100644
--- a/src/common.h
+++ b/src/common.h
@@ -38,6 +38,15 @@
 
 #define ARRAY_LEN(array) (sizeof(array) / sizeof((array)[0]))
 
+void panic_on_fail(int, char const *);
+
+/*
+ * Mutex wrappers. They are just a bunch of boilerplate, and removal of
+ * unrecoverable resulting error codes.
+ */
+void mutex_lock(pthread_mutex_t *);
+void mutex_unlock(pthread_mutex_t *);
+
 /*
  * rwlock wrappers. They are just a bunch of boilerplate, and removal of
  * unrecoverable resulting error codes.
diff --git a/src/rtr/db/vrps.c b/src/rtr/db/vrps.c
index 0e49faf0..51a55061 100644
--- a/src/rtr/db/vrps.c
+++ b/src/rtr/db/vrps.c
@@ -84,8 +84,23 @@ static struct thread_pool *pool;
 /** Protects @state.base, @state.deltas and @state.serial. */
 static pthread_rwlock_t state_lock;
 
-/** Lock to protect ROA table during construction. */
-static pthread_rwlock_t table_lock;
+/**
+ * Lock to protect the ROA table while it's being built up.
+ *
+ * To be honest, I'm tempted to remove this mutex completely. It currently
+ * exists because all the threads store their ROAs in the same table, which is
+ * awkward engineering. Each thread should work on its own table, and the main
+ * thread should join the tables afterwards. This would render the lock
+ * redundant, as well as rid the relevant code of any concurrency risks.
+ *
+ * I'm conflicted about committing to the refactor, however, because the change
+ * would require about twice as much memory and involve the extra joining step.
+ * And the current implementation is working fine...
+ *
+ * Assuming, that is, that #83/#89 isn't a concurrency problem. But I can't
+ * figure out how it could be.
+ */
+static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
 
 int
 vrps_init(void)
 {
@@ -134,17 +149,8 @@ vrps_init(void)
 		goto revert_deltas;
 	}
 
-	error = pthread_rwlock_init(&table_lock, NULL);
-	if (error) {
-		pr_op_err("table pthread_rwlock_init() errored: %s",
-		    strerror(error));
-		goto revert_state_lock;
-	}
-
 	return 0;
 
-revert_state_lock:
-	pthread_rwlock_destroy(&state_lock);
 revert_deltas:
 	darray_destroy(state.deltas);
 revert_thread_pool:
@@ -158,7 +164,6 @@ vrps_destroy(void)
 	thread_pool_destroy(pool);
 
 	pthread_rwlock_destroy(&state_lock);
-	pthread_rwlock_destroy(&table_lock);
 
 	if (state.slurm != NULL)
 		db_slurm_destroy(state.slurm);
@@ -170,9 +175,9 @@
 
 #define WLOCK_HANDLER(cb)					\
 	int error;						\
-	rwlock_write_lock(&table_lock);				\
+	mutex_lock(&table_lock);				\
 	error = cb;						\
-	rwlock_unlock(&table_lock);				\
+	mutex_unlock(&table_lock);				\
 	return error;
 
 int
diff --git a/src/rtr/rtr.c b/src/rtr/rtr.c
index 830ab011..b97c94d6 100644
--- a/src/rtr/rtr.c
+++ b/src/rtr/rtr.c
@@ -44,26 +44,6 @@ enum poll_verdict {
 	PV_STOP,
 };
 
-static void
-panic_on_fail(int error, char const *function_name)
-{
-	if (error)
-		pr_crit("%s() returned error code %d. This is too critical for a graceful recovery; I must die now.",
-		    function_name, error);
-}
-
-static void
-lock_mutex(void)
-{
-	panic_on_fail(pthread_mutex_lock(&lock), "pthread_mutex_lock");
-}
-
-static void
-unlock_mutex(void)
-{
-	panic_on_fail(pthread_mutex_unlock(&lock), "pthread_mutex_unlock");
-}
-
 static void
 cleanup_server(struct rtr_server *server)
 {
@@ -724,9 +704,9 @@ fddb_poll(void)
 		}
 	}
 
-	lock_mutex();
+	mutex_lock(&lock);
 	apply_pollfds(pollfds, nclients);
-	unlock_mutex();
+	mutex_unlock(&lock);
 	/* Fall through */
 
 success:
@@ -810,7 +790,7 @@ rtr_foreach_client(rtr_foreach_client_cb cb, void *arg)
 	unsigned int i;
 	int error = 0;
 
-	lock_mutex();
+	mutex_lock(&lock);
 
 	ARRAYLIST_FOREACH(&clients, client, i) {
 		if (client->fd != -1) {
@@ -820,7 +800,7 @@ rtr_foreach_client(rtr_foreach_client_cb cb, void *arg)
 		}
 	}
 
-	unlock_mutex();
+	mutex_unlock(&lock);
 
 	return error;
 }
diff --git a/src/thread/thread_pool.c b/src/thread/thread_pool.c
index 97e6a22d..1b38e93b 100644
--- a/src/thread/thread_pool.c
+++ b/src/thread/thread_pool.c
@@ -4,6 +4,7 @@
 #include 
 #include 
 #include 
+#include "common.h"
 #include "log.h"
 
 /*
@@ -79,26 +80,6 @@ struct thread_pool {
 	unsigned int thread_ids_len;
 };
 
-static void
-panic_on_fail(int error, char const *function_name)
-{
-	if (error)
-		pr_crit("%s() returned error code %d. This is too critical for a graceful recovery; I must die now.",
-		    function_name, error);
-}
-
-static void
-mutex_lock(struct thread_pool *pool)
-{
-	panic_on_fail(pthread_mutex_lock(&pool->lock), "pthread_mutex_lock");
-}
-
-static void
-mutex_unlock(struct thread_pool *pool)
-{
-	panic_on_fail(pthread_mutex_unlock(&pool->lock), "pthread_mutex_unlock");
-}
-
 /* Wait until the parent sends us work. */
 static void
 wait_for_parent_signal(struct thread_pool *pool, unsigned int thread_id)
@@ -203,7 +184,7 @@ tasks_poll(void *arg)
 	struct thread_pool_task *task;
 	unsigned int thread_id;
 
-	mutex_lock(pool);
+	mutex_lock(&pool->lock);
 	pool->thread_count++;
 	thread_id = pool->thread_count;
 
@@ -218,7 +199,7 @@
 		/* Claim the work. */
 		task = task_queue_pull(pool, thread_id);
 		pool->working_count++;
-		mutex_unlock(pool);
+		mutex_unlock(&pool->lock);
 
 		if (task != NULL) {
 			task->cb(task->arg);
@@ -227,7 +208,7 @@
 			task_destroy(task);
 		}
 
-		mutex_lock(pool);
+		mutex_lock(&pool->lock);
 		pool->working_count--;
 
 		if (pool->stop)
@@ -237,7 +218,7 @@
 			signal_to_parent(pool);
 	}
 
-	mutex_unlock(pool);
+	mutex_unlock(&pool->lock);
 	pr_op_debug("Thread %s.%u: Returning.", pool->name, thread_id);
 	return NULL;
 }
@@ -387,7 +368,7 @@ thread_pool_destroy(struct thread_pool *pool)
 	pr_op_debug("Destroying thread pool '%s'.", pool->name);
 
 	/* Remove all pending work and send the signal to stop it */
-	mutex_lock(pool);
+	mutex_lock(&pool->lock);
 	queue = &(pool->queue);
 	while (!TAILQ_EMPTY(queue)) {
 		tmp = TAILQ_FIRST(queue);
@@ -396,7 +377,7 @@
 		TAILQ_REMOVE(queue, tmp, next);
 		task_destroy(tmp);
 	}
 	pool->stop = true;
 	pthread_cond_broadcast(&pool->parent2worker);
-	mutex_unlock(pool);
+	mutex_unlock(&pool->lock);
 
 	for (t = 0; t < pool->thread_ids_len; t++)
 		pthread_join(pool->thread_ids[t], NULL);
@@ -425,9 +406,9 @@ thread_pool_push(struct thread_pool *pool, char const *task_name,
 	if (error)
 		return error;
 
-	mutex_lock(pool);
+	mutex_lock(&pool->lock);
 	task_queue_push(pool, task);
-	mutex_unlock(pool);
+	mutex_unlock(&pool->lock);
 
 	/*
 	 * Note: This assumes the threads have already spawned.
@@ -443,9 +424,9 @@ thread_pool_avail_threads(struct thread_pool *pool)
 {
 	bool result;
 
-	mutex_lock(pool);
+	mutex_lock(&pool->lock);
 	result = (pool->working_count < pool->thread_ids_len);
-	mutex_unlock(pool);
+	mutex_unlock(&pool->lock);
 
 	return result;
 }
@@ -454,7 +435,7 @@
 void
 thread_pool_wait(struct thread_pool *pool)
 {
-	mutex_lock(pool);
+	mutex_lock(&pool->lock);
 
 	/* If the pool has to stop, the wait will happen during the joins. */
 	while (!pool->stop) {
@@ -473,5 +454,5 @@
 		wait_for_worker_signal(pool);
 	}
 
-	mutex_unlock(pool);
+	mutex_unlock(&pool->lock);
 }
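
For reference, the refactor floated in the new vrps.c comment (each validation
thread builds its own table, and the main thread joins the per-thread tables
once the workers are done) would look roughly like the sketch below. This is
not FORT-validator code: struct local_table, worker() and the integer "ROAs"
are stand-ins invented for illustration; a real version would merge actual ROA
tables rather than arrays of ints.

/*
 * Hypothetical sketch only; none of these names exist in FORT-validator.
 * Each worker appends "ROAs" (plain ints here) to a table it owns, and the
 * main thread concatenates the per-thread tables after joining the workers.
 * No thread ever writes to a table another thread can see, so there is
 * nothing left for table_lock to protect. Error handling omitted for brevity.
 */
#include <pthread.h>
#include <stdio.h>

#define WORKERS 4
#define ROAS_PER_WORKER 3

struct local_table {
	int roas[ROAS_PER_WORKER];	/* stand-in for real ROA records */
	unsigned int count;
};

static void *
worker(void *arg)
{
	struct local_table *table = arg;	/* private to this thread */
	unsigned int i;

	for (i = 0; i < ROAS_PER_WORKER; i++)
		table->roas[table->count++] = (int)i;	/* "validate" a ROA */

	return NULL;
}

int
main(void)
{
	pthread_t threads[WORKERS];
	struct local_table tables[WORKERS];
	int merged[WORKERS * ROAS_PER_WORKER];
	unsigned int total = 0;
	unsigned int t, i;

	for (t = 0; t < WORKERS; t++) {
		tables[t].count = 0;
		pthread_create(&threads[t], NULL, worker, &tables[t]);
	}
	for (t = 0; t < WORKERS; t++)
		pthread_join(threads[t], NULL);

	/* The "joining step": single-threaded, so still lock-free. */
	for (t = 0; t < WORKERS; t++)
		for (i = 0; i < tables[t].count; i++)
			merged[total++] = tables[t].roas[i];

	printf("Merged %u ROAs from %d workers.\n", total, WORKERS);
	return 0;
}

Because the merge only runs on the main thread after pthread_join(), table_lock
disappears entirely; the price is the joining pass and the temporary
duplication of entries, which is the "about twice as much memory" concern
mentioned in the comment (moving entries instead of copying them would avoid
most of that).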