FS-10167: fixed a couple deadlock issues and a misconception about the locks on hash
author    Shane Bryldt <astaelan@gmail.com>
Mon, 17 Apr 2017 17:10:05 +0000 (11:10 -0600)
committer Shane Bryldt <astaelan@gmail.com>
Mon, 17 Apr 2017 17:10:20 +0000 (11:10 -0600)
libs/libblade/src/blade_session.c
libs/libblade/src/blade_stack.c
libs/libks/src/simclist.c

diff --git a/libs/libblade/src/blade_session.c b/libs/libblade/src/blade_session.c
index ea7e0509fd25cbc4f25a2725c7e52e58d5a72d87..41ddd0f64a6fd1cd02ee0b0b77927376d0949418 100644
--- a/libs/libblade/src/blade_session.c
+++ b/libs/libblade/src/blade_session.c
@@ -319,8 +319,8 @@ KS_DECLARE(void) blade_session_state_set(blade_session_t *bs, blade_session_stat
 
        blade_handle_session_state_callbacks_execute(bs, BLADE_SESSION_STATE_CONDITION_PRE);
 
-       ks_cond_try_signal(bs->cond);
        ks_mutex_unlock(bs->mutex);
+       ks_cond_try_signal(bs->cond);
 }
 
 KS_DECLARE(void) blade_session_hangup(blade_session_t *bs)
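
This hunk moves the wake-up outside the critical section: bs->cond is now signaled only after bs->mutex has been released. Signaling after unlock loses no wakeups as long as every waiter re-checks its predicate while holding the mutex. A minimal sketch of that pattern in plain pthreads (the ks_mutex/ks_cond internals are not part of this diff):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool state_changed = false;

/* Signaler: publish the state change under the mutex, but signal only
 * after the mutex is released, mirroring the reordering in this hunk. */
void set_state(void)
{
	pthread_mutex_lock(&mtx);
	state_changed = true;
	pthread_mutex_unlock(&mtx);
	pthread_cond_signal(&cond); /* safe: the waiter re-checks the predicate */
}

/* Waiter: always re-check the predicate in a loop, so a signal delivered
 * after unlock (or a spurious wakeup) cannot be lost. */
void wait_state(void)
{
	pthread_mutex_lock(&mtx);
	while (!state_changed)
		pthread_cond_wait(&cond, &mtx);
	state_changed = false;
	pthread_mutex_unlock(&mtx);
}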
diff --git a/libs/libblade/src/blade_stack.c b/libs/libblade/src/blade_stack.c
index 3c628631a934147e454f58557ed9cc19cad42153..77a7cfe98c496b00bd8f8bcc12eb49ad2e5b678c 100644
--- a/libs/libblade/src/blade_stack.c
+++ b/libs/libblade/src/blade_stack.c
@@ -196,22 +196,23 @@ KS_DECLARE(ks_status_t) blade_handle_create(blade_handle_t **bhP, ks_pool_t *poo
        bh->pool = pool;
        bh->tpool = tpool;
 
-       ks_hash_create(&bh->transports, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+       // @todo this needs to be reviewed, NOLOCK is incorrect, but RWLOCK causes deadlock somewhere
+       ks_hash_create(&bh->transports, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
        ks_assert(bh->transports);
-       ks_hash_create(&bh->spaces, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+       ks_hash_create(&bh->spaces, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
        ks_assert(bh->spaces);
-       ks_hash_create(&bh->events, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+       ks_hash_create(&bh->events, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
        ks_assert(bh->events);
 
-       ks_hash_create(&bh->connections, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+       ks_hash_create(&bh->connections, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
        ks_assert(bh->connections);
 
-       ks_hash_create(&bh->sessions, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+       ks_hash_create(&bh->sessions, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
        ks_assert(bh->sessions);
-       ks_hash_create(&bh->session_state_callbacks, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+       ks_hash_create(&bh->session_state_callbacks, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
        ks_assert(bh->session_state_callbacks);
 
-       ks_hash_create(&bh->requests, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+       ks_hash_create(&bh->requests, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
        ks_assert(bh->requests);
 
        *bhP = bh;
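
These hash changes are the "misconception about the locks on hash" from the commit message: the handle's tables were created with KS_HASH_FLAG_NOLOCK even though they are locked from multiple code paths, and the new @todo notes that KS_HASH_FLAG_RWLOCK still trips a deadlock somewhere and needs review. With the RWLOCK flag, callers bracket lookups and mutations with the hash's read/write lock, as the blade_handle_sessions_get and callback_unregister hunks below do. A minimal usage sketch, assuming the "ks.h" umbrella header and a ks_hash_read_unlock() counterpart to ks_hash_read_lock() (neither appears in this diff):

#include "ks.h"

/* Sketch only: the caller-side locking convention KS_HASH_FLAG_RWLOCK enables.
 * The key and the missing error handling are purely illustrative. */
static void hash_rwlock_example(ks_pool_t *pool)
{
	ks_hash_t *hash = NULL;

	ks_hash_create(&hash, KS_HASH_MODE_CASE_INSENSITIVE,
		       KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, pool);
	ks_assert(hash);

	/* Readers share the lock while probing the table ... */
	ks_hash_read_lock(hash);
	void *value = ks_hash_search(hash, (void *)"key", KS_UNLOCKED);
	ks_hash_read_unlock(hash); /* assumed counterpart of ks_hash_read_lock */
	(void)value;

	/* ... writers hold it exclusively while mutating it. */
	ks_hash_write_lock(hash);
	ks_hash_remove(hash, (void *)"key");
	ks_hash_write_unlock(hash);
}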
@@ -665,6 +666,13 @@ KS_DECLARE(blade_session_t *) blade_handle_sessions_get(blade_handle_t *bh, cons
        ks_assert(bh);
        ks_assert(sid);
 
+       // @todo consider reference counting blade_session_t, rather than locking a mutex to simulate a reference count that halts cleanup while the session is in use
+       // with real reference counting, a mutex would not need to stay locked across a lookup by id just to prevent cleanup;
+       // cleanup would instead happen automatically when the last reference is released (which SHOULD be at the end of the state machine thread),
+       // which is safer than another thread waiting on the write lock while the session is being destroyed, or external code forgetting to unlock
+       // a short-lived mutex or rwl would then guard access to the session's contents while it is referenced
+       // the same approach should also be used for blade_connection_t, which has a similar threaded state machine
+
        ks_hash_read_lock(bh->sessions);
        bs = ks_hash_search(bh->sessions, (void *)sid, KS_UNLOCKED);
        if (bs && blade_session_read_lock(bs, KS_FALSE) != KS_STATUS_SUCCESS) bs = NULL;
@@ -766,7 +774,7 @@ KS_DECLARE(ks_status_t) blade_handle_session_state_callback_unregister(blade_han
        ks_hash_write_lock(bh->session_state_callbacks);
        bhsscr = (blade_handle_session_state_callback_registration_t *)ks_hash_remove(bh->session_state_callbacks, (void *)id);
        if (!bhsscr) ret = KS_STATUS_FAIL;
-       ks_hash_write_lock(bh->session_state_callbacks);
+       ks_hash_write_unlock(bh->session_state_callbacks);
 
        if (bhsscr)     blade_handle_session_state_callback_registration_destroy(&bhsscr);
 
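The @todo added to blade_handle_sessions_get above sketches the longer-term alternative: give blade_session_t (and blade_connection_t) a real reference count instead of holding its read lock to keep cleanup at bay. A hypothetical illustration of that ownership model; none of these names exist in libblade, and C11 atomics stand in for whatever counting primitive libks would actually provide:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct my_session_s {
	atomic_int refs;   /* counts owners of the object, it is not a lock */
	/* ... session payload, guarded by a short-lived mutex/rwl while referenced ... */
} my_session_t;

static my_session_t *my_session_retain(my_session_t *s)
{
	atomic_fetch_add(&s->refs, 1);
	return s;
}

static void my_session_release(my_session_t *s)
{
	/* Cleanup happens automatically when the last holder lets go (expected
	 * to be the end of the state machine thread), so no thread can end up
	 * waiting on a lock owned by memory that is being destroyed. */
	if (atomic_fetch_sub(&s->refs, 1) == 1) {
		free(s);
	}
}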
diff --git a/libs/libks/src/simclist.c b/libs/libks/src/simclist.c
index 52b0bddf78fec2ef23bfab846d7cd8f45f1732c0..4aacd9e06f90fae38d01de3b6844d0c5e10f4cf1 100755
--- a/libs/libks/src/simclist.c
+++ b/libs/libks/src/simclist.c
@@ -267,8 +267,6 @@ static void ks_list_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_clean
                break;
        case KS_MPCL_TEARDOWN:
                ks_list_clear(l);
-               break;
-       case KS_MPCL_DESTROY:
                ks_rwl_write_lock(l->lock);
                for (unsigned int i = 0; i < l->spareelsnum; i++) ks_pool_free(l->pool, &l->spareels[i]);
                l->spareelsnum = 0;
@@ -278,6 +276,8 @@ static void ks_list_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_clean
                ks_rwl_write_unlock(l->lock);
                ks_rwl_destroy(&l->lock);
                break;
+       case KS_MPCL_DESTROY:
+               break;
        }
 }
 
@@ -648,13 +648,13 @@ KS_DECLARE(int) ks_list_delete_at(ks_list_t *restrict l, unsigned int pos) {
 KS_DECLARE(int) ks_list_delete_iterator(ks_list_t *restrict l) {
        struct ks_list_entry_s *delendo;
 
-       if (!l->iter_active || l->iter_pos >= l->numels) return -1;
+       if (!l->iter_active || l->iter_pos > l->numels) return -1;
 
        ks_rwl_write_lock(l->lock);
 
-       delendo = ks_list_findpos(l, l->iter_pos);
-
-       ks_list_drop_elem(l, delendo, l->iter_pos);
+       delendo = ks_list_findpos(l, l->iter_pos - 1);
+       
+       ks_list_drop_elem(l, delendo, l->iter_pos - 1);
 
        l->numels--;
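
The iterator fix implies that iter_pos counts elements already returned: after the iterator has produced the element at 0-based index N, iter_pos holds N + 1, so the bounds check must use > and the element to drop sits at iter_pos - 1, which is what the corrected calls pass to ks_list_findpos() and ks_list_drop_elem(). A short usage sketch, assuming ks_list_iterator_start/hasnext/next/stop follow simclist's list_iterator_* naming (only ks_list_delete_iterator appears in this diff):

#include "ks.h" /* assumed umbrella header */

/* Sketch: delete the first matching element while iterating. The iterator
 * function names are assumed from simclist and are not shown in this hunk. */
static void drop_first_match(ks_list_t *l, int needle)
{
	ks_list_iterator_start(l);
	while (ks_list_iterator_hasnext(l)) {
		int *v = (int *)ks_list_iterator_next(l);
		/* The element just returned sits at iter_pos - 1, the index the
		 * fixed ks_list_delete_iterator() now targets. */
		if (v && *v == needle) {
			ks_list_delete_iterator(l);
			break;
		}
	}
	ks_list_iterator_stop(l);
}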