git.ipfire.org Git - thirdparty/asterisk.git/commitdiff
res_pjsip: Need to use the same serializer for a pjproject SIP transaction. 92/792/1
authorRichard Mudgett <rmudgett@digium.com>
Fri, 5 Jun 2015 20:37:33 +0000 (15:37 -0500)
committerRichard Mudgett <rmudgett@digium.com>
Mon, 6 Jul 2015 17:52:36 +0000 (12:52 -0500)
All send/receive processing for a SIP transaction needs to be done under
the same threadpool serializer to prevent reentrancy problems inside
pjproject and res_pjsip.

* Add threadpool API call to get the current serializer associated with
the worker thread.

* Pick a serializer from a pool of default serializers if the caller of
res_pjsip.c:ast_sip_push_task() does not provide one.

This is a simple way to ensure that all outgoing SIP request messages are
processed under a serializer.  Otherwise, any place where a pushed task is
done that would result in an outgoing out-of-dialog request would need to
be modified to supply a serializer.  Serializers from the default
serializer pool are picked in a round robin sequence for simplicity.

A side effect is that the default serializer pool will limit the growth of
the thread pool from random tasks.  This is not necessarily a bad thing.

* Made pjsip_distributor.c save the thread's serializer name on the
outgoing request tdata struct so the response can be processed under the
same serializer.

This is a cherry-pick from master.

ASTERISK-25115
Change-Id: Iea71c16ce1132017b5791635e198b8c27973f40a

NOTE: session_inv_on_state_changed() is disassociating the dialog from the
session when the invite dialog becomes PJSIP_INV_STATE_DISCONNECTED.
Unfortunately this is a tad too soon because our BYE request transaction
has not completed yet.

ASTERISK-25183 #close
Reported by: Matt Jordan

Change-Id: I8bad0ae1daf18d75b8c9e55874244b7962df2d0a

include/asterisk/threadpool.h
main/threadpool.c
res/res_pjsip.c
res/res_pjsip/pjsip_distributor.c

index 942d14fc1e3bd5d0f12dee37515909cfd52220d4..75ce0e4e4ef114901e48ef3062015601c9a79653 100644 (file)
@@ -217,6 +217,22 @@ struct ast_serializer_shutdown_group *ast_serializer_shutdown_group_alloc(void);
  */
 int ast_serializer_shutdown_group_join(struct ast_serializer_shutdown_group *shutdown_group, int timeout);
 
+/*!
+ * \brief Get the threadpool serializer currently associated with this thread.
+ * \since 14.0.0
+ *
+ * \note The returned pointer is valid while the serializer
+ * thread is running.
+ *
+ * \note Use ao2_ref() on serializer if you are going to keep it
+ * for another thread.  To unref it you must then use
+ * ast_taskprocessor_unreference().
+ *
+ * \retval serializer on success.
+ * \retval NULL on error or no serializer associated with the thread.
+ */
+struct ast_taskprocessor *ast_threadpool_serializer_get_current(void);
+
 /*!
  * \brief Serialized execution of tasks within a \ref ast_threadpool.
  *
index 479938959e0399805ec7613dc0a31a4adc06e467..d97a7adb8008585fd9b7a7f72b5763a84b960c19 100644 (file)
@@ -1259,13 +1259,17 @@ static struct serializer *serializer_create(struct ast_threadpool *pool,
        return ser;
 }
 
+AST_THREADSTORAGE_RAW(current_serializer);
+
 static int execute_tasks(void *data)
 {
        struct ast_taskprocessor *tps = data;
 
+       ast_threadstorage_set_ptr(&current_serializer, tps);
        while (ast_taskprocessor_execute(tps)) {
                /* No-op */
        }
+       ast_threadstorage_set_ptr(&current_serializer, NULL);
 
        ast_taskprocessor_unreference(tps);
        return 0;
@@ -1305,6 +1309,11 @@ static struct ast_taskprocessor_listener_callbacks serializer_tps_listener_callb
        .shutdown = serializer_shutdown,
 };
 
+struct ast_taskprocessor *ast_threadpool_serializer_get_current(void)
+{
+       return ast_threadstorage_get_ptr(&current_serializer);
+}
+
 struct ast_taskprocessor *ast_threadpool_serializer_group(const char *name,
        struct ast_threadpool *pool, struct ast_serializer_shutdown_group *shutdown_group)
 {
index e92de51bb147d232079f557a86befb74d254dfdb..658a55e88187174b8cb30299e498a0aedbba0b53 100644 (file)
 
 #define MOD_DATA_CONTACT "contact"
 
+/*! Number of serializers in pool if one not supplied. */
+#define SERIALIZER_POOL_SIZE           8
+
+/*! Next serializer pool index to use. */
+static int serializer_pool_pos;
+
+/*! Pool of serializers to use if not supplied. */
+static struct ast_taskprocessor *serializer_pool[SERIALIZER_POOL_SIZE];
+
 static pjsip_endpoint *ast_pjsip_endpoint;
 
 static struct ast_threadpool *sip_threadpool;
@@ -3341,8 +3350,62 @@ struct ast_taskprocessor *ast_sip_create_serializer(void)
        return ast_sip_create_serializer_group(NULL);
 }
 
+/*!
+ * \internal
+ * \brief Shutdown the serializers in the default pool.
+ * \since 14.0.0
+ *
+ * \return Nothing
+ */
+static void serializer_pool_shutdown(void)
+{
+       int idx;
+
+       for (idx = 0; idx < SERIALIZER_POOL_SIZE; ++idx) {
+               ast_taskprocessor_unreference(serializer_pool[idx]);
+               serializer_pool[idx] = NULL;
+       }
+}
+
+/*!
+ * \internal
+ * \brief Setup the serializers in the default pool.
+ * \since 14.0.0
+ *
+ * \retval 0 on success.
+ * \retval -1 on error.
+ */
+static int serializer_pool_setup(void)
+{
+       int idx;
+
+       for (idx = 0; idx < SERIALIZER_POOL_SIZE; ++idx) {
+               serializer_pool[idx] = ast_sip_create_serializer();
+               if (!serializer_pool[idx]) {
+                       serializer_pool_shutdown();
+                       return -1;
+               }
+       }
+       return 0;
+}
+
 int ast_sip_push_task(struct ast_taskprocessor *serializer, int (*sip_task)(void *), void *task_data)
 {
+       if (!serializer) {
+               unsigned int pos;
+
+               /*
+                * Pick a serializer to use from the pool.
+                *
+                * Note: We don't care about any reentrancy behavior
+                * when incrementing serializer_pool_pos.  If it gets
+                * incorrectly incremented it doesn't matter.
+                */
+               pos = serializer_pool_pos++;
+               pos %= SERIALIZER_POOL_SIZE;
+               serializer = serializer_pool[pos];
+       }
+
        if (serializer) {
                return ast_taskprocessor_push(serializer, sip_task, task_data);
        } else {
@@ -3395,18 +3458,10 @@ int ast_sip_push_task_synchronous(struct ast_taskprocessor *serializer, int (*si
        std.task = sip_task;
        std.task_data = task_data;
 
-       if (serializer) {
-               if (ast_taskprocessor_push(serializer, sync_task, &std)) {
-                       ast_mutex_destroy(&std.lock);
-                       ast_cond_destroy(&std.cond);
-                       return -1;
-               }
-       } else {
-               if (ast_threadpool_push(sip_threadpool, sync_task, &std)) {
-                       ast_mutex_destroy(&std.lock);
-                       ast_cond_destroy(&std.cond);
-                       return -1;
-               }
+       if (ast_sip_push_task(serializer, sync_task, &std)) {
+               ast_mutex_destroy(&std.lock);
+               ast_cond_destroy(&std.cond);
+               return -1;
        }
 
        ast_mutex_lock(&std.lock);
@@ -3697,6 +3752,18 @@ static int load_module(void)
                return AST_MODULE_LOAD_DECLINE;
        }
 
+       if (serializer_pool_setup()) {
+               ast_log(LOG_ERROR, "Failed to create SIP serializer pool. Aborting load\n");
+               ast_threadpool_shutdown(sip_threadpool);
+               ast_sip_destroy_system();
+               pj_pool_release(memory_pool);
+               memory_pool = NULL;
+               pjsip_endpt_destroy(ast_pjsip_endpoint);
+               ast_pjsip_endpoint = NULL;
+               pj_caching_pool_destroy(&caching_pool);
+               return AST_MODULE_LOAD_DECLINE;
+       }
+
        ast_sip_initialize_dns();
 
        pjsip_tsx_layer_init_module(ast_pjsip_endpoint);
@@ -3826,6 +3893,7 @@ static int unload_module(void)
         */
        ast_sip_push_task_synchronous(NULL, unload_pjsip, NULL);
 
+       serializer_pool_shutdown();
        ast_threadpool_shutdown(sip_threadpool);
 
        ast_sip_destroy_cli();
index e32f02833f7dc94561c556e58fa1430a07ad6d31..9b052603a954d756fa04ff86dedd8864f809e689 100644 (file)
 
 #include "asterisk/res_pjsip.h"
 #include "include/res_pjsip_private.h"
+#include "asterisk/taskprocessor.h"
+#include "asterisk/threadpool.h"
 
 static int distribute(void *data);
 static pj_bool_t distributor(pjsip_rx_data *rdata);
+static pj_status_t record_serializer(pjsip_tx_data *tdata);
 
 static pjsip_module distributor_mod = {
        .name = {"Request Distributor", 19},
        .priority = PJSIP_MOD_PRIORITY_TSX_LAYER - 6,
+       .on_tx_request = record_serializer,
        .on_rx_request = distributor,
        .on_rx_response = distributor,
 };
 
+/*!
+ * \internal
+ * \brief Record the task's serializer name on the tdata structure.
+ * \since 14.0.0
+ *
+ * \param tdata The outgoing message.
+ *
+ * \retval PJ_SUCCESS.
+ */
+static pj_status_t record_serializer(pjsip_tx_data *tdata)
+{
+       struct ast_taskprocessor *serializer;
+
+       serializer = ast_threadpool_serializer_get_current();
+       if (serializer) {
+               const char *name;
+
+               name = ast_taskprocessor_name(serializer);
+               if (!ast_strlen_zero(name)
+                       && (!tdata->mod_data[distributor_mod.id]
+                               || strcmp(tdata->mod_data[distributor_mod.id], name))) {
+                       char *tdata_name;
+
+                       /* The serializer in use changed. */
+                       tdata_name = pj_pool_alloc(tdata->pool, strlen(name) + 1);
+                       strcpy(tdata_name, name);/* Safe */
+
+                       tdata->mod_data[distributor_mod.id] = tdata_name;
+               }
+       }
+
+       return PJ_SUCCESS;
+}
+
+/*!
+ * \internal
+ * \brief Find the request tdata to get the serializer it used.
+ * \since 14.0.0
+ *
+ * \param rdata The incoming message.
+ *
+ * \retval serializer on success.
+ * \retval NULL on error or could not find the serializer.
+ */
+static struct ast_taskprocessor *find_request_serializer(pjsip_rx_data *rdata)
+{
+       struct ast_taskprocessor *serializer = NULL;
+       pj_str_t tsx_key;
+       pjsip_transaction *tsx;
+
+       pjsip_tsx_create_key(rdata->tp_info.pool, &tsx_key, PJSIP_ROLE_UAC,
+               &rdata->msg_info.cseq->method, rdata);
+
+       tsx = pjsip_tsx_layer_find_tsx(&tsx_key, PJ_TRUE);
+       if (!tsx) {
+               ast_debug(1, "Could not find %.*s transaction for %d response.\n",
+                       (int) pj_strlen(&rdata->msg_info.cseq->method.name),
+                       pj_strbuf(&rdata->msg_info.cseq->method.name),
+                       rdata->msg_info.msg->line.status.code);
+               return NULL;
+       }
+
+       if (tsx->last_tx) {
+               const char *serializer_name;
+
+               serializer_name = tsx->last_tx->mod_data[distributor_mod.id];
+               if (!ast_strlen_zero(serializer_name)) {
+                       serializer = ast_taskprocessor_get(serializer_name, TPS_REF_IF_EXISTS);
+               }
+       }
+
+#ifdef HAVE_PJ_TRANSACTION_GRP_LOCK
+       pj_grp_lock_release(tsx->grp_lock);
+#else
+       pj_mutex_unlock(tsx->mutex);
+#endif
+
+       return serializer;
+}
+
 /*! Dialog-specific information the distributor uses */
 struct distributor_dialog_data {
-       /* Serializer to distribute tasks to for this dialog */
+       /*! Serializer to distribute tasks to for this dialog */
        struct ast_taskprocessor *serializer;
-       /* Endpoint associated with this dialog */
+       /*! Endpoint associated with this dialog */
        struct ast_sip_endpoint *endpoint;
 };
 
@@ -167,6 +251,7 @@ static pj_bool_t distributor(pjsip_rx_data *rdata)
        pjsip_dialog *dlg = find_dialog(rdata);
        struct distributor_dialog_data *dist = NULL;
        struct ast_taskprocessor *serializer = NULL;
+       struct ast_taskprocessor *req_serializer = NULL;
        pjsip_rx_data *clone;
 
        if (dlg) {
@@ -176,11 +261,16 @@ static pj_bool_t distributor(pjsip_rx_data *rdata)
                }
        }
 
-       if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG && (
-               !pjsip_method_cmp(&rdata->msg_info.msg->line.req.method, &pjsip_cancel_method) || 
-               !pjsip_method_cmp(&rdata->msg_info.msg->line.req.method, &pjsip_bye_method)) &&
-               !serializer) {
-               pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), rdata, 481, NULL, NULL, NULL);
+       if (serializer) {
+               /* We have a serializer so we know where to send the message. */
+       } else if (rdata->msg_info.msg->type == PJSIP_RESPONSE_MSG) {
+               req_serializer = find_request_serializer(rdata);
+               serializer = req_serializer;
+       } else if (!pjsip_method_cmp(&rdata->msg_info.msg->line.req.method, &pjsip_cancel_method)
+               || !pjsip_method_cmp(&rdata->msg_info.msg->line.req.method, &pjsip_bye_method)) {
+               /* We have a BYE or CANCEL request without a serializer. */
+               pjsip_endpt_respond_stateless(ast_sip_get_pjsip_endpoint(), rdata,
+                       PJSIP_SC_CALL_TSX_DOES_NOT_EXIST, NULL, NULL, NULL);
                goto end;
        }
 
@@ -196,6 +286,7 @@ end:
        if (dlg) {
                pjsip_dlg_dec_lock(dlg);
        }
+       ast_taskprocessor_unreference(req_serializer);
 
        return PJ_TRUE;
 }