/*!
 * \brief Destructor for thread_worker_pair
 */
-static void thread_worker_pair_destructor(void *obj)
+/* Renamed: pairs are now plain ast_malloc memory, so this is a direct
+ * free function rather than an ao2 destructor callback. */
+static void thread_worker_pair_free(struct thread_worker_pair *pair)
{
- struct thread_worker_pair *pair = obj;
+ /* Drop the worker reference taken in thread_worker_pair_alloc(). */
ao2_ref(pair->worker, -1);
+ ast_free(pair);
}
/*!
static struct thread_worker_pair *thread_worker_pair_alloc(struct ast_threadpool *pool,
struct worker_thread *worker)
{
- struct thread_worker_pair *pair = ao2_alloc(sizeof(*pair), thread_worker_pair_destructor);
+ /* Plain allocation; ownership is released with thread_worker_pair_free()
+  * instead of ao2_ref(pair, -1). */
+ struct thread_worker_pair *pair = ast_malloc(sizeof(*pair));
if (!pair) {
return NULL;
}
pair->pool = pool;
+ /* Hold a worker reference for the pair's lifetime; released in
+  * thread_worker_pair_free(). */
ao2_ref(worker, +1);
pair->worker = worker;
+
return pair;
}
threadpool_send_state_changed(pair->pool);
- ao2_ref(pair, -1);
+ /* Queued task consumes the pair allocated by the pusher. */
+ thread_worker_pair_free(pair);
return 0;
}
{
struct thread_worker_pair *pair;
SCOPED_AO2LOCK(lock, pool);
+
if (pool->shutting_down) {
return;
}
+
pair = thread_worker_pair_alloc(pool, worker);
if (!pair) {
return;
}
- ast_taskprocessor_push(pool->control_tps, queued_active_thread_idle, pair);
+
+ /* If the push fails the queued callback never runs, so free the pair
+  * here; the old code leaked it (an ao2 ref) on failure. */
+ if (ast_taskprocessor_push(pool->control_tps, queued_active_thread_idle, pair)) {
+ thread_worker_pair_free(pair);
+ }
}
/*!
ao2_unlink(pair->pool->zombie_threads, pair->worker);
threadpool_send_state_changed(pair->pool);
- ao2_ref(pair, -1);
+ /* Queued task consumes the pair allocated by the pusher. */
+ thread_worker_pair_free(pair);
return 0;
}
{
struct thread_worker_pair *pair;
SCOPED_AO2LOCK(lock, pool);
+
if (pool->shutting_down) {
return;
}
+
pair = thread_worker_pair_alloc(pool, worker);
if (!pair) {
return;
}
- ast_taskprocessor_push(pool->control_tps, queued_zombie_thread_dead, pair);
+
+ /* Free the pair if the push fails; it would otherwise leak since the
+  * queued callback is the normal owner. */
+ if (ast_taskprocessor_push(pool->control_tps, queued_zombie_thread_dead, pair)) {
+ thread_worker_pair_free(pair);
+ }
}
static int queued_idle_thread_dead(void *data)
ao2_unlink(pair->pool->idle_threads, pair->worker);
threadpool_send_state_changed(pair->pool);
- ao2_ref(pair, -1);
+ /* Queued task consumes the pair allocated by the pusher. */
+ thread_worker_pair_free(pair);
return 0;
}
{
struct thread_worker_pair *pair;
SCOPED_AO2LOCK(lock, pool);
+
if (pool->shutting_down) {
return;
}
+
pair = thread_worker_pair_alloc(pool, worker);
if (!pair) {
return;
}
- ast_taskprocessor_push(pool->control_tps, queued_idle_thread_dead, pair);
+
+ /* Free the pair if the push fails; it would otherwise leak since the
+  * queued callback is the normal owner. */
+ if (ast_taskprocessor_push(pool->control_tps, queued_idle_thread_dead, pair)) {
+ thread_worker_pair_free(pair);
+ }
}
/*!
static struct task_pushed_data *task_pushed_data_alloc(struct ast_threadpool *pool,
int was_empty)
{
- struct task_pushed_data *tpd = ao2_alloc(sizeof(*tpd), NULL);
+ /* No destructor was needed before, so this maps directly onto ast_malloc;
+  * the consumer now calls ast_free(tpd) instead of ao2_ref(tpd, -1). */
+ struct task_pushed_data *tpd = ast_malloc(sizeof(*tpd));
if (!tpd) {
return NULL;
int was_empty = tpd->was_empty;
unsigned int existing_active;
+ /* All needed fields have been copied out of tpd above (pool is
+  * presumably extracted just before this hunk — confirm in full file),
+  * so free it up front; this replaces the per-return-path ao2 releases
+  * removed below and keeps every exit leak-free. */
+ ast_free(tpd);
+
if (pool->listener && pool->listener->callbacks->task_pushed) {
pool->listener->callbacks->task_pushed(pool, pool->listener, was_empty);
}
/* If no idle threads could be transitioned to active grow the pool as permitted. */
if (ao2_container_count(pool->active_threads) == existing_active) {
if (!pool->options.auto_increment) {
- ao2_ref(tpd, -1);
return 0;
}
grow(pool, pool->options.auto_increment);
}
threadpool_send_state_changed(pool);
- ao2_ref(tpd, -1);
return 0;
}
if (pool->shutting_down) {
return;
}
+
tpd = task_pushed_data_alloc(pool, was_empty);
if (!tpd) {
return;
}
- ast_taskprocessor_push(pool->control_tps, queued_task_pushed, tpd);
+ /* Free tpd on push failure; the queued callback only frees it when it
+  * actually runs. */
+ if (ast_taskprocessor_push(pool->control_tps, queued_task_pushed, tpd)) {
+ ast_free(tpd);
+ }
}
/*!
static struct set_size_data *set_size_data_alloc(struct ast_threadpool *pool,
unsigned int size)
{
- struct set_size_data *ssd = ao2_alloc(sizeof(*ssd), NULL);
+ /* No destructor was needed before, so this maps directly onto ast_malloc;
+  * the consumer now calls ast_free(ssd) instead of relying on ao2_cleanup. */
+ struct set_size_data *ssd = ast_malloc(sizeof(*ssd));
if (!ssd) {
return NULL;
}
*/
static int queued_set_size(void *data)
{
- RAII_VAR(struct set_size_data *, ssd, data, ao2_cleanup);
+ struct set_size_data *ssd = data;
struct ast_threadpool *pool = ssd->pool;
unsigned int num_threads = ssd->size;
unsigned int current_size = ao2_container_count(pool->active_threads) +
ao2_container_count(pool->idle_threads);
+ /* pool and num_threads are copied out above, so free ssd immediately;
+  * this replaces the RAII_VAR cleanup and covers every return path. */
+ ast_free(ssd);
+
if (current_size == num_threads) {
ast_debug(3, "Not changing threadpool size since new size %u is the same as current %u\n",
num_threads, current_size);
{
struct set_size_data *ssd;
SCOPED_AO2LOCK(lock, pool);
+
if (pool->shutting_down) {
return;
}
return;
}
- ast_taskprocessor_push(pool->control_tps, queued_set_size, ssd);
+ /* Free ssd on push failure; the queued callback only frees it when it
+  * actually runs. */
+ if (ast_taskprocessor_push(pool->control_tps, queued_set_size, ssd)) {
+ ast_free(ssd);
+ }
}
struct ast_threadpool_listener *ast_threadpool_listener_alloc(