int listener_started = 0;
int loops;
int prev_threads_created;
+ int max_recycled_pools = -1;
/* We must create the fd queues before we start up the listener
* and worker threads. */
clean_child_exit(APEXIT_CHILDFATAL);
}
+ if (ap_max_mem_free != APR_ALLOCATOR_MAX_FREE_UNLIMITED) {
+ /* MaxMemFree is in effect (ap_max_mem_free is limited), so don't
+  * keep an unlimited number of recycled pools & allocators either.
+  * XXX: This should probably be a separate config directive.
+  */
+ max_recycled_pools = threads_per_child * 3 / 4;
+ }
rv = ap_queue_info_create(&worker_queue_info, pchild,
- threads_per_child);
+ threads_per_child, max_recycled_pools);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
"ap_queue_info_create() failed");
apr_thread_cond_t *wait_for_idler;
int terminated;
int max_idlers;
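+ /* Cap on the recycled-pools freelist; -1 means unlimited
+  * (MaxMemFree not in effect). */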
+ int max_recycled_pools;
+ apr_uint32_t recycled_pools_count;
recycled_pool *recycled_pools;
};
}
apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info,
- apr_pool_t * pool, int max_idlers)
+ apr_pool_t * pool, int max_idlers,
+ int max_recycled_pools)
{
apr_status_t rv;
fd_queue_info_t *qi;
return rv;
}
qi->recycled_pools = NULL;
+ qi->recycled_pools_count = 0;
+ qi->max_recycled_pools = max_recycled_pools;
qi->max_idlers = max_idlers;
apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
apr_pool_cleanup_null);
void ap_push_pool(fd_queue_info_t * queue_info,
apr_pool_t * pool_to_recycle)
{
+ struct recycled_pool *new_recycle;
/* If we have been given a pool to recycle, atomically link
* it into the queue_info's list of recycled pools
*/
- if (pool_to_recycle) {
- struct recycled_pool *new_recycle;
- new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle,
- sizeof
- (*new_recycle));
- new_recycle->pool = pool_to_recycle;
- for (;;) {
- /*
- * Save queue_info->recycled_pool in local variable next because
- * new_recycle->next can be changed after apr_atomic_casptr
- * function call. For gory details see PR 44402.
- */
- struct recycled_pool *next = queue_info->recycled_pools;
- new_recycle->next = next;
- if (apr_atomic_casptr
- ((void*) &(queue_info->recycled_pools),
- new_recycle, next) == next) {
- break;
- }
+ if (!pool_to_recycle)
+ return;
+
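+ /* The cap check and the increment below are not a single atomic
+  * operation, so the limit can be overshot slightly under contention.
+  * That is acceptable: max_recycled_pools is a memory-saving
+  * heuristic, not a hard invariant.
+  */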
+ if (queue_info->max_recycled_pools >= 0) {
+ apr_uint32_t cnt = apr_atomic_read32(&queue_info->recycled_pools_count);
+ if (cnt >= (apr_uint32_t)queue_info->max_recycled_pools) {
+ apr_pool_destroy(pool_to_recycle);
+ return;
}
+ apr_atomic_inc32(&queue_info->recycled_pools_count);
+ }
+
+ new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle,
+ sizeof (*new_recycle));
+ new_recycle->pool = pool_to_recycle;
+ for (;;) {
+ /*
+ * Save queue_info->recycled_pool in local variable next because
+ * new_recycle->next can be changed after apr_atomic_casptr
+ * function call. For gory details see PR 44402.
+ */
+ struct recycled_pool *next = queue_info->recycled_pools;
+ new_recycle->next = next;
+ if (apr_atomic_casptr((void*) &(queue_info->recycled_pools),
+ new_recycle, next) == next)
+ break;
}
}
((void*) &(queue_info->recycled_pools),
first_pool->next, first_pool) == first_pool) {
*recycled_pool = first_pool->pool;
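+ /* The pool just came off the freelist, so it no longer counts
+  * against max_recycled_pools. */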
+ if (queue_info->max_recycled_pools >= 0)
+ apr_atomic_dec32(&queue_info->recycled_pools_count);
break;
}
}
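
Aside for review, not part of the patch: the recycling above is a lock-free
LIFO (a Treiber stack) with an approximate size cap. Below is a minimal
stand-alone sketch of the same pattern using C11 atomics; all names
(freelist, freelist_push, freelist_pop) are invented for the illustration,
and the real code uses APR pools and apr_atomic_* instead.

    #include <stdatomic.h>
    #include <stddef.h>

    struct node {
        struct node *next;
    };

    struct freelist {
        _Atomic(struct node *) head; /* lock-free LIFO of recycled nodes */
        atomic_uint count;           /* approximate length of the list */
        unsigned max;                /* soft cap, like max_recycled_pools */
    };

    /* Push a node unless the (approximate) cap is reached.
     * Returns 0 when the caller should destroy the node instead,
     * mirroring the apr_pool_destroy() path in ap_push_pool(). */
    static int freelist_push(struct freelist *fl, struct node *n)
    {
        if (atomic_load(&fl->count) >= fl->max)
            return 0;
        /* Not atomic with the check above: the cap is a soft limit. */
        atomic_fetch_add(&fl->count, 1);

        struct node *next = atomic_load(&fl->head);
        do {
            n->next = next; /* re-linked on every retry; cf. the PR 44402 comment */
        } while (!atomic_compare_exchange_weak(&fl->head, &next, n));
        return 1;
    }

    /* Pop a node, or return NULL when the list is empty. */
    static struct node *freelist_pop(struct freelist *fl)
    {
        struct node *first = atomic_load(&fl->head);
        while (first != NULL &&
               !atomic_compare_exchange_weak(&fl->head, &first, first->next))
            ;
        if (first != NULL)
            atomic_fetch_sub(&fl->count, 1);
        return first;
    }

Like the patch, the sketch checks the cap before incrementing, so the cap is
a soft limit; and as with any minimal Treiber stack, the pop path carries the
usual theoretical ABA caveat.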