From: Stefan Fritsch Date: Sat, 18 Jun 2011 22:29:49 +0000 (+0000) Subject: If MaxMemFree is set, limit the number of recycled pools that is kept X-Git-Tag: 2.3.13~38 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=6fb57c93e66b23e33506c447d7096cd44dabdb02;p=thirdparty%2Fapache%2Fhttpd.git If MaxMemFree is set, limit the number of recycled pools that is kept git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1137262 13f79535-47bb-0310-9956-ffa450edef68 --- diff --git a/CHANGES b/CHANGES index 1a3a0d6f2ec..34651388441 100644 --- a/CHANGES +++ b/CHANGES @@ -2,6 +2,9 @@ Changes with Apache 2.3.13 + *) mpm_event: If MaxMemFree is set, limit the number of pools that is kept + around. [Stefan Fritsch] + *) mpm_event: Fix graceful restart aborting connections. PR 43359. [Takashi Sato ] diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c index 83ebcff71f9..b002f56910c 100644 --- a/server/mpm/event/event.c +++ b/server/mpm/event/event.c @@ -1558,6 +1558,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy) int listener_started = 0; int loops; int prev_threads_created; + int max_recycled_pools = -1; /* We must create the fd queues before we start up the listener * and worker threads. */ @@ -1569,8 +1570,15 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy) clean_child_exit(APEXIT_CHILDFATAL); } + if (ap_max_mem_free != APR_ALLOCATOR_MAX_FREE_UNLIMITED) { + /* If we want to conserve memory, let's not keep an unlimited number of + * pools & allocators.
+ * XXX: This should probably be a separate config directive + */ + max_recycled_pools = threads_per_child * 3 / 4 ; + } rv = ap_queue_info_create(&worker_queue_info, pchild, - threads_per_child); + threads_per_child, max_recycled_pools); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, "ap_queue_info_create() failed"); diff --git a/server/mpm/event/fdqueue.c b/server/mpm/event/fdqueue.c index c1dbda1ee52..26b5906d22b 100644 --- a/server/mpm/event/fdqueue.c +++ b/server/mpm/event/fdqueue.c @@ -34,6 +34,8 @@ struct fd_queue_info_t apr_thread_cond_t *wait_for_idler; int terminated; int max_idlers; + int max_recycled_pools; + apr_uint32_t recycled_pools_count; recycled_pool *recycled_pools; }; @@ -60,7 +62,8 @@ static apr_status_t queue_info_cleanup(void *data_) } apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info, - apr_pool_t * pool, int max_idlers) + apr_pool_t * pool, int max_idlers, + int max_recycled_pools) { apr_status_t rv; fd_queue_info_t *qi; @@ -77,6 +80,7 @@ apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info, return rv; } qi->recycled_pools = NULL; + qi->max_recycled_pools = max_recycled_pools; qi->max_idlers = max_idlers; apr_pool_cleanup_register(pool, qi, queue_info_cleanup, apr_pool_cleanup_null); @@ -191,29 +195,36 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info) void ap_push_pool(fd_queue_info_t * queue_info, apr_pool_t * pool_to_recycle) { + struct recycled_pool *new_recycle; /* If we have been given a pool to recycle, atomically link * it into the queue_info's list of recycled pools */ - if (pool_to_recycle) { - struct recycled_pool *new_recycle; - new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle, - sizeof - (*new_recycle)); - new_recycle->pool = pool_to_recycle; - for (;;) { - /* - * Save queue_info->recycled_pool in local variable next because - * new_recycle->next can be changed after apr_atomic_casptr - * function call.
For gory details see PR 44402. - */ - struct recycled_pool *next = queue_info->recycled_pools; - new_recycle->next = next; - if (apr_atomic_casptr - ((void*) &(queue_info->recycled_pools), - new_recycle, next) == next) { - break; - } + if (!pool_to_recycle) + return; + + if (queue_info->max_recycled_pools >= 0) { + apr_uint32_t cnt = apr_atomic_read32(&queue_info->recycled_pools_count); + if (cnt >= queue_info->max_recycled_pools) { + apr_pool_destroy(pool_to_recycle); + return; } + apr_atomic_inc32(&queue_info->recycled_pools_count); + } + + new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle, + sizeof (*new_recycle)); + new_recycle->pool = pool_to_recycle; + for (;;) { + /* + * Save queue_info->recycled_pool in local variable next because + * new_recycle->next can be changed after apr_atomic_casptr + * function call. For gory details see PR 44402. + */ + struct recycled_pool *next = queue_info->recycled_pools; + new_recycle->next = next; + if (apr_atomic_casptr((void*) &(queue_info->recycled_pools), + new_recycle, next) == next) + break; } } @@ -241,6 +252,8 @@ void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t * queue_info) ((void*) &(queue_info->recycled_pools), first_pool->next, first_pool) == first_pool) { *recycled_pool = first_pool->pool; + if (queue_info->max_recycled_pools >= 0) + apr_atomic_dec32(&queue_info->recycled_pools_count); break; } } diff --git a/server/mpm/event/fdqueue.h b/server/mpm/event/fdqueue.h index fbe120b8db8..9c1deb6441b 100644 --- a/server/mpm/event/fdqueue.h +++ b/server/mpm/event/fdqueue.h @@ -42,7 +42,8 @@ typedef struct fd_queue_info_t fd_queue_info_t; apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info, - apr_pool_t * pool, int max_idlers); + apr_pool_t * pool, int max_idlers, + int max_recycled_pools); apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info, apr_pool_t * pool_to_recycle); apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info);