Merge r1822531, r1829676, r1847232, r1847234, r1861333, r1852442, r1866145, r1868295...
author    Jim Jagielski <jim@apache.org>
          Tue, 14 Jan 2020 17:42:58 +0000 (17:42 +0000)
committer Jim Jagielski <jim@apache.org>
          Tue, 14 Jan 2020 17:42:58 +0000 (17:42 +0000)
mod_proxy: fix proxy connection cleanup from an n+2 pool.

When connection_destructor() is called after pchild is gone, we can't
dereference worker->cp anymore. This happens in debug/one_process mode only,
if we exit by calling apr_terminate() or clearing the process pool directly.

Fix this by NULL-ing worker->cp in conn_pool_cleanup(), and by registering it
as a pre_cleanup.
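
As a reference for the ordering involved, here is a minimal sketch (illustrative names, not the httpd code): APR runs pre-cleanups before a pool's subpools are destroyed, while plain cleanups run afterwards, so NULL-ing the back-pointer in a pre-cleanup lets destructors that fire during subpool teardown see that the owner is gone.

    #include "apr_pools.h"

    struct owner {
        void *resources;                 /* stands in for worker->cp */
    };

    static apr_status_t owner_pre_cleanup(void *data)
    {
        /* Runs before any subpool (and anything living in it, such as
         * a reslist) is destroyed. */
        ((struct owner *)data)->resources = NULL;
        return APR_SUCCESS;
    }

    static void attach_cleanup(apr_pool_t *pool, struct owner *o)
    {
        /* pre_cleanup, not cleanup: the ordering is the whole point. */
        apr_pool_pre_cleanup_register(pool, o, owner_pre_cleanup);
    }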

Delay some memory allocation.

If the handler is not going to handle the request, there is no need to waste bytes in the request pool.
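
A minimal sketch of the deferral pattern (hypothetical handler, not the mod_proxy_ftp code; the actual change moves apr_brigade_create() below the early-return checks):

    #include <string.h>
    #include "httpd.h"
    #include "apr_buckets.h"

    static int example_handler(request_rec *r)
    {
        apr_bucket_brigade *bb;

        if (strcmp(r->handler, "example") != 0)
            return DECLINED;             /* declined: nothing allocated */

        /* Only now pay for the brigade in the request pool. */
        bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
        (void)bb;                        /* ... build the response ... */
        return OK;
    }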

Add error messages and return bad request.

Fix incorrect rv. Sorry.

Follow-up to r1847232.
There is no point in using "old" numbers in a recent commit.

Also avoid number duplication. The messages are the same but appear in different code paths, so having different numbers makes sense.
This also avoids a warning when running:
   make update-log-msg-tags

Make proxy modules compile if APR_HAS_THREADS is not defined.

Restore use of global mutex under !APR_HAS_THREADS

Follow-up to r1852442, which appears to have been too aggressive in wrapping
blocks with #if APR_HAS_THREADS. With !APR_HAS_THREADS a global mutex
is a proc mutex.
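
The resulting convention, as a sketch (illustrative struct, not the real declarations): thread mutexes are compiled only under APR_HAS_THREADS, while global mutexes stay unconditional because APR supplies them as process mutexes on threadless builds.

    #include "apr.h"
    #include "apr_global_mutex.h"
    #if APR_HAS_THREADS
    #include "apr_thread_mutex.h"
    #endif

    struct shared_state {
        apr_global_mutex_t *gmutex;  /* always present; a proc mutex
                                      * when !APR_HAS_THREADS */
    #if APR_HAS_THREADS
        apr_thread_mutex_t *tmutex;  /* only exists with thread support */
    #endif
    };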

* Add back logging goodness added by covener in r1865938.

Fix pool concurrency problems

Create a subpool of the connection pool for worker-scoped DNS resolutions.
This is needed to avoid race conditions when multiple threads use the
connection pool during ramp-up.

After obtaining the lock, recheck whether the work still needs to be done,
or whether it was already done by another thread while we were waiting on the lock.
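
A minimal sketch of this recheck-under-lock pattern (hypothetical names; the real code is in ap_proxy_determine_connection() in the diff below). AP_VOLATILIZE_T, introduced by this commit, forces a genuine re-read of the shared pointer once the lock is held, so a value published by another thread is observed.

    #include "apr.h"
    #include "apr_network_io.h"
    #include "apr_thread_mutex.h"

    #define AP_VOLATILIZE_T(T, x) (*(T volatile *)&(x))

    struct addr_cache {
        apr_sockaddr_t *addr;            /* resolved once, read by many */
        apr_pool_t *dns_pool;            /* worker-scoped pool for results */
        apr_thread_mutex_t *lock;
    };

    static apr_status_t cache_resolve(struct addr_cache *c,
                                      const char *host, apr_port_t port)
    {
        apr_status_t rv = APR_SUCCESS;

        if (!c->addr) {                  /* cheap unlocked probe */
            apr_thread_mutex_lock(c->lock);
            /* Recheck: another thread may have resolved it while we
             * were waiting on the lock. */
            if (!AP_VOLATILIZE_T(apr_sockaddr_t *, c->addr)) {
                apr_sockaddr_t *addr;
                rv = apr_sockaddr_info_get(&addr, host, APR_UNSPEC,
                                           port, 0, c->dns_pool);
                if (rv == APR_SUCCESS)
                    c->addr = addr;      /* publish only when complete */
            }
            apr_thread_mutex_unlock(c->lock);
        }
        return rv;
    }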

* modules/proxy/proxy_util.c: Create a subpool of the connection pool for
  worker-scoped DNS resolutions and use it.

* modules/proxy/mod_proxy.h: Define AP_VOLATILIZE_T and add dns_pool to
  struct proxy_conn_pool.

* modules/proxy/mod_proxy_ftp.c: Use dns_pool and treat worker->cp->addr
  as volatile at this point in the code.

PR: 63503

Submitted by: ylavic, jailletc36, jfclere, jfclere, jailletc36, stsp, covener, rpluem, rpluem
Reviewed by: rpluem, covener, jim

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1872790 13f79535-47bb-0310-9956-ffa450edef68

CHANGES
STATUS
include/ap_mmn.h
modules/proxy/mod_proxy.h
modules/proxy/mod_proxy_balancer.c
modules/proxy/mod_proxy_ftp.c
modules/proxy/proxy_util.c

diff --git a/CHANGES b/CHANGES
index 2a1f7b74feb7ff878861a3c6ba66bfe4d07dd300..1076b6061dabeab6dc7fcb02e11f343216fcbfa2 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -19,6 +19,9 @@ Changes with Apache 2.4.42
   *) mod_authn_socache: Increase the maximum length of strings that can be cached by
      the module from 100 to 256.  PR 62149 [<thorsten.meinl knime.com>]
 
+  *) mod_proxy: Fix crash by resolving pool concurrency problems. PR 63503
+     [Ruediger Pluem, Eric Covener]
+
   *) core: On Windows, fix a start-up crash if <IfFile ...> is used with a path that is not
      valid (For example, testing for a file on a flash drive that is not mounted)
      [Christophe Jaillet]
diff --git a/STATUS b/STATUS
index ff27fddaa24d282ae45570b9131eb6729b6fc32c..3ff12d9dafe650ce95eabe5743882ce7a483132b 100644 (file)
--- a/STATUS
+++ b/STATUS
@@ -132,32 +132,6 @@ RELEASE SHOWSTOPPERS:
 PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
   [ start all new proposals below, under PATCHES PROPOSED. ]
 
-  *) mod_proxy: Fix crash by resolving pool concurrency problems. PR 63503
-     trunk patch: http://svn.apache.org/r1822531
-                  http://svn.apache.org/r1829676
-                  http://svn.apache.org/r1847232
-                  http://svn.apache.org/r1847234
-                  http://svn.apache.org/r1861333
-                  http://svn.apache.org/r1852442
-                  http://svn.apache.org/r1866145
-                  http://svn.apache.org/r1868295
-                  http://svn.apache.org/r1868296
-     2.4.x patch: The core of the patch addressing the issue is r1868295 and
-                  r1868296. The other revisions are unrelated to the core
-                  patch but are small changes that ease merging and avoid further
-                  misalignment between trunk and 2.4.x. In order to ease review
-                  I split the revisions into 4 blocks and created 4 pull
-                  requests for them on github:
-                  r1822531: https://github.com/apache/httpd/pull/77
-                            https://github.com/apache/httpd/pull/77.diff 
-                  r1829676: https://github.com/apache/httpd/pull/78
-                            https://github.com/apache/httpd/pull/78.diff
-                  r1847232, r1847234, r1861333: https://github.com/apache/httpd/pull/79
-                                                https://github.com/apache/httpd/pull/79.diff
-                  All revisions: https://github.com/apache/httpd/pull/80
-                                 https://github.com/apache/httpd/pull/80.diff
-     +1: rpluem, covener, jim
-
 
 PATCHES PROPOSED TO BACKPORT FROM TRUNK:
   [ New proposals should be added at the end of the list ]
diff --git a/include/ap_mmn.h b/include/ap_mmn.h
index 40b654f7353ebcdf188836d219649a879dd63b0f..e257c606eeaacc392c8581043ed9e0723ce81f12 100644 (file)
--- a/include/ap_mmn.h
+++ b/include/ap_mmn.h
  * 20120211.86 (2.4.40-dev) Add forward_100_continue{,_set} to proxy_dir_conf
  * 20120211.87 (2.4.40-dev) Add dav_popen_propdb
  * 20120211.88 (2.4.40-dev) Add ap_dir_nofnmatch() and ap_dir_fnmatch().
+ * 20120211.89 (2.4.42-dev) Add dns_pool to proxy_conn_pool and define
+ *                          AP_VOLATILIZE_T.
  */
 
 #define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */
 #ifndef MODULE_MAGIC_NUMBER_MAJOR
 #define MODULE_MAGIC_NUMBER_MAJOR 20120211
 #endif
-#define MODULE_MAGIC_NUMBER_MINOR 88                  /* 0...n */
+#define MODULE_MAGIC_NUMBER_MINOR 89                  /* 0...n */
 
 /**
  * Determine if the server's current MODULE_MAGIC_NUMBER is at least a
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 40d461ef8c2428e632b70c7c834ae7146288786b..33e8c7b4e21201376350041157858235197fb90f 100644 (file)
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -287,12 +287,15 @@ typedef struct {
 
 /* Connection pool */
 struct proxy_conn_pool {
-    apr_pool_t     *pool;   /* The pool used in constructor and destructor calls */
-    apr_sockaddr_t *addr;   /* Preparsed remote address info */
-    apr_reslist_t  *res;    /* Connection resource list */
-    proxy_conn_rec *conn;   /* Single connection for prefork mpm */
+    apr_pool_t     *pool;     /* The pool used in constructor and destructor calls */
+    apr_sockaddr_t *addr;     /* Preparsed remote address info */
+    apr_reslist_t  *res;      /* Connection resource list */
+    proxy_conn_rec *conn;     /* Single connection for prefork mpm */
+    apr_pool_t     *dns_pool; /* The pool used for worker scoped DNS resolutions */
 };
 
+#define AP_VOLATILIZE_T(T, x) (*(T volatile *)&(x))
+
 /* worker status bits */
 /*
  * NOTE: Keep up-to-date w/ proxy_wstat_tbl[]
@@ -472,7 +475,9 @@ struct proxy_worker {
     proxy_conn_pool     *cp;    /* Connection pool to use */
     proxy_worker_shared   *s;   /* Shared data */
     proxy_balancer  *balancer;  /* which balancer am I in? */
+#if APR_HAS_THREADS
     apr_thread_mutex_t  *tmutex; /* Thread lock for updating address cache */
+#endif
     void            *context;   /* general purpose storage */
     ap_conf_vector_t *section_config; /* <Proxy>-section wherein defined */
 };
@@ -531,7 +536,9 @@ struct proxy_balancer {
     apr_time_t      wupdated;    /* timestamp of last change to workers list */
     proxy_balancer_method *lbmethod;
     apr_global_mutex_t  *gmutex; /* global lock for updating list of workers */
+#if APR_HAS_THREADS
     apr_thread_mutex_t  *tmutex; /* Thread lock for updating shm */
+#endif
     proxy_server_conf *sconf;
     void            *context;    /* general purpose storage */
     proxy_balancer_shared *s;    /* Shared data */
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index 77c1dd2b28ef88167cc0135b308abcab40cf28aa..dd26a63a1c8ea2b552c75226f4167705617853f8 100644 (file)
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -346,23 +346,27 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer,
     proxy_worker *candidate = NULL;
     apr_status_t rv;
 
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
                       "%s: Lock failed for find_best_worker()",
                       balancer->s->name);
         return NULL;
     }
+#endif
 
     candidate = (*balancer->lbmethod->finder)(balancer, r);
 
     if (candidate)
         candidate->s->elected++;
 
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
                       "%s: Unlock failed for find_best_worker()",
                       balancer->s->name);
     }
+#endif
 
     if (candidate == NULL) {
         /* All the workers are in error state or disabled.
@@ -492,11 +496,13 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
     /* Step 2: Lock the LoadBalancer
      * XXX: perhaps we need the process lock here
      */
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
                       "%s: Lock failed for pre_request", (*balancer)->s->name);
         return DECLINED;
     }
+#endif
 
     /* Step 3: force recovery */
     force_recovery(*balancer, r->server);
@@ -557,20 +563,24 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
             ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
                           "%s: All workers are in error state for route (%s)",
                           (*balancer)->s->name, route);
+#if APR_HAS_THREADS
             if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
                 ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
                               "%s: Unlock failed for pre_request",
                               (*balancer)->s->name);
             }
+#endif
             return HTTP_SERVICE_UNAVAILABLE;
         }
     }
 
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
                       "%s: Unlock failed for pre_request",
                       (*balancer)->s->name);
     }
+#endif
     if (!*worker) {
         runtime = find_best_worker(*balancer, r);
         if (!runtime) {
@@ -644,12 +654,14 @@ static int proxy_balancer_post_request(proxy_worker *worker,
 
     apr_status_t rv;
 
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173)
                       "%s: Lock failed for post_request",
                       balancer->s->name);
         return HTTP_INTERNAL_SERVER_ERROR;
     }
+#endif
 
     if (!apr_is_empty_array(balancer->errstatuses)
         && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
@@ -681,11 +693,12 @@ static int proxy_balancer_post_request(proxy_worker *worker,
         worker->s->error_time = apr_time_now();
 
     }
-
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
                       "%s: Unlock failed for post_request", balancer->s->name);
     }
+#endif
     ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176)
                   "proxy_balancer_post_request for (%s)", balancer->s->name);
 
@@ -945,7 +958,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
             PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
 
             balancer->max_workers = balancer->workers->nelts + balancer->growth;
-
             /* Create global mutex */
             rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
                                         balancer->s->sname, s, pconf, 0);
@@ -955,7 +967,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                              balancer->s->sname);
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
-
             apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
                                       apr_pool_cleanup_null);
 
@@ -1147,17 +1158,21 @@ static int balancer_handler(request_rec *r)
 
     balancer = (proxy_balancer *)conf->balancers->elts;
     for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
+#if APR_HAS_THREADS
         if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189)
                           "%s: Lock failed for balancer_handler",
                           balancer->s->name);
         }
+#endif
         ap_proxy_sync_balancer(balancer, r->server, conf);
+#if APR_HAS_THREADS
         if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190)
                           "%s: Unlock failed for balancer_handler",
                           balancer->s->name);
         }
+#endif
     }
 
     if (r->args && (r->method_number == M_GET)) {
@@ -1381,11 +1396,13 @@ static int balancer_handler(request_rec *r)
             proxy_worker *nworker;
             nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
             if (!nworker && storage->num_free_slots(bsel->wslot)) {
+#if APR_HAS_THREADS
                 if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
                     ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
                                   "%s: Lock failed for adding worker",
                                   bsel->s->name);
                 }
+#endif
                 ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0);
                 if (!ret) {
                     unsigned int index;
@@ -1394,53 +1411,76 @@ static int balancer_handler(request_rec *r)
                     if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) {
                         ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195)
                                       "worker slotmem_grab failed");
+#if APR_HAS_THREADS
                         if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196)
                                           "%s: Unlock failed for adding worker",
                                           bsel->s->name);
                         }
+#endif
                         return HTTP_BAD_REQUEST;
                     }
                     if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) {
                         ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197)
                                       "worker slotmem_dptr failed");
+#if APR_HAS_THREADS
                         if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198)
                                           "%s: Unlock failed for adding worker",
                                           bsel->s->name);
                         }
+#endif
                         return HTTP_BAD_REQUEST;
                     }
                     if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) {
                         ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199)
                                       "Cannot share worker");
+#if APR_HAS_THREADS
                         if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200)
                                           "%s: Unlock failed for adding worker",
                                           bsel->s->name);
                         }
+#endif
                         return HTTP_BAD_REQUEST;
                     }
                     if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) {
                         ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201)
                                       "Cannot init worker");
+#if APR_HAS_THREADS
                         if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202)
                                           "%s: Unlock failed for adding worker",
                                           bsel->s->name);
                         }
+#endif
                         return HTTP_BAD_REQUEST;
                     }
                     /* sync all timestamps */
                     bsel->wupdated = bsel->s->wupdated = nworker->s->updated = apr_time_now();
                     /* by default, all new workers are disabled */
                     ap_proxy_set_wstatus(PROXY_WORKER_DISABLED_FLAG, 1, nworker);
+                } else {
+                            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10163)
+                                  "%s: failed to add worker %s",
+                                  bsel->s->name, val);
+#if APR_HAS_THREADS
+                    PROXY_GLOBAL_UNLOCK(bsel);
+#endif
+                    return HTTP_BAD_REQUEST;
                 }
+#if APR_HAS_THREADS
                 if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                     ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
                                   "%s: Unlock failed for adding worker",
                                   bsel->s->name);
                 }
+#endif
+            } else {
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10164)
+                                  "%s: failed to add worker %s",
+                                  bsel->s->name, val);
+                return HTTP_BAD_REQUEST;
             }
 
         }
diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
index 7ad803b97c88a621501ec12497758b218057fe9a..801e351c2b2c4086e83c42178c3b6fada1d970f7 100644 (file)
--- a/modules/proxy/mod_proxy_ftp.c
+++ b/modules/proxy/mod_proxy_ftp.c
@@ -971,8 +971,10 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
     apr_status_t rv;
     conn_rec *origin, *data = NULL;
     apr_status_t err = APR_SUCCESS;
+#if APR_HAS_THREADS
     apr_status_t uerr = APR_SUCCESS;
-    apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
+#endif
+    apr_bucket_brigade *bb;
     char *buf, *connectname;
     apr_port_t connectport;
     char *ftpmessage = NULL;
@@ -1112,13 +1114,15 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
 
     if (worker->s->is_address_reusable) {
         if (!worker->cp->addr) {
+#if APR_HAS_THREADS
             if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) {
                 ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock");
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
+#endif
         }
-        connect_addr = worker->cp->addr;
-        address_pool = worker->cp->pool;
+        connect_addr = AP_VOLATILIZE_T(apr_sockaddr_t *, worker->cp->addr);
+        address_pool = worker->cp->dns_pool;
     }
     else
         address_pool = r->pool;
@@ -1131,9 +1135,11 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
                                     address_pool);
     if (worker->s->is_address_reusable && !worker->cp->addr) {
         worker->cp->addr = connect_addr;
+#if APR_HAS_THREADS
         if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) {
             ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock");
         }
+#endif
     }
     /*
      * get all the possible IP addresses for the destname and loop through
@@ -1204,6 +1210,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
      * correct directory...
      */
 
+    bb = apr_brigade_create(p, c->bucket_alloc);
 
     /* possible results: */
     /* 120 Service ready in nnn minutes. */
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 732348b3b82820653162dd7c5a684134b3740db6..b50d8cfec12d7eca475bc785dbabe446ab9bd7d2 100644 (file)
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -1172,8 +1172,10 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
     (*balancer)->lbmethod = lbmethod;
     
     (*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
+#if APR_HAS_THREADS
     (*balancer)->gmutex = NULL;
     (*balancer)->tmutex = NULL;
+#endif
 
     if (do_malloc)
         bshared = ap_malloc(sizeof(proxy_balancer_shared));
@@ -1265,7 +1267,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer,
 
 PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balancer, server_rec *s, apr_pool_t *p)
 {
+#if APR_HAS_THREADS
     apr_status_t rv = APR_SUCCESS;
+#endif
     ap_slotmem_provider_t *storage = balancer->storage;
     apr_size_t size;
     unsigned int num;
@@ -1305,6 +1309,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
     if (balancer->lbmethod && balancer->lbmethod->reset)
         balancer->lbmethod->reset(balancer, s);
 
+#if APR_HAS_THREADS
     if (balancer->tmutex == NULL) {
         rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
         if (rv != APR_SUCCESS) {
@@ -1313,6 +1318,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
             return rv;
         }
     }
+#endif
     return APR_SUCCESS;
 }
 
@@ -1454,16 +1460,14 @@ static void socket_cleanup(proxy_conn_rec *conn)
 
 static apr_status_t conn_pool_cleanup(void *theworker)
 {
-    proxy_worker *worker = (proxy_worker *)theworker;
-    if (worker->cp->res) {
-        worker->cp->pool = NULL;
-    }
+    ((proxy_worker *)theworker)->cp = NULL;
     return APR_SUCCESS;
 }
 
 static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
 {
     apr_pool_t *pool;
+    apr_pool_t *dns_pool;
     proxy_conn_pool *cp;
 
     /*
@@ -1474,12 +1478,21 @@ static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
      */
     apr_pool_create(&pool, p);
     apr_pool_tag(pool, "proxy_worker_cp");
+    /*
+     * Create a subpool of the connection pool for worker
+     * scoped DNS resolutions. This is needed to avoid race
+     * conditions in using the connection pool by multiple
+     * threads during ramp up.
+     */
+    apr_pool_create(&dns_pool, pool);
+    apr_pool_tag(dns_pool, "proxy_worker_dns");
     /*
      * Alloc from the same pool as worker.
      * proxy_conn_pool is permanently attached to the worker.
      */
     cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
     cp->pool = pool;
+    cp->dns_pool = dns_pool;
     worker->cp = cp;
 }
 
@@ -1495,14 +1508,6 @@ static apr_status_t connection_cleanup(void *theconn)
     proxy_conn_rec *conn = (proxy_conn_rec *)theconn;
     proxy_worker *worker = conn->worker;
 
-    /*
-     * If the connection pool is NULL the worker
-     * cleanup has been run. Just return.
-     */
-    if (!worker->cp->pool) {
-        return APR_SUCCESS;
-    }
-
     if (conn->r) {
         apr_pool_destroy(conn->r->pool);
         conn->r = NULL;
@@ -1631,7 +1636,7 @@ static apr_status_t connection_destructor(void *resource, void *params,
     proxy_worker *worker = params;
 
     /* Destroy the pool only if not called from reslist_destroy */
-    if (worker->cp->pool) {
+    if (worker->cp) {
         proxy_conn_rec *conn = resource;
         apr_pool_destroy(conn->pool);
     }
@@ -1987,67 +1992,73 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
                      ap_proxy_worker_name(p, worker));
     }
     else {
-        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
-                     "initializing worker %s local",
-                     ap_proxy_worker_name(p, worker));
         apr_global_mutex_lock(proxy_mutex);
-        /* Now init local worker data */
-        if (worker->tmutex == NULL) {
-            rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
-            if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00928)
-                             "can not create worker thread mutex");
+        /* Check again after we got the lock if we are still uninitialized */
+        if (!(AP_VOLATILIZE_T(unsigned int, worker->local_status) & PROXY_WORKER_INITIALIZED)) {
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
+                         "initializing worker %s local",
+                         ap_proxy_worker_name(p, worker));
+            /* Now init local worker data */
+#if APR_HAS_THREADS
+            if (worker->tmutex == NULL) {
+                rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
+                if (rv != APR_SUCCESS) {
+                    ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00928)
+                                 "can not create worker thread mutex");
+                    apr_global_mutex_unlock(proxy_mutex);
+                    return rv;
+                }
+            }
+#endif
+            if (worker->cp == NULL)
+                init_conn_pool(p, worker);
+            if (worker->cp == NULL) {
+                ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
+                             "can not create connection pool");
                 apr_global_mutex_unlock(proxy_mutex);
-                return rv;
+                return APR_EGENERAL;
             }
-        }
-        if (worker->cp == NULL)
-            init_conn_pool(p, worker);
-        if (worker->cp == NULL) {
-            ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
-                         "can not create connection pool");
-            apr_global_mutex_unlock(proxy_mutex);
-            return APR_EGENERAL;
-        }
 
-        if (worker->s->hmax) {
-            rv = apr_reslist_create(&(worker->cp->res),
-                                    worker->s->min, worker->s->smax,
-                                    worker->s->hmax, worker->s->ttl,
-                                    connection_constructor, connection_destructor,
-                                    worker, worker->cp->pool);
+            if (worker->s->hmax) {
+                rv = apr_reslist_create(&(worker->cp->res),
+                                        worker->s->min, worker->s->smax,
+                                        worker->s->hmax, worker->s->ttl,
+                                        connection_constructor, connection_destructor,
+                                        worker, worker->cp->pool);
 
-            apr_pool_cleanup_register(worker->cp->pool, (void *)worker,
-                                      conn_pool_cleanup,
-                                      apr_pool_cleanup_null);
+                apr_pool_pre_cleanup_register(worker->cp->pool, worker,
+                                              conn_pool_cleanup);
 
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930)
-                "initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
-                 getpid(), worker->s->hostname_ex, worker->s->min,
-                 worker->s->hmax, worker->s->smax);
+                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930)
+                    "initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
+                     getpid(), worker->s->hostname_ex, worker->s->min,
+                     worker->s->hmax, worker->s->smax);
 
-            /* Set the acquire timeout */
-            if (rv == APR_SUCCESS && worker->s->acquire_set) {
-                apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
-            }
+                /* Set the acquire timeout */
+                if (rv == APR_SUCCESS && worker->s->acquire_set) {
+                    apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
+                }
 
-        }
-        else {
-            void *conn;
+            }
+            else {
+                void *conn;
 
-            rv = connection_constructor(&conn, worker, worker->cp->pool);
-            worker->cp->conn = conn;
+                rv = connection_constructor(&conn, worker, worker->cp->pool);
+                worker->cp->conn = conn;
 
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00931)
-                 "initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
-                 getpid(), worker->s->hostname_ex);
+                ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(00931)
+                     "initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
+                     getpid(), worker->s->hostname_ex);
+            }
+            if (rv == APR_SUCCESS) {
+                worker->local_status |= (PROXY_WORKER_INITIALIZED);
+            }
         }
         apr_global_mutex_unlock(proxy_mutex);
 
     }
     if (rv == APR_SUCCESS) {
         worker->s->status |= (PROXY_WORKER_INITIALIZED);
-        worker->local_status |= (PROXY_WORKER_INITIALIZED);
     }
     return rv;
 }
@@ -2307,13 +2318,13 @@ PROXY_DECLARE(int) ap_proxy_acquire_connection(const char *proxy_function,
     else {
         /* create the new connection if the previous was destroyed */
         if (!worker->cp->conn) {
-            connection_constructor((void **)conn, worker, worker->cp->pool);
+            rv = connection_constructor((void **)conn, worker, worker->cp->pool);
         }
         else {
             *conn = worker->cp->conn;
             worker->cp->conn = NULL;
+            rv = APR_SUCCESS;
         }
-        rv = APR_SUCCESS;
     }
 
     if (rv != APR_SUCCESS) {
@@ -2359,7 +2370,9 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
 {
     int server_port;
     apr_status_t err = APR_SUCCESS;
+#if APR_HAS_THREADS
     apr_status_t uerr = APR_SUCCESS;
+#endif
     const char *uds_path;
 
     /*
@@ -2493,25 +2506,39 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
              * we can reuse the address.
              */
             if (!worker->cp->addr) {
+#if APR_HAS_THREADS
                 if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
                     ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock");
                     return HTTP_INTERNAL_SERVER_ERROR;
                 }
+#endif
 
                 /*
-                 * Worker can have the single constant backend address.
-                 * The single DNS lookup is used once per worker.
-                 * If dynamic change is needed then set the addr to NULL
-                 * inside dynamic config to force the lookup.
+                 * Recheck addr after we got the lock. This may have changed
+                 * while waiting for the lock.
                  */
-                err = apr_sockaddr_info_get(&(worker->cp->addr),
-                                            conn->hostname, APR_UNSPEC,
-                                            conn->port, 0,
-                                            worker->cp->pool);
+                if (!AP_VOLATILIZE_T(apr_sockaddr_t *, worker->cp->addr)) {
+
+                    apr_sockaddr_t *addr;
+
+                    /*
+                     * Worker can have the single constant backend address.
+                     * The single DNS lookup is used once per worker.
+                     * If dynamic change is needed then set the addr to NULL
+                     * inside dynamic config to force the lookup.
+                     */
+                    err = apr_sockaddr_info_get(&addr,
+                                                conn->hostname, APR_UNSPEC,
+                                                conn->port, 0,
+                                                worker->cp->dns_pool);
+                    worker->cp->addr = addr;
+                }
                 conn->addr = worker->cp->addr;
+#if APR_HAS_THREADS
                 if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
                     ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
                 }
+#endif
             }
             else {
                 conn->addr = worker->cp->addr;
@@ -3449,7 +3476,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec
             (*runtime)->cp = NULL;
             (*runtime)->balancer = b;
             (*runtime)->s = shm;
+#if APR_HAS_THREADS
             (*runtime)->tmutex = NULL;
+#endif
             rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
             if (rv != APR_SUCCESS) {
                 ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");