git.ipfire.org Git - thirdparty/apache/httpd.git/commitdiff
restore use of global mutex under !APR_HAS_THREADS
author: Eric Covener <covener@apache.org>
Fri, 30 Aug 2019 11:58:41 +0000 (11:58 +0000)
committer: Eric Covener <covener@apache.org>
Fri, 30 Aug 2019 11:58:41 +0000 (11:58 +0000)
followup to r1852442 which appears to have been too aggressive in wrapping
blocks with #if APR_HAS_THREADS.  With !APR_HAS_THREADS a global mutex
is a proc mutex.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1866145 13f79535-47bb-0310-9956-ffa450edef68

modules/proxy/mod_proxy.h
modules/proxy/mod_proxy_balancer.c
modules/proxy/proxy_util.c

index 4b118a548a370f01227c3f41196f78a349bf7062..460c02c41a8d2c58897d8a482cfdbb5e09836dd8 100644 (file)
@@ -532,8 +532,8 @@ struct proxy_balancer {
     proxy_hashes hash;
     apr_time_t      wupdated;    /* timestamp of last change to workers list */
     proxy_balancer_method *lbmethod;
-#if APR_HAS_THREADS
     apr_global_mutex_t  *gmutex; /* global lock for updating list of workers */
+#if APR_HAS_THREADS
     apr_thread_mutex_t  *tmutex; /* Thread lock for updating shm */
 #endif
     proxy_server_conf *sconf;
index ca2462285bef37fea5297fe6eda199a45f3954b6..cbac1e16e723b5079d66024b0321b8754c149843 100644 (file)
@@ -728,23 +728,19 @@ static void recalc_factors(proxy_balancer *balancer)
 
 static apr_status_t lock_remove(void *data)
 {
-#if APR_HAS_THREADS
     int i;
-#endif
     proxy_balancer *balancer;
     server_rec *s = data;
     void *sconf = s->module_config;
     proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
 
     balancer = (proxy_balancer *)conf->balancers->elts;
-#if APR_HAS_THREADS
     for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
         if (balancer->gmutex) {
             apr_global_mutex_destroy(balancer->gmutex);
             balancer->gmutex = NULL;
         }
     }
-#endif
     return(0);
 }
 
@@ -960,7 +956,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
             PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
 
             balancer->max_workers = balancer->workers->nelts + balancer->growth;
-#if APR_HAS_THREADS
             /* Create global mutex */
             rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
                                         balancer->s->sname, s, pconf, 0);
@@ -970,7 +965,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                              balancer->s->sname);
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
-#endif
             apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
                                       apr_pool_cleanup_null);
 
index cb8e759557a293ad453b7bbf5749dd3a19036870..c7dacdf526d188b06830078cfec9dafc53877b2a 100644 (file)
@@ -1276,7 +1276,6 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
      * for each balancer we need to init the global
      * mutex and then attach to the shared worker shm
      */
-#if APR_HAS_THREADS
     if (!balancer->gmutex) {
         ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(00919)
                      "no mutex %s", balancer->s->name);
@@ -1293,7 +1292,6 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
                      balancer->s->name);
         return rv;
     }
-#endif
 
     /* now attach */
     storage->attach(&(balancer->wslot), balancer->s->sname, &size, &num, p);