From: Eric Covener
Date: Fri, 30 Aug 2019 11:58:41 +0000 (+0000)
Subject: restore use of global mutex under !APR_HAS_THREADS
X-Git-Tag: 2.5.0-alpha2-ci-test-only~1905
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fece93508b4c2aa38ff4047b592067be706f8ca3;p=thirdparty%2Fapache%2Fhttpd.git

restore use of global mutex under !APR_HAS_THREADS

Follow-up to r1852442, which appears to have been too aggressive in
wrapping blocks with #if APR_HAS_THREADS. With !APR_HAS_THREADS a
global mutex is a proc mutex.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1866145 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 4b118a548a3..460c02c41a8 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -532,8 +532,8 @@ struct proxy_balancer {
     proxy_hashes hash;
     apr_time_t wupdated;    /* timestamp of last change to workers list */
     proxy_balancer_method *lbmethod;
-#if APR_HAS_THREADS
     apr_global_mutex_t *gmutex; /* global lock for updating list of workers */
+#if APR_HAS_THREADS
     apr_thread_mutex_t *tmutex; /* Thread lock for updating shm */
 #endif
     proxy_server_conf *sconf;
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index ca2462285be..cbac1e16e72 100644
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -728,23 +728,19 @@ static void recalc_factors(proxy_balancer *balancer)
 
 static apr_status_t lock_remove(void *data)
 {
-#if APR_HAS_THREADS
     int i;
-#endif
     proxy_balancer *balancer;
     server_rec *s = data;
     void *sconf = s->module_config;
     proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
 
     balancer = (proxy_balancer *)conf->balancers->elts;
-#if APR_HAS_THREADS
     for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
         if (balancer->gmutex) {
             apr_global_mutex_destroy(balancer->gmutex);
             balancer->gmutex = NULL;
         }
     }
-#endif
     return(0);
 }
 
@@ -960,7 +956,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
             PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
 
             balancer->max_workers = balancer->workers->nelts + balancer->growth;
-#if APR_HAS_THREADS
             /* Create global mutex */
             rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
                                         balancer->s->sname, s, pconf, 0);
@@ -970,7 +965,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                              balancer->s->sname);
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
-#endif
 
             apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
                                       apr_pool_cleanup_null);
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index cb8e759557a..c7dacdf526d 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -1276,7 +1276,6 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
      * for each balancer we need to init the global
      * mutex and then attach to the shared worker shm
      */
-#if APR_HAS_THREADS
     if (!balancer->gmutex) {
         ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(00919)
                      "no mutex %s", balancer->s->name);
@@ -1293,7 +1292,6 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
                      balancer->s->name);
         return rv;
     }
-#endif
 
     /* now attach */
     storage->attach(&(balancer->wslot), balancer->s->sname, &size, &num, p);
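
Background on the last sentence of the log message: the apr_global_mutex_* API
is available whether or not APR was built with thread support; with
!APR_HAS_THREADS, APR backs the global mutex with a cross-process (proc)
mutex, which is why the gmutex field and its create/destroy calls must not be
compiled out. Below is a minimal standalone sketch of that API, not part of
this commit; the lock file name and error handling are illustrative.

/* Minimal standalone sketch (not from this commit): apr_global_mutex_*
 * compiles and works with or without APR_HAS_THREADS; without thread
 * support it degenerates to a process mutex. */
#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_errno.h>
#include <apr_global_mutex.h>

int main(void)
{
    apr_pool_t *pool;
    apr_global_mutex_t *gmutex;
    apr_status_t rv;
    char errbuf[120];

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* Illustrative lock file name; APR_LOCK_DEFAULT lets APR pick the
     * platform's default locking mechanism. */
    rv = apr_global_mutex_create(&gmutex, "example-balancer.lock",
                                 APR_LOCK_DEFAULT, pool);
    if (rv != APR_SUCCESS) {
        fprintf(stderr, "mutex create failed: %s\n",
                apr_strerror(rv, errbuf, sizeof(errbuf)));
    }
    else {
        apr_global_mutex_lock(gmutex);
        /* ... update state shared across processes, e.g. a balancer's
         * worker list ... */
        apr_global_mutex_unlock(gmutex);
        apr_global_mutex_destroy(gmutex);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}

Under !APR_HAS_THREADS these calls map onto the apr_proc_mutex_* API, so the
un-guarded code paths restored by this commit remain valid in that build.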