     proxy_hashes hash;
     apr_time_t      wupdated;    /* timestamp of last change to workers list */
     proxy_balancer_method *lbmethod;
-#if APR_HAS_THREADS
     apr_global_mutex_t  *gmutex; /* global lock for updating list of workers */
+#if APR_HAS_THREADS
     apr_thread_mutex_t  *tmutex; /* Thread lock for updating shm */
 #endif
     proxy_server_conf *sconf;
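With the guard moved, balancer->gmutex exists even in unthreaded builds, while balancer->tmutex stays thread-only. A minimal sketch of how an update might take both locks, given the structure above (update_worker_list is a hypothetical name; the lock calls are stock APR):

static apr_status_t update_worker_list(proxy_balancer *balancer)
{
    /* Cross-process lock: serializes worker-list updates between
     * server processes; unconditional after this change. */
    apr_status_t rv = apr_global_mutex_lock(balancer->gmutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
#if APR_HAS_THREADS
    /* Cross-thread lock: serializes shm updates within one process. */
    if (balancer->tmutex) {
        apr_thread_mutex_lock(balancer->tmutex);
    }
#endif

    /* ... modify the shared worker list / shm here ... */

#if APR_HAS_THREADS
    if (balancer->tmutex) {
        apr_thread_mutex_unlock(balancer->tmutex);
    }
#endif
    return apr_global_mutex_unlock(balancer->gmutex);
}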
 static apr_status_t lock_remove(void *data)
 {
-#if APR_HAS_THREADS
     int i;
-#endif
     proxy_balancer *balancer;
     server_rec *s = data;
     void *sconf = s->module_config;
     proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
 
     balancer = (proxy_balancer *)conf->balancers->elts;
-#if APR_HAS_THREADS
     for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
         if (balancer->gmutex) {
             apr_global_mutex_destroy(balancer->gmutex);
             balancer->gmutex = NULL;
         }
     }
-#endif
     return(0);
 }
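lock_remove() is written to run as a pool cleanup (registered with apr_pool_cleanup_register in the next hunk), so each balancer's global mutex is destroyed when pconf is cleared on restart rather than leaking the underlying lock file or semaphore. A standalone sketch of that lifecycle in plain APR, with a hypothetical lock file name:

#include <apr_general.h>
#include <apr_global_mutex.h>
#include <apr_pools.h>

static apr_global_mutex_t *gmutex;

static apr_status_t demo_lock_remove(void *data)
{
    (void)data;
    if (gmutex) {
        apr_global_mutex_destroy(gmutex); /* release the OS-level lock */
        gmutex = NULL;                    /* guard against double destroy */
    }
    return APR_SUCCESS;
}

int main(void)
{
    apr_pool_t *pool;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    apr_global_mutex_create(&gmutex, "demo.lock", APR_LOCK_DEFAULT, pool);
    apr_pool_cleanup_register(pool, NULL, demo_lock_remove,
                              apr_pool_cleanup_null);
    apr_pool_destroy(pool); /* runs demo_lock_remove before the pool is freed */
    apr_terminate();
    return 0;
}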
         PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
 
         balancer->max_workers = balancer->workers->nelts + balancer->growth;
 
-#if APR_HAS_THREADS
         /* Create global mutex */
         rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
                                     balancer->s->sname, s, pconf, 0);
         if (rv != APR_SUCCESS) {
             ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
                          "mutex creation of %s : %s failed", balancer_mutex_type,
                          balancer->s->sname);
             return HTTP_INTERNAL_SERVER_ERROR;
         }
-#endif
 
         apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
                                   apr_pool_cleanup_null);
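ap_global_mutex_create() picks up the mechanism and default file location that were registered for balancer_mutex_type at pre_config time, which is also what lets administrators tune the lock with the Mutex directive. A sketch of the matching registration, assuming the same type string and omitting the hook wiring:

static int balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
                               apr_pool_t *ptemp)
{
    apr_status_t rv;

    /* Register the mutex type before any ap_global_mutex_create() call. */
    rv = ap_mutex_register(pconf, balancer_mutex_type, NULL,
                           APR_LOCK_DEFAULT, 0);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    return OK;
}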
     /*
      * for each balancer we need to init the global
      * mutex and then attach to the shared worker shm
      */
-#if APR_HAS_THREADS
     if (!balancer->gmutex) {
         ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(00919)
                      "no mutex %s", balancer->s->name);
         return APR_EGENERAL;
     }
 
     /* Re-open the mutex for the child. */
     rv = apr_global_mutex_child_init(&(balancer->gmutex),
                                      apr_global_mutex_lockfile(balancer->gmutex),
                                      p);
     if (rv != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
                      "Failed to reopen mutex %s in child",
                      balancer->s->name);
         return rv;
     }
-#endif
     /* now attach */
     storage->attach(&(balancer->wslot), balancer->s->sname, &size, &num, p);
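attach() leaves balancer->wslot NULL when the named shm segment cannot be found, so the caller has to verify the pointer before touching the slotmem. A sketch of the check that would follow, with the log wording as an assumption:

    /* Hypothetical follow-up: bail out if the shared slot never attached. */
    if (!balancer->wslot) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s,
                     "slotmem_attach of %s failed", balancer->s->sname);
        return APR_EGENERAL;
    }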