proxy_worker *candidate = NULL;
+#if APR_HAS_THREADS
apr_status_t rv;
+#endif
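/* NOTE: each PROXY_THREAD_LOCK/UNLOCK call site below is wrapped in
 * #if APR_HAS_THREADS so this file still compiles when APR is built
 * without thread support.  A sketch of the matching fallback macros
 * (illustrative only; the real definitions live in mod_proxy.h and
 * may differ):
 *
 *   #if APR_HAS_THREADS
 *   #define PROXY_THREAD_LOCK(x)   apr_thread_mutex_lock((x)->tmutex)
 *   #define PROXY_THREAD_UNLOCK(x) apr_thread_mutex_unlock((x)->tmutex)
 *   #else
 *   #define PROXY_THREAD_LOCK(x)   APR_SUCCESS
 *   #define PROXY_THREAD_UNLOCK(x) APR_SUCCESS
 *   #endif
 */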
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
"%s: Lock failed for find_best_worker()",
balancer->s->name);
return NULL;
}
+#endif
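/* Ask the balancer's lbmethod provider to elect the best worker; the
 * election counter is bumped while the balancer lock is still held. */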
candidate = (*balancer->lbmethod->finder)(balancer, r);
if (candidate)
candidate->s->elected++;
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
"%s: Unlock failed for find_best_worker()",
balancer->s->name);
}
+#endif
if (candidate == NULL) {
/* All the workers are in error state or disabled. */
/* Step 2: Lock the LoadBalancer
* XXX: perhaps we need the process lock here
*/
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
"%s: Lock failed for pre_request", (*balancer)->s->name);
return DECLINED;
}
+#endif
/* Step 3: force recovery */
force_recovery(*balancer, r->server);
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
"%s: All workers are in error state for route (%s)",
(*balancer)->s->name, route);
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
"%s: Unlock failed for pre_request",
(*balancer)->s->name);
}
+#endif
return HTTP_SERVICE_UNAVAILABLE;
}
}
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
"%s: Unlock failed for pre_request",
(*balancer)->s->name);
}
+#endif
if (!*worker) {
runtime = find_best_worker(*balancer, r);
if (!runtime) {
+#if APR_HAS_THREADS
apr_status_t rv;
+#endif
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173)
"%s: Lock failed for post_request",
balancer->s->name);
return HTTP_INTERNAL_SERVER_ERROR;
}
+#endif
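/* If failonstatus codes are configured and this worker does not ignore
 * errors, stamp the error time so its retry window starts now. */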
if (!apr_is_empty_array(balancer->errstatuses)
&& !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
worker->s->error_time = apr_time_now();
}
-
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
"%s: Unlock failed for post_request", balancer->s->name);
}
+#endif
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176)
"proxy_balancer_post_request for (%s)", balancer->s->name);
static apr_status_t lock_remove(void *data)
{
+#if APR_HAS_THREADS
int i;
+#endif
proxy_balancer *balancer;
server_rec *s = data;
void *sconf = s->module_config;
proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
balancer = (proxy_balancer *)conf->balancers->elts;
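/* Destroy each balancer's global mutex on cleanup; when APR lacks
 * thread support no mutex was ever created, so there is nothing to do. */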
+#if APR_HAS_THREADS
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
if (balancer->gmutex) {
apr_global_mutex_destroy(balancer->gmutex);
balancer->gmutex = NULL;
}
}
+#endif
return(0);
}
PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
balancer->max_workers = balancer->workers->nelts + balancer->growth;
-
+#if APR_HAS_THREADS
/* Create global mutex */
rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
                            balancer->s->sname, s, pconf, 0);
if (rv != APR_SUCCESS) {
    ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01182)
                 "mutex creation of %s : %s failed", balancer_mutex_type,
                 balancer->s->sname);
    return HTTP_INTERNAL_SERVER_ERROR;
}
-
+#endif
apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
apr_pool_cleanup_null);
balancer = (proxy_balancer *)conf->balancers->elts;
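/* Re-sync every balancer against shared memory so workers added by
 * other processes become visible; the lock covers the sync itself. */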
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189)
"%s: Lock failed for balancer_handler",
balancer->s->name);
}
+#endif
ap_proxy_sync_balancer(balancer, r->server, conf);
+#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190)
"%s: Unlock failed for balancer_handler",
balancer->s->name);
}
+#endif
}
if (r->args && (r->method_number == M_GET)) {
proxy_worker *nworker;
nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
if (!nworker && storage->num_free_slots(bsel->wslot)) {
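/* A free slot is available: hold the global (cross-process) lock while
 * the new worker is defined and wired into the shared slotmem. */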
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
"%s: Lock failed for adding worker",
bsel->s->name);
}
+#endif
ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0);
if (!ret) {
unsigned int index;
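/* Slotmem sequence: grab a free slot, map it to a pointer, share the
 * new worker into it, then initialize it.  Note that every failure
 * path below drops the global lock before returning. */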
if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195)
"worker slotmem_grab failed");
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
return HTTP_BAD_REQUEST;
}
if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197)
"worker slotmem_dptr failed");
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
return HTTP_BAD_REQUEST;
}
if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199)
"Cannot share worker");
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
return HTTP_BAD_REQUEST;
}
if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201)
"Cannot init worker");
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
return HTTP_BAD_REQUEST;
}
/* sync all timestamps */
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207)
"%s: failed to add worker %s",
bsel->s->name, val);
+#if APR_HAS_THREADS
PROXY_GLOBAL_UNLOCK(bsel);
+#endif
return HTTP_BAD_REQUEST;
}
+#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
+#endif
} else {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207)
"%s: failed to add worker %s",
lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0");
(*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
+#if APR_HAS_THREADS
(*balancer)->gmutex = NULL;
(*balancer)->tmutex = NULL;
+#endif
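/* A freshly defined balancer has no mutexes yet: the global mutex is
 * created at post-config time and the thread mutex in
 * ap_proxy_initialize_balancer(). */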
(*balancer)->lbmethod = lbmethod;
if (do_malloc)
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balancer, server_rec *s, apr_pool_t *p)
{
+#if APR_HAS_THREADS
apr_status_t rv = APR_SUCCESS;
+#endif
ap_slotmem_provider_t *storage = balancer->storage;
apr_size_t size;
unsigned int num;
/*
 * for each balancer we need to init the global
 * mutex and then attach to the shared worker shm
 */
+#if APR_HAS_THREADS
if (!balancer->gmutex) {
ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(00919)
"no mutex %s", balancer->s->name);
return APR_EGENERAL;
}
+#endif
/* now attach */
storage->attach(&(balancer->wslot), balancer->s->sname, &size, &num, p);
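/* The slotmem segment was created by the parent at post-config;
 * child processes only attach to it here. */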
if (balancer->lbmethod && balancer->lbmethod->reset)
balancer->lbmethod->reset(balancer, s);
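/* The thread mutex lives in per-process memory (it cannot go in shm),
 * so each child creates its own copy lazily. */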
+#if APR_HAS_THREADS
if (balancer->tmutex == NULL) {
rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) {
return rv;
}
}
+#endif
return APR_SUCCESS;
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
             "initializing worker %s local",
             ap_proxy_worker_name(p, worker));
apr_global_mutex_lock(proxy_mutex);
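/* proxy_mutex is held from here on: any early return in the local
 * init below must release it first. */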
/* Now init local worker data */
+#if APR_HAS_THREADS
if (worker->tmutex == NULL) {
rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) {
    apr_global_mutex_unlock(proxy_mutex);
    return rv;
}
}
+#endif
if (worker->cp == NULL)
init_conn_pool(p, worker);
if (worker->cp == NULL) {
{
int server_port;
apr_status_t err = APR_SUCCESS;
+#if APR_HAS_THREADS
apr_status_t uerr = APR_SUCCESS;
+#endif
const char *uds_path;
/*
* we can reuse the address.
*/
if (!worker->cp->addr) {
+#if APR_HAS_THREADS
if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock");
return HTTP_INTERNAL_SERVER_ERROR;
}
+#endif
/*
 * Worker can have the single constant backend address.
 * The single DNS lookup is used once per worker.
 */
err = apr_sockaddr_info_get(&(worker->cp->addr),
                            conn->hostname, APR_UNSPEC,
                            conn->port, 0,
                            worker->cp->pool);
conn->addr = worker->cp->addr;
+#if APR_HAS_THREADS
if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
}
+#endif
}
else {
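/* Address already resolved and cached for this worker: reuse it
 * without taking the worker lock. */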
conn->addr = worker->cp->addr;
(*runtime)->cp = NULL;
(*runtime)->balancer = b;
(*runtime)->s = shm;
+#if APR_HAS_THREADS
(*runtime)->tmutex = NULL;
+#endif
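/* The shared (shm) state is wired up above; the per-process thread
 * mutex is created later by ap_proxy_initialize_worker() below. */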
rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");