Make proxy modules compile if APR_HAS_THREADS is not defined.
author    Stefan Sperling <stsp@apache.org>
          Tue, 29 Jan 2019 12:28:35 +0000 (12:28 +0000)
committer Stefan Sperling <stsp@apache.org>
          Tue, 29 Jan 2019 12:28:35 +0000 (12:28 +0000)
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1852442 13f79535-47bb-0310-9956-ffa450edef68

modules/proxy/mod_proxy.h
modules/proxy/mod_proxy_balancer.c
modules/proxy/mod_proxy_ftp.c
modules/proxy/proxy_util.c

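The change is mechanical but worth spelling out: every struct member, local variable, and lock/unlock call that only makes sense when APR was built with thread support gets wrapped in an #if APR_HAS_THREADS preprocessor guard, so a threadless build never references apr_thread_mutex_t or the PROXY_THREAD_LOCK/UNLOCK macros. A minimal sketch of the pattern, using illustrative names rather than the ones from the patch:

    #include <apr.h>                    /* defines APR_HAS_THREADS */
    #if APR_HAS_THREADS
    #include <apr_thread_mutex.h>
    #endif

    typedef struct {
    #if APR_HAS_THREADS
        apr_thread_mutex_t *lock;       /* member exists only in threaded builds */
    #endif
        void *data;
    } cache_t;

    static void cache_set(cache_t *c, void *v)
    {
    #if APR_HAS_THREADS
        apr_thread_mutex_lock(c->lock); /* serialize concurrent writers */
    #endif
        c->data = v;                    /* threadless builds run unlocked */
    #if APR_HAS_THREADS
        apr_thread_mutex_unlock(c->lock);
    #endif
    }

In a threadless build the preprocessor strips both the member and the call sites, and the function degenerates to a plain assignment.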
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 7a540aec74776b7650e4f0271fc985cb030eb5f9..152a79103e6bd5b3393c5608a69ff6ad057bb9ea 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -472,7 +472,9 @@ struct proxy_worker {
     proxy_conn_pool     *cp;    /* Connection pool to use */
     proxy_worker_shared   *s;   /* Shared data */
     proxy_balancer  *balancer;  /* which balancer am I in? */
+#if APR_HAS_THREADS
     apr_thread_mutex_t  *tmutex; /* Thread lock for updating address cache */
+#endif
     void            *context;   /* general purpose storage */
     ap_conf_vector_t *section_config; /* <Proxy>-section wherein defined */
 };
@@ -528,8 +530,10 @@ struct proxy_balancer {
     proxy_hashes hash;
     apr_time_t      wupdated;    /* timestamp of last change to workers list */
     proxy_balancer_method *lbmethod;
+#if APR_HAS_THREADS
     apr_global_mutex_t  *gmutex; /* global lock for updating list of workers */
     apr_thread_mutex_t  *tmutex; /* Thread lock for updating shm */
+#endif
     proxy_server_conf *sconf;
     void            *context;    /* general purpose storage */
     proxy_balancer_shared *s;    /* Shared data */
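Making the mutex members themselves conditional is the forcing move: when APR_HAS_THREADS is 0 the members simply do not exist, so any unguarded reference elsewhere becomes a hard compile error rather than dead code. That is why every remaining hunk in this commit guards the call sites as well, along these lines (an illustrative sketch mirroring the proxy_util.c hunks below, not a verbatim excerpt):

    #if APR_HAS_THREADS
        rv = apr_thread_mutex_create(&worker->tmutex,
                                     APR_THREAD_MUTEX_DEFAULT, p);
        if (rv != APR_SUCCESS)
            return rv;
    #endif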
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index b9f385794a93df19fec67375c2133c632ebe79ea..0aab6ab643c50fc0979bf24b4a201d7840596a76 100644
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -346,23 +346,27 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer,
     proxy_worker *candidate = NULL;
     apr_status_t rv;
 
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
                       "%s: Lock failed for find_best_worker()",
                       balancer->s->name);
         return NULL;
     }
+#endif
 
     candidate = (*balancer->lbmethod->finder)(balancer, r);
 
     if (candidate)
         candidate->s->elected++;
 
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
                       "%s: Unlock failed for find_best_worker()",
                       balancer->s->name);
     }
+#endif
 
     if (candidate == NULL) {
         /* All the workers are in error state or disabled.
@@ -492,11 +496,13 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
     /* Step 2: Lock the LoadBalancer
      * XXX: perhaps we need the process lock here
      */
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
                       "%s: Lock failed for pre_request", (*balancer)->s->name);
         return DECLINED;
     }
+#endif
 
     /* Step 3: force recovery */
     force_recovery(*balancer, r->server);
@@ -557,20 +563,24 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
             ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
                           "%s: All workers are in error state for route (%s)",
                           (*balancer)->s->name, route);
+#if APR_HAS_THREADS
             if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
                 ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
                               "%s: Unlock failed for pre_request",
                               (*balancer)->s->name);
             }
+#endif
             return HTTP_SERVICE_UNAVAILABLE;
         }
     }
 
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
                       "%s: Unlock failed for pre_request",
                       (*balancer)->s->name);
     }
+#endif
     if (!*worker) {
         runtime = find_best_worker(*balancer, r);
         if (!runtime) {
@@ -644,12 +654,14 @@ static int proxy_balancer_post_request(proxy_worker *worker,
 
     apr_status_t rv;
 
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173)
                       "%s: Lock failed for post_request",
                       balancer->s->name);
         return HTTP_INTERNAL_SERVER_ERROR;
     }
+#endif
 
     if (!apr_is_empty_array(balancer->errstatuses)
         && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
@@ -681,11 +693,12 @@ static int proxy_balancer_post_request(proxy_worker *worker,
         worker->s->error_time = apr_time_now();
 
     }
-
+#if APR_HAS_THREADS
     if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
         ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
                       "%s: Unlock failed for post_request", balancer->s->name);
     }
+#endif
     ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176)
                   "proxy_balancer_post_request for (%s)", balancer->s->name);
 
@@ -715,19 +728,23 @@ static void recalc_factors(proxy_balancer *balancer)
 
 static apr_status_t lock_remove(void *data)
 {
+#if APR_HAS_THREADS
     int i;
+#endif
     proxy_balancer *balancer;
     server_rec *s = data;
     void *sconf = s->module_config;
     proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
 
     balancer = (proxy_balancer *)conf->balancers->elts;
+#if APR_HAS_THREADS
     for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
         if (balancer->gmutex) {
             apr_global_mutex_destroy(balancer->gmutex);
             balancer->gmutex = NULL;
         }
     }
+#endif
     return(0);
 }
 
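Note that lock_remove guards the declaration of i as well as the loop that uses it: with the loop compiled away, an unconditional int i would draw an unused-variable warning under -Wall in threadless builds, where the function body reduces to just the return. The uerr locals guarded in mod_proxy_ftp.c and proxy_util.c further down follow the same reasoning. In sketch form:

    #if APR_HAS_THREADS
        int i;          /* declared under the same guard as its only use,
                         * keeping -Wunused-variable quiet when the loop
                         * is compiled away */
    #endif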
@@ -943,7 +960,7 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
             PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
 
             balancer->max_workers = balancer->workers->nelts + balancer->growth;
-
+#if APR_HAS_THREADS
             /* Create global mutex */
             rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
                                         balancer->s->sname, s, pconf, 0);
@@ -953,7 +970,7 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                              balancer->s->sname);
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
-
+#endif
             apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
                                       apr_pool_cleanup_null);
 
@@ -1133,17 +1150,21 @@ static int balancer_handler(request_rec *r)
 
     balancer = (proxy_balancer *)conf->balancers->elts;
     for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
+#if APR_HAS_THREADS
         if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189)
                           "%s: Lock failed for balancer_handler",
                           balancer->s->name);
         }
+#endif
         ap_proxy_sync_balancer(balancer, r->server, conf);
+#if APR_HAS_THREADS
         if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190)
                           "%s: Unlock failed for balancer_handler",
                           balancer->s->name);
         }
+#endif
     }
 
     if (r->args && (r->method_number == M_GET)) {
@@ -1356,11 +1377,13 @@ static int balancer_handler(request_rec *r)
             proxy_worker *nworker;
             nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
             if (!nworker && storage->num_free_slots(bsel->wslot)) {
+#if APR_HAS_THREADS
                 if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
                     ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
                                   "%s: Lock failed for adding worker",
                                   bsel->s->name);
                 }
+#endif
                 ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0);
                 if (!ret) {
                     unsigned int index;
@@ -1369,41 +1392,49 @@ static int balancer_handler(request_rec *r)
                     if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) {
                         ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195)
                                       "worker slotmem_grab failed");
+#if APR_HAS_THREADS
                         if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196)
                                           "%s: Unlock failed for adding worker",
                                           bsel->s->name);
                         }
+#endif
                         return HTTP_BAD_REQUEST;
                     }
                     if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) {
                         ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197)
                                       "worker slotmem_dptr failed");
+#if APR_HAS_THREADS
                         if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198)
                                           "%s: Unlock failed for adding worker",
                                           bsel->s->name);
                         }
+#endif
                         return HTTP_BAD_REQUEST;
                     }
                     if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) {
                         ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199)
                                       "Cannot share worker");
+#if APR_HAS_THREADS
                         if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200)
                                           "%s: Unlock failed for adding worker",
                                           bsel->s->name);
                         }
+#endif
                         return HTTP_BAD_REQUEST;
                     }
                     if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) {
                         ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201)
                                       "Cannot init worker");
+#if APR_HAS_THREADS
                         if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202)
                                           "%s: Unlock failed for adding worker",
                                           bsel->s->name);
                         }
+#endif
                         return HTTP_BAD_REQUEST;
                     }
                     /* sync all timestamps */
@@ -1414,14 +1445,18 @@ static int balancer_handler(request_rec *r)
                             ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207)
                                   "%s: failed to add worker %s",
                                   bsel->s->name, val);
+#if APR_HAS_THREADS
                     PROXY_GLOBAL_UNLOCK(bsel);
+#endif
                     return HTTP_BAD_REQUEST;
                 }
+#if APR_HAS_THREADS
                 if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
                     ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
                                   "%s: Unlock failed for adding worker",
                                   bsel->s->name);
                 }
+#endif
             } else {
                 ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207)
                                   "%s: failed to add worker %s",
diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
index 001e1e2dd6ad75f827420765f68ceae8c3d2dc9a..9cb7f8bc5d0c6a10f51d63d254280287ec87b36d 100644
--- a/modules/proxy/mod_proxy_ftp.c
+++ b/modules/proxy/mod_proxy_ftp.c
@@ -979,7 +979,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
     apr_status_t rv;
     conn_rec *origin, *data = NULL;
     apr_status_t err = APR_SUCCESS;
+#if APR_HAS_THREADS
     apr_status_t uerr = APR_SUCCESS;
+#endif
     apr_bucket_brigade *bb;
     char *buf, *connectname;
     apr_port_t connectport;
@@ -1119,10 +1121,12 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
 
     if (worker->s->is_address_reusable) {
         if (!worker->cp->addr) {
+#if APR_HAS_THREADS
             if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) {
                 ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock");
                 return HTTP_INTERNAL_SERVER_ERROR;
             }
+#endif
         }
         connect_addr = worker->cp->addr;
         address_pool = worker->cp->pool;
@@ -1138,9 +1142,11 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
                                     address_pool);
     if (worker->s->is_address_reusable && !worker->cp->addr) {
         worker->cp->addr = connect_addr;
+#if APR_HAS_THREADS
         if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) {
             ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock");
         }
+#endif
     }
     /*
      * get all the possible IP addresses for the destname and loop through
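For context, the mutex in the FTP handler protects a one-shot address cache: the first request through a reusable worker resolves the backend hostname and stores the result in worker->cp->addr, and later requests reuse it without touching the resolver. Condensed to its essentials (illustrative struct and names, not the httpd ones):

    #include <apr.h>
    #include <apr_network_io.h>
    #if APR_HAS_THREADS
    #include <apr_thread_mutex.h>
    #endif

    typedef struct {
        const char *hostname;
        apr_port_t port;
        apr_sockaddr_t *addr;            /* resolved once, then reused */
    #if APR_HAS_THREADS
        apr_thread_mutex_t *lock;        /* guards the first resolution */
    #endif
    } backend_t;

    static apr_status_t backend_addr(backend_t *b, apr_pool_t *p,
                                     apr_sockaddr_t **out)
    {
        apr_status_t rv = APR_SUCCESS;

        if (!b->addr) {
    #if APR_HAS_THREADS
            apr_thread_mutex_lock(b->lock);
            if (!b->addr)                /* re-check under the lock */
    #endif
            rv = apr_sockaddr_info_get(&b->addr, b->hostname,
                                       APR_UNSPEC, b->port, 0, p);
    #if APR_HAS_THREADS
            apr_thread_mutex_unlock(b->lock);
    #endif
        }
        *out = b->addr;
        return rv;
    }

In a threadless build only the unguarded slow path remains, which is safe because there are no other threads to race against.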
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 24468f97bd1b4e29bd5db7ecf8b1c034f7e948f0..af6a2635bf28e2199b12fa2805c543cd7659c40a 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -1167,8 +1167,10 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
     lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0");
 
     (*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
+#if APR_HAS_THREADS
     (*balancer)->gmutex = NULL;
     (*balancer)->tmutex = NULL;
+#endif
     (*balancer)->lbmethod = lbmethod;
 
     if (do_malloc)
@@ -1257,7 +1259,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer,
 
 PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balancer, server_rec *s, apr_pool_t *p)
 {
+#if APR_HAS_THREADS
     apr_status_t rv = APR_SUCCESS;
+#endif
     ap_slotmem_provider_t *storage = balancer->storage;
     apr_size_t size;
     unsigned int num;
@@ -1271,6 +1275,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
      * for each balancer we need to init the global
      * mutex and then attach to the shared worker shm
      */
+#if APR_HAS_THREADS
     if (!balancer->gmutex) {
         ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(00919)
                      "no mutex %s", balancer->s->name);
@@ -1287,6 +1292,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
                      balancer->s->name);
         return rv;
     }
+#endif
 
     /* now attach */
     storage->attach(&(balancer->wslot), balancer->s->sname, &size, &num, p);
@@ -1297,6 +1303,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
     if (balancer->lbmethod && balancer->lbmethod->reset)
         balancer->lbmethod->reset(balancer, s);
 
+#if APR_HAS_THREADS
     if (balancer->tmutex == NULL) {
         rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
         if (rv != APR_SUCCESS) {
@@ -1305,6 +1312,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
             return rv;
         }
     }
+#endif
     return APR_SUCCESS;
 }
 
@@ -2035,6 +2043,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
                      ap_proxy_worker_name(p, worker));
         apr_global_mutex_lock(proxy_mutex);
         /* Now init local worker data */
+#if APR_HAS_THREADS
         if (worker->tmutex == NULL) {
             rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
             if (rv != APR_SUCCESS) {
@@ -2044,6 +2053,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
                 return rv;
             }
         }
+#endif
         if (worker->cp == NULL)
             init_conn_pool(p, worker);
         if (worker->cp == NULL) {
@@ -2401,7 +2411,9 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
 {
     int server_port;
     apr_status_t err = APR_SUCCESS;
+#if APR_HAS_THREADS
     apr_status_t uerr = APR_SUCCESS;
+#endif
     const char *uds_path;
 
     /*
@@ -2535,10 +2547,12 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
              * we can reuse the address.
              */
             if (!worker->cp->addr) {
+#if APR_HAS_THREADS
                 if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
                     ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock");
                     return HTTP_INTERNAL_SERVER_ERROR;
                 }
+#endif
 
                 /*
                  * Worker can have the single constant backend address.
@@ -2551,9 +2565,11 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
                                             conn->port, 0,
                                             worker->cp->pool);
                 conn->addr = worker->cp->addr;
+#if APR_HAS_THREADS
                 if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
                     ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
                 }
+#endif
             }
             else {
                 conn->addr = worker->cp->addr;
@@ -3483,7 +3499,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec
             (*runtime)->cp = NULL;
             (*runtime)->balancer = b;
             (*runtime)->s = shm;
+#if APR_HAS_THREADS
             (*runtime)->tmutex = NULL;
+#endif
             rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
             if (rv != APR_SUCCESS) {
                 ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");
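Finally, to see which variant of this code a given APR installation will select, APR_HAS_THREADS can be inspected directly; it is set in the generated apr.h at APR configure time (a threadless APR can typically be produced with APR's --disable-threads configure option, though check the options of your APR version). A trivial probe:

    #include <stdio.h>
    #include <apr.h>

    int main(void)
    {
        /* 1 for a threaded APR build, 0 for a threadless one */
        printf("APR_HAS_THREADS = %d\n", APR_HAS_THREADS);
        return 0;
    }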