From: Jim Jagielski
Date: Thu, 13 Jan 2011 15:58:45 +0000 (+0000)
Subject: Use branch now
X-Git-Tag: 2.3.11~228
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5febbb22aa0f0f28919f89573d93bb3cd8c665f4;p=thirdparty%2Fapache%2Fhttpd.git

Use branch now

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1058621 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index 337c63de703..4294115bb36 100644
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -38,7 +38,7 @@ APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup,
 /* return the sizeof of one lb_worker in scoreboard. */
 static int ap_proxy_lb_worker_size(void)
 {
-    return sizeof(proxy_worker_stat);
+    return sizeof(proxy_worker_shared);
 }
 
 /*
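
The mod_proxy.c hunk above keeps the scoreboard geometry in step with the
rename made below in mod_proxy.h: the scoreboard reserves
ap_proxy_lb_worker_size() bytes for every lb slot, so once the runtime state
stored in a slot becomes a proxy_worker_shared, the size callback has to
report that type or the slots would be under-allocated. A minimal sketch of
how that contract is consumed, not part of this patch ("slot_id" is a
hypothetical index):

    int slot_id = 0;  /* hypothetical scoreboard lb index */
    proxy_worker_shared *slot =
        (proxy_worker_shared *) ap_get_scoreboard_lb(slot_id);
    /* The cast is only safe because ap_proxy_lb_worker_size() now
     * reports sizeof(proxy_worker_shared). */
    if (slot && (slot->status & PROXY_WORKER_INITIALIZED)) {
        /* slot already populated by another process; reuse it */
    }
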
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index dd0c48f83c7..691e35b70e9 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -42,6 +42,7 @@
 #include "apr_reslist.h"
 #define APR_WANT_STRFUNC
 #include "apr_want.h"
+#include "apr_global_mutex.h"
 
 #include "httpd.h"
 #include "http_config.h"
@@ -123,8 +124,8 @@ typedef struct {
     apr_array_header_t *aliases;
     apr_array_header_t *noproxies;
     apr_array_header_t *dirconn;
-    apr_array_header_t *workers;
-    apr_array_header_t *balancers;
+    apr_array_header_t *workers;    /* non-balancer workers, eg ProxyPass http://example.com */
+    apr_array_header_t *balancers;  /* list of balancers @ config time */
     proxy_worker       *forward;    /* forward proxy worker */
     proxy_worker       *reverse;    /* reverse "module-driven" proxy worker */
     const char *domain;     /* domain name to use in absence of a domain name in the request */
@@ -151,7 +152,8 @@ typedef struct {
         status_full
     } proxy_status;             /* Status display options */
     apr_sockaddr_t *source_address;
-
+    apr_global_mutex_t  *mutex; /* global lock for updating lb params */
+
     int req_set:1;
     int viaopt_set:1;
     int recv_buffer_size_set:1;
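
The new proxy_server_conf member above is the heart of this change: a
per-balancer apr_thread_mutex_t can only serialize threads inside a single
process, while load-balancer state that lives in shared memory is read and
written by every child. An apr_global_mutex_t locks across both processes
and threads, so one mutex per server config replaces the per-balancer thread
locks. A short sketch of the guarded update this enables (assumed names,
error handling elided):

    /* "conf" is the proxy_server_conf for this server_rec and
     * "worker->s" points at the worker's shared-memory slot. */
    if (apr_global_mutex_lock(conf->mutex) == APR_SUCCESS) {
        worker->s->lbstatus += worker->s->lbfactor;  /* shared state */
        apr_global_mutex_unlock(conf->mutex);
    }
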
@@ -274,80 +276,74 @@ PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR )
 
 /* default worker retry timeout in seconds */
 #define PROXY_WORKER_DEFAULT_RETRY    60
-#define PROXY_WORKER_MAX_ROUTE_SIZ    63
+#define PROXY_WORKER_MAX_SCHEME_SIZE    16
+#define PROXY_WORKER_MAX_ROUTE_SIZE     64
+#define PROXY_WORKER_MAX_NAME_SIZE      96
+
 /* Runtime worker status informations. Shared in scoreboard */
 typedef struct {
-    apr_time_t      error_time; /* time of the last error */
+    char        name[PROXY_WORKER_MAX_NAME_SIZE];
+    char        scheme[PROXY_WORKER_MAX_SCHEME_SIZE];   /* scheme to use ajp|http|https */
+    char        hostname[PROXY_WORKER_MAX_ROUTE_SIZE];  /* remote backend address */
+    char        route[PROXY_WORKER_MAX_ROUTE_SIZE];     /* balancing route */
+    char        redirect[PROXY_WORKER_MAX_ROUTE_SIZE];  /* temporary balancing redirection route */
+    char        flusher[PROXY_WORKER_MAX_SCHEME_SIZE];  /* flush provider used by mod_proxy_fdpass */
+    int             lbset;      /* load balancer cluster set */
     int             status;
     int             retries;    /* number of retries on this worker */
     int             lbstatus;   /* Current lbstatus */
     int             lbfactor;   /* dynamic lbfactor */
-    apr_off_t       transferred;/* Number of bytes transferred to remote */
-    apr_off_t       read;       /* Number of bytes read from remote */
-    apr_size_t      elected;    /* Number of times the worker was elected */
-    char            route[PROXY_WORKER_MAX_ROUTE_SIZ+1];
-    char            redirect[PROXY_WORKER_MAX_ROUTE_SIZ+1];
-    void            *context;   /* general purpose storage */
-    apr_size_t      busy;       /* busyness factor */
-    int             lbset;      /* load balancer cluster set */
-    unsigned int    apr_hash;   /* hash #0 of worker name */
-    unsigned int    our_hash;   /* hash #1 of worker name. Why 2? hash collisions. */
-} proxy_worker_stat;
-
-/* Worker configuration */
-struct proxy_worker {
-    const char      *name;
-    const char      *scheme;    /* scheme to use ajp|http|https */
-    const char      *hostname;  /* remote backend address */
-    const char      *route;     /* balancing route */
-    const char      *redirect;  /* temporary balancing redirection route */
-    int             id;         /* scoreboard id */
-    int             status;     /* temporary worker status */
-    int             lbfactor;   /* initial load balancing factor */
-    int             lbset;      /* load balancer cluster set */
     int             min;        /* Desired minimum number of available connections */
     int             smax;       /* Soft maximum on the total number of connections */
     int             hmax;       /* Hard maximum on the total number of connections */
+    int             flush_wait; /* poll wait time in microseconds if flush_auto */
+    int             index;      /* shm array index */
+    unsigned int    apr_hash;   /* hash #0 of worker name */
+    unsigned int    our_hash;   /* hash #1 of worker name. Why 2? hash collisions. */
+    enum {
+        flush_off,
+        flush_on,
+        flush_auto
+    } flush_packets;            /* control AJP flushing */
+    apr_time_t      error_time; /* time of the last error */
     apr_interval_time_t ttl;    /* maximum amount of time in seconds a connection
                                  * may be available while exceeding the soft limit */
-    apr_interval_time_t retry;   /* retry interval */
+    apr_interval_time_t retry;  /* retry interval */
    apr_interval_time_t timeout; /* connection timeout */
     apr_interval_time_t acquire; /* acquire timeout when the maximum number of connections is exceeded */
     apr_interval_time_t ping_timeout;
     apr_interval_time_t conn_timeout;
     apr_size_t      recv_buffer_size;
     apr_size_t      io_buffer_size;
+    apr_size_t      elected;    /* Number of times the worker was elected */
+    apr_size_t      busy;       /* busyness factor */
     apr_port_t      port;
-    char            keepalive;
-    char            disablereuse;
-    int             is_address_reusable:1;
-    proxy_conn_pool *cp;        /* Connection pool to use */
-    proxy_worker_stat   *s;     /* Shared data */
-    void            *opaque;    /* per scheme worker data */
+    apr_off_t       transferred;/* Number of bytes transferred to remote */
+    apr_off_t       read;       /* Number of bytes read from remote */
+    void            *context;   /* general purpose storage */
+    unsigned int    keepalive:1;
+    unsigned int    disablereuse:1;
+    unsigned int    is_address_reusable:1;
+    unsigned int    retry_set:1;
+    unsigned int    timeout_set:1;
+    unsigned int    acquire_set:1;
+    unsigned int    ping_timeout_set:1;
+    unsigned int    conn_timeout_set:1;
+    unsigned int    recv_buffer_size_set:1;
+    unsigned int    io_buffer_size_set:1;
+    unsigned int    keepalive_set:1;
+    unsigned int    disablereuse_set:1;
+} proxy_worker_shared;
+
+/* Worker configuration */
+struct proxy_worker {
+    proxy_conn_pool     *cp;    /* Connection pool to use */
+    proxy_worker_shared *s;     /* Shared data */
     void            *context;   /* general purpose storage */
-    enum {
-        flush_off,
-        flush_on,
-        flush_auto
-    } flush_packets;            /* control AJP flushing */
-    int             flush_wait; /* poll wait time in microseconds if flush_auto */
-    const char      *flusher;   /* flush provider used by mod_proxy_fdpass */
 #if APR_HAS_THREADS
-    apr_thread_mutex_t  *mutex;  /* Thread lock for updating address cache */
+    apr_thread_mutex_t  *mutex; /* Thread lock for updating address cache */
 #endif
-
-    int             retry_set:1;
-    int             timeout_set:1;
-    int             acquire_set:1;
-    int             ping_timeout_set:1;
-    int             conn_timeout_set:1;
-    int             recv_buffer_size_set:1;
-    int             io_buffer_size_set:1;
-    int             keepalive_set:1;
-    int             disablereuse_set:1;
-    unsigned int    apr_hash;   /* hash #0 of worker name */
-    unsigned int    our_hash;   /* hash #1 of worker name. Why 2? hash collisions. */
 };
 
 /*
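
The struct split above follows one rule: everything a child must read or
update at request time moves into proxy_worker_shared, and because pointers
are process-local, its strings become fixed-size char arrays sized by the
new *_SIZE constants (full buffer sizes including the NUL, replacing the old
PROXY_WORKER_MAX_ROUTE_SIZ+1 convention). proxy_worker keeps only
process-local resources: the connection pool, the pointer to the shared
slot, opaque context, and the thread mutex guarding the address cache. A
hypothetical sketch of filling a freshly claimed slot ("shared" and the
source strings are illustrative, not from the patch):

    /* apr_cpystrn() copies at most size-1 bytes and always
     * NUL-terminates, so the fixed arrays in shared memory can
     * neither overflow nor end up unterminated. */
    apr_cpystrn(shared->name,     name,     PROXY_WORKER_MAX_NAME_SIZE);
    apr_cpystrn(shared->scheme,   scheme,   PROXY_WORKER_MAX_SCHEME_SIZE);
    apr_cpystrn(shared->hostname, hostname, PROXY_WORKER_MAX_ROUTE_SIZE);
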
@@ -357,9 +353,12 @@ struct proxy_worker {
 #define PROXY_FLUSH_WAIT 10000
 
 struct proxy_balancer {
-    apr_array_header_t *workers;  /* array of proxy_workers */
+    apr_array_header_t *cw;       /* initially configured workers */
+    proxy_worker       **workers; /* array of proxy_workers - runtime*/
+    int                max_workers; /* maximum number of allowed workers */
     const char *name;            /* name of the load balancer */
     apr_interval_time_t timeout; /* Timeout for waiting on free connection */
+    const char *lbprovider;      /* name of the lbmethod provider to use */
     proxy_balancer_method *lbmethod;
 
     const char      *sticky_path;   /* URL sticky session identifier */
@@ -370,11 +369,8 @@ struct proxy_balancer {
     int             sticky_force:1;   /* Disable failover for sticky sessions */
     int             scolonsep:1;      /* true if ';' seps sticky session paths */
     int             max_attempts_set:1;
-#if APR_HAS_THREADS
-    apr_thread_mutex_t  *mutex;  /* Thread lock for updating lb params */
-#endif
-    void            *context;    /* general purpose storage */
-    apr_time_t      updated;     /* timestamp of last update */
+    void            *context;    /* general purpose storage */
+    apr_time_t      updated;     /* timestamp of last update */
 };
 
 struct proxy_balancer_method {
@@ -395,6 +391,9 @@ struct proxy_balancer_method {
 #define PROXY_THREAD_UNLOCK(x)    APR_SUCCESS
 #endif
 
+#define PROXY_GLOBAL_LOCK(x)      apr_global_mutex_lock((x)->mutex)
+#define PROXY_GLOBAL_UNLOCK(x)    apr_global_mutex_unlock((x)->mutex)
+
 /* hooks */
 
 /* Create a set of PROXY_DECLARE(type), PROXY_DECLARE_NONSTD(type) and
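
PROXY_GLOBAL_LOCK()/PROXY_GLOBAL_UNLOCK() deliberately mirror the
PROXY_THREAD_* macros, with one asymmetry worth noting: the thread macros
collapse to APR_SUCCESS no-ops when APR_HAS_THREADS is unset, whereas the
global macros always take the mutex, because even a non-threaded prefork
child has sibling processes to coordinate with through shared memory. The
balancer hunks below all adopt roughly the same calling pattern (a sketch;
"conf", "rv" and "r" assumed in scope):

    if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                     "proxy: failed to lock shared lb state");
        return HTTP_INTERNAL_SERVER_ERROR;  /* lock failure is fatal */
    }
    /* ... read or update shared lb state ... */
    PROXY_GLOBAL_UNLOCK(conf);
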
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index d53825e786b..c6d14258586 100644
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -24,13 +24,26 @@
 #include "apr_uuid.h"
 #include "apr_date.h"
 
+static const char *balancer_mutex_type = "proxy-balancer-shm";
+
 module AP_MODULE_DECLARE_DATA proxy_balancer_module;
 
 static char balancer_nonce[APR_UUID_FORMATTED_LENGTH + 1];
 
+/*
+ * Register our mutex type before the config is read so we
+ * can adjust the mutex settings using the Mutex directive.
+ */
+static int balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
+                               apr_pool_t *ptemp)
+{
+    ap_mutex_register(pconf, balancer_mutex_type, NULL, APR_LOCK_DEFAULT, 0);
+    return OK;
+}
+
 #if 0
 extern void proxy_update_members(proxy_balancer **balancer, request_rec *r,
-                                proxy_server_conf *conf);
+                                 proxy_server_conf *conf);
 #endif
 
 static int proxy_balancer_canon(request_rec *r, char *url)
@@ -96,12 +109,12 @@ static int init_balancer_members(proxy_server_conf *conf, server_rec *s,
         int worker_is_initialized;
         worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(*workers);
         if (!worker_is_initialized) {
-            proxy_worker_stat *slot;
+            proxy_worker_shared *slot;
             /*
              * If the worker is not initialized check whether its scoreboard
              * slot is already initialized.
              */
-            slot = (proxy_worker_stat *) ap_get_scoreboard_lb((*workers)->id);
+            slot = (proxy_worker_shared *) ap_get_scoreboard_lb((*workers)->id);
             if (slot) {
                 worker_is_initialized = slot->status & PROXY_WORKER_INITIALIZED;
             }
@@ -322,8 +335,10 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer,
 {
     proxy_worker *candidate = NULL;
     apr_status_t rv;
+    proxy_server_conf *conf = (proxy_server_conf *)
+        ap_get_module_config(r->server->module_config, &proxy_module);
 
-    if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
+    if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                      "proxy: BALANCER: (%s). Lock failed for find_best_worker()",
                      balancer->name);
         return NULL;
@@ -335,11 +350,11 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer,
         candidate->s->elected++;
 
 /*
-        PROXY_THREAD_UNLOCK(balancer);
+        PROXY_GLOBAL_UNLOCK(conf);
         return NULL;
 */
 
-    if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
+    if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                      "proxy: BALANCER: (%s). Unlock failed for find_best_worker()",
                      balancer->name);
     }
@@ -463,7 +478,7 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
     /* Step 2: Lock the LoadBalancer
      * XXX: perhaps we need the process lock here
      */
-    if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
+    if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                      "proxy: BALANCER: (%s). Lock failed for pre_request",
                      (*balancer)->name);
@@ -529,7 +544,7 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
             ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
                          "proxy: BALANCER: (%s). All workers are in error state for route (%s)",
                          (*balancer)->name, route);
-            if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
+            if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
                 ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                              "proxy: BALANCER: (%s). Unlock failed for pre_request",
                              (*balancer)->name);
@@ -538,7 +553,7 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
         }
     }
 
-    if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
+    if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                      "proxy: BALANCER: (%s). Unlock failed for pre_request",
                      (*balancer)->name);
@@ -614,7 +629,7 @@ static int proxy_balancer_post_request(proxy_worker *worker,
 
     apr_status_t rv;
 
-    if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
+    if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                      "proxy: BALANCER: (%s). Lock failed for post_request",
                      balancer->name);
@@ -636,7 +651,7 @@ static int proxy_balancer_post_request(proxy_worker *worker,
         }
     }
 
-    if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
+    if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                      "proxy: BALANCER: (%s). Unlock failed for post_request",
                      balancer->name);
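
Note the ordering that balancer_pre_config (in the first hunk of this file)
sets up: registering the "proxy-balancer-shm" type before the configuration
is parsed is what allows the core Mutex directive to override the lock
mechanism and lockfile for this specific mutex, for example with a
hypothetical "Mutex sysvsem proxy-balancer-shm" line in httpd.conf. The two
hunks that follow complete the lifecycle: the parent creates the mutex once
in post_config, and every child reopens it in child_init; a condensed sketch
of the whole pattern appears after the last hunk of the patch.
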
@@ -673,24 +688,34 @@ static void recalc_factors(proxy_balancer *balancer)
 }
 
 /* post_config hook: */
-static int balancer_init(apr_pool_t *p, apr_pool_t *plog,
+static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                          apr_pool_t *ptemp, server_rec *s)
 {
     apr_uuid_t uuid;
     void *data;
+    apr_status_t rv;
+    void *sconf = s->module_config;
+    proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
     const char *userdata_key = "mod_proxy_balancer_init";
 
     /* balancer_init() will be called twice during startup. So, only
      * set up the static data the 1st time through. */
     apr_pool_userdata_get(&data, userdata_key, s->process->pool);
     if (!data) {
-        /* Retrieve a UUID and store the nonce for the lifetime of
-         * the process. */
-        apr_uuid_get(&uuid);
-        apr_uuid_format(balancer_nonce, &uuid);
         apr_pool_userdata_set((const void *)1, userdata_key,
                               apr_pool_cleanup_null, s->process->pool);
     }
 
+    /* Retrieve a UUID and store the nonce for the lifetime of
+     * the process. */
+    apr_uuid_get(&uuid);
+    apr_uuid_format(balancer_nonce, &uuid);
+
+    /* Create global mutex */
+    rv = ap_global_mutex_create(&conf->mutex, NULL, balancer_mutex_type, NULL,
+                                s, pconf, 0);
+    if (rv != APR_SUCCESS) {
+        return HTTP_INTERNAL_SERVER_ERROR;
+    }
+
     return OK;
 }
@@ -967,15 +992,26 @@ static int balancer_handler(request_rec *r)
     return OK;
 }
 
-static void child_init(apr_pool_t *p, server_rec *s)
+static void balancer_child_init(apr_pool_t *p, server_rec *s)
 {
     while (s) {
-        void *sconf = s->module_config;
-        proxy_server_conf *conf;
         proxy_balancer *balancer;
         int i;
-        conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
-
+        void *sconf = s->module_config;
+        proxy_server_conf *conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
+        apr_status_t rv;
+
+        /* Re-open the mutex for the child. */
+        rv = apr_global_mutex_child_init(&conf->mutex,
+                                         apr_global_mutex_lockfile(conf->mutex),
+                                         p);
+        if (rv != APR_SUCCESS) {
+            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
+                         "Failed to reopen mutex %s in child",
+                         balancer_mutex_type);
+            exit(1); /* Ugly, but what else? */
+        }
+
         /* Initialize shared scoreboard data */
         balancer = (proxy_balancer *)conf->balancers->elts;
         for (i = 0; i < conf->balancers->nelts; i++) {
@@ -1026,9 +1062,10 @@ static void ap_proxy_balancer_register_hook(apr_pool_t *p)
      */
    static const char *const aszPred[] = { "mpm_winnt.c", NULL};
     /* manager handler */
-    ap_hook_post_config(balancer_init, NULL, NULL, APR_HOOK_MIDDLE);
+    ap_hook_post_config(balancer_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+    ap_hook_pre_config(balancer_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_handler(balancer_handler, NULL, NULL, APR_HOOK_FIRST);
-    ap_hook_child_init(child_init, aszPred, NULL, APR_HOOK_MIDDLE);
+    ap_hook_child_init(balancer_child_init, aszPred, NULL, APR_HOOK_MIDDLE);
     proxy_hook_pre_request(proxy_balancer_pre_request, NULL, NULL, APR_HOOK_FIRST);
     proxy_hook_post_request(proxy_balancer_post_request, NULL, NULL, APR_HOOK_FIRST);
     proxy_hook_canon_handler(proxy_balancer_canon, NULL, NULL, APR_HOOK_FIRST);
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 0d78dfd75ef..0a637f89143 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -1875,7 +1875,7 @@ PROXY_DECLARE(void) ap_proxy_initialize_worker_share(proxy_server_conf *conf,
                                                      proxy_worker *worker,
                                                      server_rec *s)
 {
-    proxy_worker_stat *score = NULL;
+    proxy_worker_shared *score = NULL;
 
     if (PROXY_WORKER_IS_INITIALIZED(worker)) {
         /* The worker share is already initialized */
@@ -1887,7 +1887,7 @@ PROXY_DECLARE(void) ap_proxy_initialize_worker_share(proxy_server_conf *conf,
     if (!worker->s) {
         /* Get scoreboard slot */
         if (ap_scoreboard_image) {
-            score = (proxy_worker_stat *) ap_get_scoreboard_lb(worker->id);
+            score = (proxy_worker_shared *) ap_get_scoreboard_lb(worker->id);
             if (!score) {
                 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                              "proxy: ap_get_scoreboard_lb(%d) failed in child %" APR_PID_T_FMT " for worker %s",
@@ -1900,7 +1900,7 @@ PROXY_DECLARE(void) ap_proxy_initialize_worker_share(proxy_server_conf *conf,
         }
     }
     if (!score) {
-        score = (proxy_worker_stat *) apr_pcalloc(conf->pool, sizeof(proxy_worker_stat));
+        score = (proxy_worker_shared *) apr_pcalloc(conf->pool, sizeof(proxy_worker_shared));
         ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
                      "proxy: initialized plain memory in child %" APR_PID_T_FMT " for worker %s",
                      getpid(), worker->name);
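
Taken together, the mod_proxy_balancer.c changes follow the standard
three-phase pattern for an httpd global mutex. A condensed, self-contained
sketch of that pattern (an illustration under assumptions, not the patch
itself: error paths are trimmed, and "conf" stands for the proxy_server_conf
obtained via ap_get_module_config()):

    #include "httpd.h"
    #include "http_config.h"
    #include "util_mutex.h"       /* ap_mutex_register, ap_global_mutex_create */
    #include "apr_global_mutex.h"

    static const char *mutex_type = "proxy-balancer-shm";

    /* 1. pre_config: register the type so the Mutex directive can
     *    tune it before any mutex is created. */
    static int pre_config(apr_pool_t *pconf, apr_pool_t *plog,
                          apr_pool_t *ptemp)
    {
        ap_mutex_register(pconf, mutex_type, NULL, APR_LOCK_DEFAULT, 0);
        return OK;
    }

    /* 2. post_config: the parent creates the mutex exactly once. */
    static int post_config(apr_pool_t *pconf, apr_pool_t *plog,
                           apr_pool_t *ptemp, server_rec *s)
    {
        /* "conf" assumed: per-server proxy_server_conf */
        if (ap_global_mutex_create(&conf->mutex, NULL, mutex_type, NULL,
                                   s, pconf, 0) != APR_SUCCESS) {
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        return OK;
    }

    /* 3. child_init: each child re-attaches to the same underlying
     *    lock; the lockfile name matters for file-based mechanisms. */
    static void child_init(apr_pool_t *pchild, server_rec *s)
    {
        apr_global_mutex_child_init(&conf->mutex,
                                    apr_global_mutex_lockfile(conf->mutex),
                                    pchild);
    }
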