You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
637 lines
27 KiB
637 lines
27 KiB
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
|
|
index 57cc92f..fbbd508 100644
|
|
--- a/modules/proxy/mod_proxy.h
|
|
+++ b/modules/proxy/mod_proxy.h
|
|
@@ -288,12 +288,15 @@ typedef struct {
|
|
|
|
/* Connection pool */
|
|
struct proxy_conn_pool {
|
|
- apr_pool_t *pool; /* The pool used in constructor and destructor calls */
|
|
- apr_sockaddr_t *addr; /* Preparsed remote address info */
|
|
- apr_reslist_t *res; /* Connection resource list */
|
|
- proxy_conn_rec *conn; /* Single connection for prefork mpm */
|
|
+ apr_pool_t *pool; /* The pool used in constructor and destructor calls */
|
|
+ apr_sockaddr_t *addr; /* Preparsed remote address info */
|
|
+ apr_reslist_t *res; /* Connection resource list */
|
|
+ proxy_conn_rec *conn; /* Single connection for prefork mpm */
|
|
+ apr_pool_t *dns_pool; /* The pool used for worker scoped DNS resolutions */
|
|
};
|
|
|
|
+#define AP_VOLATILIZE_T(T, x) (*(T volatile *)&(x))
|
|
+
|
|
/* worker status bits */
|
|
/*
|
|
* NOTE: Keep up-to-date w/ proxy_wstat_tbl[]
|
|
@@ -475,7 +478,9 @@ struct proxy_worker {
|
|
proxy_conn_pool *cp; /* Connection pool to use */
|
|
proxy_worker_shared *s; /* Shared data */
|
|
proxy_balancer *balancer; /* which balancer am I in? */
|
|
+#if APR_HAS_THREADS
|
|
apr_thread_mutex_t *tmutex; /* Thread lock for updating address cache */
|
|
+#endif
|
|
void *context; /* general purpose storage */
|
|
ap_conf_vector_t *section_config; /* <Proxy>-section wherein defined */
|
|
};
|
|
@@ -534,7 +539,9 @@ struct proxy_balancer {
|
|
apr_time_t wupdated; /* timestamp of last change to workers list */
|
|
proxy_balancer_method *lbmethod;
|
|
apr_global_mutex_t *gmutex; /* global lock for updating list of workers */
|
|
+#if APR_HAS_THREADS
|
|
apr_thread_mutex_t *tmutex; /* Thread lock for updating shm */
|
|
+#endif
|
|
proxy_server_conf *sconf;
|
|
void *context; /* general purpose storage */
|
|
proxy_balancer_shared *s; /* Shared data */
|
|
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
|
|
index c59f5e9..3a28038 100644
|
|
--- a/modules/proxy/mod_proxy_balancer.c
|
|
+++ b/modules/proxy/mod_proxy_balancer.c
|
|
@@ -346,23 +346,27 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer,
|
|
proxy_worker *candidate = NULL;
|
|
apr_status_t rv;
|
|
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
|
|
"%s: Lock failed for find_best_worker()",
|
|
balancer->s->name);
|
|
return NULL;
|
|
}
|
|
+#endif
|
|
|
|
candidate = (*balancer->lbmethod->finder)(balancer, r);
|
|
|
|
if (candidate)
|
|
candidate->s->elected++;
|
|
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
|
|
"%s: Unlock failed for find_best_worker()",
|
|
balancer->s->name);
|
|
}
|
|
+#endif
|
|
|
|
if (candidate == NULL) {
|
|
/* All the workers are in error state or disabled.
|
|
@@ -492,11 +496,13 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
|
|
/* Step 2: Lock the LoadBalancer
|
|
* XXX: perhaps we need the process lock here
|
|
*/
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
|
|
"%s: Lock failed for pre_request", (*balancer)->s->name);
|
|
return DECLINED;
|
|
}
|
|
+#endif
|
|
|
|
/* Step 3: force recovery */
|
|
force_recovery(*balancer, r->server);
|
|
@@ -557,20 +563,24 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
|
|
"%s: All workers are in error state for route (%s)",
|
|
(*balancer)->s->name, route);
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
|
|
"%s: Unlock failed for pre_request",
|
|
(*balancer)->s->name);
|
|
}
|
|
+#endif
|
|
return HTTP_SERVICE_UNAVAILABLE;
|
|
}
|
|
}
|
|
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
|
|
"%s: Unlock failed for pre_request",
|
|
(*balancer)->s->name);
|
|
}
|
|
+#endif
|
|
if (!*worker) {
|
|
runtime = find_best_worker(*balancer, r);
|
|
if (!runtime) {
|
|
@@ -644,12 +654,14 @@ static int proxy_balancer_post_request(proxy_worker *worker,
|
|
|
|
apr_status_t rv;
|
|
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173)
|
|
"%s: Lock failed for post_request",
|
|
balancer->s->name);
|
|
return HTTP_INTERNAL_SERVER_ERROR;
|
|
}
|
|
+#endif
|
|
|
|
if (!apr_is_empty_array(balancer->errstatuses)
|
|
&& !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
|
|
@@ -681,11 +693,12 @@ static int proxy_balancer_post_request(proxy_worker *worker,
|
|
worker->s->error_time = apr_time_now();
|
|
|
|
}
|
|
-
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
|
|
"%s: Unlock failed for post_request", balancer->s->name);
|
|
}
|
|
+#endif
|
|
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176)
|
|
"proxy_balancer_post_request for (%s)", balancer->s->name);
|
|
|
|
@@ -945,7 +958,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
|
|
PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
|
|
|
|
balancer->max_workers = balancer->workers->nelts + balancer->growth;
|
|
-
|
|
/* Create global mutex */
|
|
rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
|
|
balancer->s->sname, s, pconf, 0);
|
|
@@ -955,7 +967,6 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
|
|
balancer->s->sname);
|
|
return HTTP_INTERNAL_SERVER_ERROR;
|
|
}
|
|
-
|
|
apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
|
|
apr_pool_cleanup_null);
|
|
|
|
@@ -1135,17 +1146,21 @@ static int balancer_handler(request_rec *r)
|
|
|
|
balancer = (proxy_balancer *)conf->balancers->elts;
|
|
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189)
|
|
"%s: Lock failed for balancer_handler",
|
|
balancer->s->name);
|
|
}
|
|
+#endif
|
|
ap_proxy_sync_balancer(balancer, r->server, conf);
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190)
|
|
"%s: Unlock failed for balancer_handler",
|
|
balancer->s->name);
|
|
}
|
|
+#endif
|
|
}
|
|
|
|
if (r->args && (r->method_number == M_GET)) {
|
|
@@ -1359,11 +1374,13 @@ static int balancer_handler(request_rec *r)
|
|
proxy_worker *nworker;
|
|
nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
|
|
if (!nworker && storage->num_free_slots(bsel->wslot)) {
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
|
|
"%s: Lock failed for adding worker",
|
|
bsel->s->name);
|
|
}
|
|
+#endif
|
|
ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0);
|
|
if (!ret) {
|
|
unsigned int index;
|
|
@@ -1372,53 +1389,76 @@ static int balancer_handler(request_rec *r)
|
|
if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195)
|
|
"worker slotmem_grab failed");
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196)
|
|
"%s: Unlock failed for adding worker",
|
|
bsel->s->name);
|
|
}
|
|
+#endif
|
|
return HTTP_BAD_REQUEST;
|
|
}
|
|
if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197)
|
|
"worker slotmem_dptr failed");
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198)
|
|
"%s: Unlock failed for adding worker",
|
|
bsel->s->name);
|
|
}
|
|
+#endif
|
|
return HTTP_BAD_REQUEST;
|
|
}
|
|
if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199)
|
|
"Cannot share worker");
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200)
|
|
"%s: Unlock failed for adding worker",
|
|
bsel->s->name);
|
|
}
|
|
+#endif
|
|
return HTTP_BAD_REQUEST;
|
|
}
|
|
if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201)
|
|
"Cannot init worker");
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202)
|
|
"%s: Unlock failed for adding worker",
|
|
bsel->s->name);
|
|
}
|
|
+#endif
|
|
return HTTP_BAD_REQUEST;
|
|
}
|
|
/* sync all timestamps */
|
|
bsel->wupdated = bsel->s->wupdated = nworker->s->updated = apr_time_now();
|
|
/* by default, all new workers are disabled */
|
|
ap_proxy_set_wstatus(PROXY_WORKER_DISABLED_FLAG, 1, nworker);
|
|
+ } else {
|
|
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10163)
|
|
+ "%s: failed to add worker %s",
|
|
+ bsel->s->name, val);
|
|
+#if APR_HAS_THREADS
|
|
+ PROXY_GLOBAL_UNLOCK(bsel);
|
|
+#endif
|
|
+ return HTTP_BAD_REQUEST;
|
|
}
|
|
+#if APR_HAS_THREADS
|
|
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
|
|
"%s: Unlock failed for adding worker",
|
|
bsel->s->name);
|
|
}
|
|
+#endif
|
|
+ } else {
|
|
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10164)
|
|
+ "%s: failed to add worker %s",
|
|
+ bsel->s->name, val);
|
|
+ return HTTP_BAD_REQUEST;
|
|
}
|
|
|
|
}
|
|
diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
|
|
index 5d9175e..5c4d641 100644
|
|
--- a/modules/proxy/mod_proxy_ftp.c
|
|
+++ b/modules/proxy/mod_proxy_ftp.c
|
|
@@ -979,8 +979,10 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
|
|
apr_status_t rv;
|
|
conn_rec *origin, *data = NULL;
|
|
apr_status_t err = APR_SUCCESS;
|
|
+#if APR_HAS_THREADS
|
|
apr_status_t uerr = APR_SUCCESS;
|
|
- apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
|
|
+#endif
|
|
+ apr_bucket_brigade *bb;
|
|
char *buf, *connectname;
|
|
apr_port_t connectport;
|
|
char *ftpmessage = NULL;
|
|
@@ -1120,13 +1122,15 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
|
|
|
|
if (worker->s->is_address_reusable) {
|
|
if (!worker->cp->addr) {
|
|
+#if APR_HAS_THREADS
|
|
if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock");
|
|
return HTTP_INTERNAL_SERVER_ERROR;
|
|
}
|
|
+#endif
|
|
}
|
|
- connect_addr = worker->cp->addr;
|
|
- address_pool = worker->cp->pool;
|
|
+ connect_addr = AP_VOLATILIZE_T(apr_sockaddr_t *, worker->cp->addr);
|
|
+ address_pool = worker->cp->dns_pool;
|
|
}
|
|
else
|
|
address_pool = r->pool;
|
|
@@ -1139,9 +1143,11 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
|
|
address_pool);
|
|
if (worker->s->is_address_reusable && !worker->cp->addr) {
|
|
worker->cp->addr = connect_addr;
|
|
+#if APR_HAS_THREADS
|
|
if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock");
|
|
}
|
|
+#endif
|
|
}
|
|
/*
|
|
* get all the possible IP addresses for the destname and loop through
|
|
@@ -1212,6 +1218,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
|
|
* correct directory...
|
|
*/
|
|
|
|
+ bb = apr_brigade_create(p, c->bucket_alloc);
|
|
|
|
/* possible results: */
|
|
/* 120 Service ready in nnn minutes. */
|
|
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
|
|
index 2bfc8f0..7714b6c 100644
|
|
--- a/modules/proxy/proxy_util.c
|
|
+++ b/modules/proxy/proxy_util.c
|
|
@@ -1167,8 +1167,10 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
|
|
lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0");
|
|
|
|
(*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
|
|
+#if APR_HAS_THREADS
|
|
(*balancer)->gmutex = NULL;
|
|
(*balancer)->tmutex = NULL;
|
|
+#endif
|
|
(*balancer)->lbmethod = lbmethod;
|
|
|
|
if (do_malloc)
|
|
@@ -1257,7 +1259,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer,
|
|
|
|
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balancer, server_rec *s, apr_pool_t *p)
|
|
{
|
|
+#if APR_HAS_THREADS
|
|
apr_status_t rv = APR_SUCCESS;
|
|
+#endif
|
|
ap_slotmem_provider_t *storage = balancer->storage;
|
|
apr_size_t size;
|
|
unsigned int num;
|
|
@@ -1297,6 +1301,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
|
|
if (balancer->lbmethod && balancer->lbmethod->reset)
|
|
balancer->lbmethod->reset(balancer, s);
|
|
|
|
+#if APR_HAS_THREADS
|
|
if (balancer->tmutex == NULL) {
|
|
rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
|
|
if (rv != APR_SUCCESS) {
|
|
@@ -1305,6 +1310,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
|
|
return rv;
|
|
}
|
|
}
|
|
+#endif
|
|
return APR_SUCCESS;
|
|
}
|
|
|
|
@@ -1446,16 +1452,14 @@ static void socket_cleanup(proxy_conn_rec *conn)
|
|
|
|
static apr_status_t conn_pool_cleanup(void *theworker)
|
|
{
|
|
- proxy_worker *worker = (proxy_worker *)theworker;
|
|
- if (worker->cp->res) {
|
|
- worker->cp->pool = NULL;
|
|
- }
|
|
+ ((proxy_worker *)theworker)->cp = NULL;
|
|
return APR_SUCCESS;
|
|
}
|
|
|
|
static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
|
|
{
|
|
apr_pool_t *pool;
|
|
+ apr_pool_t *dns_pool;
|
|
proxy_conn_pool *cp;
|
|
|
|
/*
|
|
@@ -1466,12 +1470,21 @@ static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
|
|
*/
|
|
apr_pool_create(&pool, p);
|
|
apr_pool_tag(pool, "proxy_worker_cp");
|
|
+ /*
|
|
+ * Create a subpool of the connection pool for worker
|
|
+ * scoped DNS resolutions. This is needed to avoid race
|
|
+ * conditions in using the connection pool by multiple
|
|
+ * threads during ramp up.
|
|
+ */
|
|
+ apr_pool_create(&dns_pool, pool);
|
|
+ apr_pool_tag(dns_pool, "proxy_worker_dns");
|
|
/*
|
|
* Alloc from the same pool as worker.
|
|
* proxy_conn_pool is permanently attached to the worker.
|
|
*/
|
|
cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
|
|
cp->pool = pool;
|
|
+ cp->dns_pool = dns_pool;
|
|
worker->cp = cp;
|
|
}
|
|
|
|
@@ -1487,14 +1500,6 @@ static apr_status_t connection_cleanup(void *theconn)
|
|
proxy_conn_rec *conn = (proxy_conn_rec *)theconn;
|
|
proxy_worker *worker = conn->worker;
|
|
|
|
- /*
|
|
- * If the connection pool is NULL the worker
|
|
- * cleanup has been run. Just return.
|
|
- */
|
|
- if (!worker->cp->pool) {
|
|
- return APR_SUCCESS;
|
|
- }
|
|
-
|
|
if (conn->r) {
|
|
apr_pool_destroy(conn->r->pool);
|
|
conn->r = NULL;
|
|
@@ -1616,7 +1621,7 @@ static apr_status_t connection_destructor(void *resource, void *params,
|
|
proxy_worker *worker = params;
|
|
|
|
/* Destroy the pool only if not called from reslist_destroy */
|
|
- if (worker->cp->pool) {
|
|
+ if (worker->cp) {
|
|
proxy_conn_rec *conn = resource;
|
|
apr_pool_destroy(conn->pool);
|
|
}
|
|
@@ -1972,67 +1977,73 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
|
|
ap_proxy_worker_name(p, worker));
|
|
}
|
|
else {
|
|
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
|
|
- "initializing worker %s local",
|
|
- ap_proxy_worker_name(p, worker));
|
|
apr_global_mutex_lock(proxy_mutex);
|
|
- /* Now init local worker data */
|
|
- if (worker->tmutex == NULL) {
|
|
- rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
|
|
- if (rv != APR_SUCCESS) {
|
|
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00928)
|
|
- "can not create worker thread mutex");
|
|
+ /* Check again after we got the lock if we are still uninitialized */
|
|
+ if (!(AP_VOLATILIZE_T(unsigned int, worker->local_status) & PROXY_WORKER_INITIALIZED)) {
|
|
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
|
|
+ "initializing worker %s local",
|
|
+ ap_proxy_worker_name(p, worker));
|
|
+ /* Now init local worker data */
|
|
+#if APR_HAS_THREADS
|
|
+ if (worker->tmutex == NULL) {
|
|
+ rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
|
|
+ if (rv != APR_SUCCESS) {
|
|
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00928)
|
|
+ "can not create worker thread mutex");
|
|
+ apr_global_mutex_unlock(proxy_mutex);
|
|
+ return rv;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+ if (worker->cp == NULL)
|
|
+ init_conn_pool(p, worker);
|
|
+ if (worker->cp == NULL) {
|
|
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
|
|
+ "can not create connection pool");
|
|
apr_global_mutex_unlock(proxy_mutex);
|
|
- return rv;
|
|
+ return APR_EGENERAL;
|
|
}
|
|
- }
|
|
- if (worker->cp == NULL)
|
|
- init_conn_pool(p, worker);
|
|
- if (worker->cp == NULL) {
|
|
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
|
|
- "can not create connection pool");
|
|
- apr_global_mutex_unlock(proxy_mutex);
|
|
- return APR_EGENERAL;
|
|
- }
|
|
|
|
- if (worker->s->hmax) {
|
|
- rv = apr_reslist_create(&(worker->cp->res),
|
|
- worker->s->min, worker->s->smax,
|
|
- worker->s->hmax, worker->s->ttl,
|
|
- connection_constructor, connection_destructor,
|
|
- worker, worker->cp->pool);
|
|
+ if (worker->s->hmax) {
|
|
+ rv = apr_reslist_create(&(worker->cp->res),
|
|
+ worker->s->min, worker->s->smax,
|
|
+ worker->s->hmax, worker->s->ttl,
|
|
+ connection_constructor, connection_destructor,
|
|
+ worker, worker->cp->pool);
|
|
|
|
- apr_pool_cleanup_register(worker->cp->pool, (void *)worker,
|
|
- conn_pool_cleanup,
|
|
- apr_pool_cleanup_null);
|
|
+ apr_pool_pre_cleanup_register(worker->cp->pool, worker,
|
|
+ conn_pool_cleanup);
|
|
|
|
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930)
|
|
- "initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
|
|
- getpid(), worker->s->hostname_ex, worker->s->min,
|
|
- worker->s->hmax, worker->s->smax);
|
|
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930)
|
|
+ "initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
|
|
+ getpid(), worker->s->hostname_ex, worker->s->min,
|
|
+ worker->s->hmax, worker->s->smax);
|
|
|
|
- /* Set the acquire timeout */
|
|
- if (rv == APR_SUCCESS && worker->s->acquire_set) {
|
|
- apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
|
|
- }
|
|
+ /* Set the acquire timeout */
|
|
+ if (rv == APR_SUCCESS && worker->s->acquire_set) {
|
|
+ apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
|
|
+ }
|
|
|
|
- }
|
|
- else {
|
|
- void *conn;
|
|
+ }
|
|
+ else {
|
|
+ void *conn;
|
|
|
|
- rv = connection_constructor(&conn, worker, worker->cp->pool);
|
|
- worker->cp->conn = conn;
|
|
+ rv = connection_constructor(&conn, worker, worker->cp->pool);
|
|
+ worker->cp->conn = conn;
|
|
|
|
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00931)
|
|
- "initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
|
|
- getpid(), worker->s->hostname_ex);
|
|
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(00931)
|
|
+ "initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
|
|
+ getpid(), worker->s->hostname_ex);
|
|
+ }
|
|
+ if (rv == APR_SUCCESS) {
|
|
+ worker->local_status |= (PROXY_WORKER_INITIALIZED);
|
|
+ }
|
|
}
|
|
apr_global_mutex_unlock(proxy_mutex);
|
|
|
|
}
|
|
if (rv == APR_SUCCESS) {
|
|
worker->s->status |= (PROXY_WORKER_INITIALIZED);
|
|
- worker->local_status |= (PROXY_WORKER_INITIALIZED);
|
|
}
|
|
return rv;
|
|
}
|
|
@@ -2292,13 +2303,13 @@ PROXY_DECLARE(int) ap_proxy_acquire_connection(const char *proxy_function,
|
|
else {
|
|
/* create the new connection if the previous was destroyed */
|
|
if (!worker->cp->conn) {
|
|
- connection_constructor((void **)conn, worker, worker->cp->pool);
|
|
+ rv = connection_constructor((void **)conn, worker, worker->cp->pool);
|
|
}
|
|
else {
|
|
*conn = worker->cp->conn;
|
|
worker->cp->conn = NULL;
|
|
+ rv = APR_SUCCESS;
|
|
}
|
|
- rv = APR_SUCCESS;
|
|
}
|
|
|
|
if (rv != APR_SUCCESS) {
|
|
@@ -2344,7 +2355,9 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
|
|
{
|
|
int server_port;
|
|
apr_status_t err = APR_SUCCESS;
|
|
+#if APR_HAS_THREADS
|
|
apr_status_t uerr = APR_SUCCESS;
|
|
+#endif
|
|
const char *uds_path;
|
|
|
|
/*
|
|
@@ -2481,25 +2494,39 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
|
|
* we can reuse the address.
|
|
*/
|
|
if (!worker->cp->addr) {
|
|
+#if APR_HAS_THREADS
|
|
if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock");
|
|
return HTTP_INTERNAL_SERVER_ERROR;
|
|
}
|
|
+#endif
|
|
|
|
/*
|
|
- * Worker can have the single constant backend address.
|
|
- * The single DNS lookup is used once per worker.
|
|
- * If dynamic change is needed then set the addr to NULL
|
|
- * inside dynamic config to force the lookup.
|
|
+ * Recheck addr after we got the lock. This may have changed
|
|
+ * while waiting for the lock.
|
|
*/
|
|
- err = apr_sockaddr_info_get(&(worker->cp->addr),
|
|
- conn->hostname, APR_UNSPEC,
|
|
- conn->port, 0,
|
|
- worker->cp->pool);
|
|
+ if (!AP_VOLATILIZE_T(apr_sockaddr_t *, worker->cp->addr)) {
|
|
+
|
|
+ apr_sockaddr_t *addr;
|
|
+
|
|
+ /*
|
|
+ * Worker can have the single constant backend address.
|
|
+ * The single DNS lookup is used once per worker.
|
|
+ * If dynamic change is needed then set the addr to NULL
|
|
+ * inside dynamic config to force the lookup.
|
|
+ */
|
|
+ err = apr_sockaddr_info_get(&addr,
|
|
+ conn->hostname, APR_UNSPEC,
|
|
+ conn->port, 0,
|
|
+ worker->cp->dns_pool);
|
|
+ worker->cp->addr = addr;
|
|
+ }
|
|
conn->addr = worker->cp->addr;
|
|
+#if APR_HAS_THREADS
|
|
if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
|
|
ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
|
|
}
|
|
+#endif
|
|
}
|
|
else {
|
|
conn->addr = worker->cp->addr;
|
|
@@ -3422,7 +3449,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec
|
|
(*runtime)->cp = NULL;
|
|
(*runtime)->balancer = b;
|
|
(*runtime)->s = shm;
|
|
+#if APR_HAS_THREADS
|
|
(*runtime)->tmutex = NULL;
|
|
+#endif
|
|
rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
|
|
if (rv != APR_SUCCESS) {
|
|
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");
|