author    Ruediger Pluem <rpluem@apache.org>  2019-10-11 17:11:40 +0200
committer Ruediger Pluem <rpluem@apache.org>  2019-10-11 17:11:40 +0200
commit    fadc83a84fee174c24604f5b37a5446953474f3a (patch)
tree      ebac0e3a095095bf2717a1f7de4ba4cdf66d62f5 /modules/proxy/proxy_util.c
parent    * Add back logging goodness (diff)
Fix pool concurrency problems
Create a subpool of the connection pool for worker scoped DNS
resolutions. This is needed to avoid race conditions in using the
connection pool by multiple threads during ramp up.

Recheck after obtaining the lock if we still need to do things or if
they were already done by another thread while we were waiting on the
lock.

* modules/proxy/proxy_util.c: Create a subpool of the connection pool
  for worker scoped DNS resolutions and use it.

* modules/proxy/mod_proxy.h: Define AP_VOLATILIZE_T and add dns_pool
  to struct proxy_conn_pool.

* modules/proxy/mod_proxy_ftp.c: Use dns_pool and consider that
  worker->cp->addr is volatile in this location of the code.

PR: 63503

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1868296 13f79535-47bb-0310-9956-ffa450edef68
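The recheck-under-lock idiom in this commit relies on the new AP_VOLATILIZE_T macro to force a fresh read of a field that another thread may have written. A minimal sketch of how such a macro can be defined and used; the committed definition lives in mod_proxy.h and the exact spelling here is an assumption:

    /* Read x through a volatile-qualified lvalue of type T, so the
     * compiler reloads it from memory instead of reusing a cached copy.
     * Sketch only; see mod_proxy.h for the committed definition. */
    #define AP_VOLATILIZE_T(T, x) (*(T volatile *)&(x))

    /* Typical use: re-test a flag after acquiring the lock */
    if (!(AP_VOLATILIZE_T(unsigned int, worker->local_status)
          & PROXY_WORKER_INITIALIZED)) {
        /* still uninitialized; this thread performs the setup */
    }

Note that volatile only prevents the compiler from caching the value; the actual ordering guarantees come from the global mutex held around the second check.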
Diffstat (limited to 'modules/proxy/proxy_util.c')
-rw-r--r--  modules/proxy/proxy_util.c  131
1 file changed, 78 insertions, 53 deletions
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 0075b61927..b5787420c2 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -1460,6 +1460,7 @@ static apr_status_t conn_pool_cleanup(void *theworker)
static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
{
apr_pool_t *pool;
+ apr_pool_t *dns_pool;
proxy_conn_pool *cp;
/*
@@ -1471,11 +1472,20 @@ static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
apr_pool_create(&pool, p);
apr_pool_tag(pool, "proxy_worker_cp");
/*
+ * Create a subpool of the connection pool for worker
+ * scoped DNS resolutions. This is needed to avoid race
+ * conditions in using the connection pool by multiple
+ * threads during ramp up.
+ */
+ apr_pool_create(&dns_pool, pool);
+ apr_pool_tag(dns_pool, "proxy_worker_dns");
+ /*
* Alloc from the same pool as worker.
* proxy_conn_pool is permanently attached to the worker.
*/
cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
cp->pool = pool;
+ cp->dns_pool = dns_pool;
worker->cp = cp;
}
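The dns_pool member set here is declared in mod_proxy.h, which is outside this file's diff. A sketch of the updated structure for context; field order and comments are approximate:

    /* Connection pool (sketch; see mod_proxy.h for the real layout) */
    typedef struct {
        apr_pool_t     *pool;     /* pool used by constructor/destructor calls */
        apr_sockaddr_t *addr;     /* preparsed remote address info */
        apr_reslist_t  *res;      /* connection resource list */
        proxy_conn_rec *conn;     /* single connection for prefork mpm */
        apr_pool_t     *dns_pool; /* subpool for worker scoped DNS resolutions */
    } proxy_conn_pool;

Since dns_pool is a child of pool, it is destroyed together with the connection pool, but DNS allocations serialized by the worker lock no longer race with connection allocations made from pool under the reslist's own locking.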
@@ -2044,68 +2054,73 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
ap_proxy_worker_name(p, worker));
}
else {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
- "initializing worker %s local",
- ap_proxy_worker_name(p, worker));
apr_global_mutex_lock(proxy_mutex);
- /* Now init local worker data */
+ /* Check again after we got the lock if we are still uninitialized */
+ if (!(AP_VOLATILIZE_T(unsigned int, worker->local_status) & PROXY_WORKER_INITIALIZED)) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
+ "initializing worker %s local",
+ ap_proxy_worker_name(p, worker));
+ /* Now init local worker data */
#if APR_HAS_THREADS
- if (worker->tmutex == NULL) {
- rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00928)
- "can not create worker thread mutex");
- apr_global_mutex_unlock(proxy_mutex);
- return rv;
+ if (worker->tmutex == NULL) {
+ rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00928)
+ "can not create worker thread mutex");
+ apr_global_mutex_unlock(proxy_mutex);
+ return rv;
+ }
}
- }
#endif
- if (worker->cp == NULL)
- init_conn_pool(p, worker);
- if (worker->cp == NULL) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
- "can not create connection pool");
- apr_global_mutex_unlock(proxy_mutex);
- return APR_EGENERAL;
- }
+ if (worker->cp == NULL)
+ init_conn_pool(p, worker);
+ if (worker->cp == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
+ "can not create connection pool");
+ apr_global_mutex_unlock(proxy_mutex);
+ return APR_EGENERAL;
+ }
- if (worker->s->hmax) {
- rv = apr_reslist_create(&(worker->cp->res),
- worker->s->min, worker->s->smax,
- worker->s->hmax, worker->s->ttl,
- connection_constructor, connection_destructor,
- worker, worker->cp->pool);
+ if (worker->s->hmax) {
+ rv = apr_reslist_create(&(worker->cp->res),
+ worker->s->min, worker->s->smax,
+ worker->s->hmax, worker->s->ttl,
+ connection_constructor, connection_destructor,
+ worker, worker->cp->pool);
- apr_pool_pre_cleanup_register(worker->cp->pool, worker,
- conn_pool_cleanup);
+ apr_pool_pre_cleanup_register(worker->cp->pool, worker,
+ conn_pool_cleanup);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930)
- "initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
- getpid(), worker->s->hostname_ex, worker->s->min,
- worker->s->hmax, worker->s->smax);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00930)
+ "initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
+ getpid(), worker->s->hostname_ex, worker->s->min,
+ worker->s->hmax, worker->s->smax);
- /* Set the acquire timeout */
- if (rv == APR_SUCCESS && worker->s->acquire_set) {
- apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
- }
+ /* Set the acquire timeout */
+ if (rv == APR_SUCCESS && worker->s->acquire_set) {
+ apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
+ }
- }
- else {
- void *conn;
+ }
+ else {
+ void *conn;
- rv = connection_constructor(&conn, worker, worker->cp->pool);
- worker->cp->conn = conn;
+ rv = connection_constructor(&conn, worker, worker->cp->pool);
+ worker->cp->conn = conn;
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(00931)
- "initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
- getpid(), worker->s->hostname_ex);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(00931)
+ "initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
+ getpid(), worker->s->hostname_ex);
+ }
+ if (rv == APR_SUCCESS) {
+ worker->local_status |= (PROXY_WORKER_INITIALIZED);
+ }
}
apr_global_mutex_unlock(proxy_mutex);
}
if (rv == APR_SUCCESS) {
worker->s->status |= (PROXY_WORKER_INITIALIZED);
- worker->local_status |= (PROXY_WORKER_INITIALIZED);
}
return rv;
}
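The re-indentation above implements the classic check / lock / re-check idiom: a cheap unlocked test, then a second test under the global mutex so that only one thread performs the one-time setup. Stripped of the worker details it looks like the sketch below, where do_one_time_init() is a hypothetical stand-in for the mutex, pool and reslist creation:

    apr_status_t rv = APR_SUCCESS;

    if (!(AP_VOLATILIZE_T(unsigned int, worker->local_status)
          & PROXY_WORKER_INITIALIZED)) {       /* fast path: no lock taken */
        apr_global_mutex_lock(proxy_mutex);
        /* re-check: another thread may have finished the work
         * while we were waiting on the mutex */
        if (!(AP_VOLATILIZE_T(unsigned int, worker->local_status)
              & PROXY_WORKER_INITIALIZED)) {
            rv = do_one_time_init(worker);     /* hypothetical helper */
            if (rv == APR_SUCCESS) {
                worker->local_status |= PROXY_WORKER_INITIALIZED;
            }
        }
        apr_global_mutex_unlock(proxy_mutex);
    }

Setting the flag last, while still holding the mutex, ensures no thread can observe PROXY_WORKER_INITIALIZED before the initialization it advertises has completed.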
@@ -2559,15 +2574,25 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
}
/*
- * Worker can have the single constant backend address.
- * The single DNS lookup is used once per worker.
- * If dynamic change is needed then set the addr to NULL
- * inside dynamic config to force the lookup.
+ * Recheck addr after we got the lock. This may have changed
+ * while waiting for the lock.
*/
- err = apr_sockaddr_info_get(&(worker->cp->addr),
- conn->hostname, APR_UNSPEC,
- conn->port, 0,
- worker->cp->pool);
+ if (!AP_VOLATILIZE_T(apr_sockaddr_t *, worker->cp->addr)) {
+
+ apr_sockaddr_t *addr;
+
+ /*
+ * Worker can have the single constant backend address.
+ * The single DNS lookup is used once per worker.
+ * If dynamic change is needed then set the addr to NULL
+ * inside dynamic config to force the lookup.
+ */
+ err = apr_sockaddr_info_get(&addr,
+ conn->hostname, APR_UNSPEC,
+ conn->port, 0,
+ worker->cp->dns_pool);
+ worker->cp->addr = addr;
+ }
conn->addr = worker->cp->addr;
if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
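Two details in this last hunk are worth spelling out: the lookup now allocates from dns_pool (APR pools are not thread-safe, so these allocations must stay serialized by the worker lock), and the result is built in a local variable and published to worker->cp->addr with a single pointer store. A sketch of that publish-after-complete pattern; the error check is illustrative, the committed code assigns unconditionally:

    apr_sockaddr_t *addr = NULL;
    apr_status_t err;

    /* Resolve into the worker scoped DNS subpool, under the worker lock */
    err = apr_sockaddr_info_get(&addr, conn->hostname, APR_UNSPEC,
                                conn->port, 0, worker->cp->dns_pool);
    if (err == APR_SUCCESS) {
        /* Single pointer store after the sockaddr is fully built, so a
         * reader polling worker->cp->addr (e.g. mod_proxy_ftp through
         * AP_VOLATILIZE_T) sees either NULL or a complete result */
        worker->cp->addr = addr;
    }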