/* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Load balancer module for Apache proxy */
#include "mod_proxy.h"
#include "scoreboard.h"
#include "ap_mpm.h"
#include "apr_version.h"
#include "ap_hooks.h"
#include "apr_date.h"
#include "apr_escape.h"
#include "mod_watchdog.h"
static const char *balancer_mutex_type = "proxy-balancer-shm";
ap_slotmem_provider_t *storage = NULL;
module AP_MODULE_DECLARE_DATA proxy_balancer_module;
static APR_OPTIONAL_FN_TYPE(set_worker_hc_param) *set_worker_hc_param_f = NULL;
static int (*ap_proxy_retry_worker_fn)(const char *proxy_function,
proxy_worker *worker, server_rec *s) = NULL;
static APR_OPTIONAL_FN_TYPE(hc_show_exprs) *hc_show_exprs_f = NULL;
static APR_OPTIONAL_FN_TYPE(hc_select_exprs) *hc_select_exprs_f = NULL;
static APR_OPTIONAL_FN_TYPE(hc_valid_expr) *hc_valid_expr_f = NULL;
/*
* Register our mutex type before the config is read so we
* can adjust the mutex settings using the Mutex directive.
*/
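/*
 * The registered mutex can then be tuned in the server configuration,
 * e.g. (illustrative lock directory):
 *
 *   Mutex file:/var/httpd/locks proxy-balancer-shm
 */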
static int balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp)
{
apr_status_t rv;
rv = ap_mutex_register(pconf, balancer_mutex_type, NULL,
APR_LOCK_DEFAULT, 0);
if (rv != APR_SUCCESS) {
return rv;
}
set_worker_hc_param_f = APR_RETRIEVE_OPTIONAL_FN(set_worker_hc_param);
hc_show_exprs_f = APR_RETRIEVE_OPTIONAL_FN(hc_show_exprs);
hc_select_exprs_f = APR_RETRIEVE_OPTIONAL_FN(hc_select_exprs);
hc_valid_expr_f = APR_RETRIEVE_OPTIONAL_FN(hc_valid_expr);
return OK;
}
#if 0
extern void proxy_update_members(proxy_balancer **balancer, request_rec *r,
proxy_server_conf *conf);
#endif
static int proxy_balancer_canon(request_rec *r, char *url)
{
char *host, *path;
char *search = NULL;
const char *err;
apr_port_t port = 0;
/* TODO: offset of BALANCER_PREFIX ?? */
if (ap_cstr_casecmpn(url, "balancer:", 9) == 0) {
url += 9;
}
else {
return DECLINED;
}
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "canonicalising URL %s", url);
    /* do syntactic check.
     * We break the URL into host, port, path, search
     */
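    /* For example (illustrative), "balancer://mycluster/app/data" is split
     * into host "mycluster" and path "app/data"; the query string, if any,
     * is taken from r->args below.
     */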
err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
if (err) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01157)
"error parsing URL %s: %s",
url, err);
return HTTP_BAD_REQUEST;
}
    /*
     * now parse path/search args, according to rfc1738:
     * process the path. With the proxy-nocanon note set (by
     * mod_proxy) we use the raw, unparsed URI.
     */
if (apr_table_get(r->notes, "proxy-nocanon")) {
path = url; /* this is the raw path */
}
else {
path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
r->proxyreq);
search = r->args;
}
if (path == NULL)
return HTTP_BAD_REQUEST;
r->filename = apr_pstrcat(r->pool, "proxy:" BALANCER_PREFIX, host,
"/", path, (search) ? "?" : "", (search) ? search : "", NULL);
r->path_info = apr_pstrcat(r->pool, "/", path, NULL);
return OK;
}
static void init_balancer_members(apr_pool_t *p, server_rec *s,
proxy_balancer *balancer)
{
int i;
proxy_worker **workers;
workers = (proxy_worker **)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++) {
int worker_is_initialized;
proxy_worker *worker = *workers;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01158)
"Looking at %s -> %s initialized?", balancer->s->name,
ap_proxy_worker_name(p, worker));
worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(worker);
if (!worker_is_initialized) {
ap_proxy_initialize_worker(worker, s, p);
}
++workers;
}
    /* Set the default number of attempts to one less than
     * the number of workers.
     */
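    /* For instance (illustrative), a balancer with four members
     * defaults to max_attempts = 3.
     */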
if (!balancer->s->max_attempts_set && balancer->workers->nelts > 1) {
balancer->s->max_attempts = balancer->workers->nelts - 1;
balancer->s->max_attempts_set = 1;
}
}
/* Retrieve the parameter with the given name
* Something like 'JSESSIONID=12345...N'
*/
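/* For example (illustrative values), with scolon_sep enabled a URL such as
 *   /app/page;JSESSIONID=abc123.worker1?foo=bar
 * yields "abc123.worker1" for the name "JSESSIONID".
 */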
static char *get_path_param(apr_pool_t *pool, char *url,
const char *name, int scolon_sep)
{
char *path = NULL;
char *pathdelims = "?&";
if (scolon_sep) {
pathdelims = ";?&";
}
for (path = strstr(url, name); path; path = strstr(path + 1, name)) {
path += strlen(name);
if (*path == '=') {
/*
* Session path was found, get its value
*/
++path;
if (*path) {
char *q;
path = apr_strtok(apr_pstrdup(pool, path), pathdelims, &q);
return path;
}
}
}
return NULL;
}
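/* Retrieve the session cookie value for the given cookie name.
 * For example (illustrative values), with name "JSESSIONID" a header of
 *   Cookie: other=1; JSESSIONID=abc123.worker1; theme=dark
 * yields "abc123.worker1".
 */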
static char *get_cookie_param(request_rec *r, const char *name)
{
const char *cookies;
const char *start_cookie;
if ((cookies = apr_table_get(r->headers_in, "Cookie"))) {
for (start_cookie = ap_strstr_c(cookies, name); start_cookie;
start_cookie = ap_strstr_c(start_cookie + 1, name)) {
if (start_cookie == cookies ||
start_cookie[-1] == ';' ||
start_cookie[-1] == ',' ||
isspace(start_cookie[-1])) {
start_cookie += strlen(name);
while(*start_cookie && isspace(*start_cookie))
++start_cookie;
if (*start_cookie++ == '=' && *start_cookie) {
/*
* Session cookie was found, get its value
*/
char *end_cookie, *cookie;
cookie = apr_pstrdup(r->pool, start_cookie);
if ((end_cookie = strchr(cookie, ';')) != NULL)
*end_cookie = '\0';
if((end_cookie = strchr(cookie, ',')) != NULL)
*end_cookie = '\0';
return cookie;
}
}
}
}
return NULL;
}
/* Find the worker that has the 'route' defined
*/
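/* Routes are typically assigned in the configuration, e.g. (illustrative):
 *   BalancerMember "http://app1.example.com" route=worker1
 * and compared against the route extracted from the session identifier.
 */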
static proxy_worker *find_route_worker(proxy_balancer *balancer,
const char *route, request_rec *r,
int recursion)
{
int i;
int checking_standby;
int checked_standby;
proxy_worker **workers;
checking_standby = checked_standby = 0;
while (!checked_standby) {
workers = (proxy_worker **)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++, workers++) {
proxy_worker *worker = *workers;
if ( (checking_standby ? !PROXY_WORKER_IS_STANDBY(worker) : PROXY_WORKER_IS_STANDBY(worker)) )
continue;
if (*(worker->s->route) && strcmp(worker->s->route, route) == 0) {
if (PROXY_WORKER_IS_USABLE(worker)) {
return worker;
} else {
                    /*
                     * If the worker is in error state, run
                     * retry on that worker. It will be marked as
                     * operational if the retry timeout has elapsed.
                     * The worker might still be unusable, but we try
                     * anyway.
                     */
ap_proxy_retry_worker_fn("BALANCER", worker, r->server);
if (PROXY_WORKER_IS_USABLE(worker)) {
return worker;
} else {
                        /*
                         * We have a worker that is unusable.
                         * It can be in error or disabled, but if it
                         * has a redirection set, use that redirection worker.
                         * This makes it possible to safely remove the member
                         * from the balancer. Of course you will need some kind
                         * of session replication between those two remote
                         * workers. Also check that we haven't gone through all
                         * the balancer members by means of redirects, so that
                         * we avoid redirect cycles.
                         */
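                        /* e.g. (illustrative) a member defined as
                         *   BalancerMember "http://app1.example.com" route=w1 redirect=w2
                         * sends requests routed to w1 over to the member whose
                         * route is w2 while w1 is unusable.
                         */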
if ((*worker->s->redirect)
&& (recursion < balancer->workers->nelts)) {
proxy_worker *rworker = NULL;
rworker = find_route_worker(balancer, worker->s->redirect,
r, recursion + 1);
/* Check if the redirect worker is usable */
if (rworker && !PROXY_WORKER_IS_USABLE(rworker)) {
                                /*
                                 * If the worker is in error state, run
                                 * retry on that worker. It will be marked as
                                 * operational if the retry timeout has elapsed.
                                 * The worker might still be unusable, but we
                                 * try anyway.
                                 */
ap_proxy_retry_worker_fn("BALANCER", rworker, r->server);
}
if (rworker && PROXY_WORKER_IS_USABLE(rworker))
return rworker;
}
}
}
}
}
checked_standby = checking_standby++;
}
return NULL;
}
static proxy_worker *find_session_route(proxy_balancer *balancer,
request_rec *r,
char **route,
const char **sticky_used,
char **url)
{
proxy_worker *worker = NULL;
if (!*balancer->s->sticky)
return NULL;
/* Try to find the sticky route inside url */
*route = get_path_param(r->pool, *url, balancer->s->sticky_path, balancer->s->scolonsep);
if (*route) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01159)
"Found value %s for stickysession %s",
*route, balancer->s->sticky_path);
*sticky_used = balancer->s->sticky_path;
}
else {
*route = get_cookie_param(r, balancer->s->sticky);
if (*route) {
*sticky_used = balancer->s->sticky;
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01160)
"Found value %s for stickysession %s",
*route, balancer->s->sticky);
}
}
/*
* If we found a value for stickysession, find the first '.' (or whatever
* sticky_separator is set to) within. Everything after '.' (if present)
* is our route.
*/
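    /* For instance (illustrative value), a sticky value of "abc123.worker1"
     * with the default '.' separator yields the route "worker1", which is
     * then matched against each member's configured route.
     */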
if ((*route) && (balancer->s->sticky_separator != 0) && ((*route = strchr(*route, balancer->s->sticky_separator)) != NULL ))
(*route)++;
if ((*route) && (**route)) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01161) "Found route %s", *route);
/* We have a route in path or in cookie
* Find the worker that has this route defined.
*/
worker = find_route_worker(balancer, *route, r, 1);
if (worker && strcmp(*route, worker->s->route)) {
/*
* Notice that the route of the worker chosen is different from
* the route supplied by the client.
*/
apr_table_setn(r->subprocess_env, "BALANCER_ROUTE_CHANGED", "1");
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01162)
"Route changed from %s to %s",
*route, worker->s->route);
}
return worker;
}
else
return NULL;
}
static proxy_worker *find_best_worker(proxy_balancer *balancer,
request_rec *r)
{
proxy_worker *candidate = NULL;
apr_status_t rv;
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
"%s: Lock failed for find_best_worker()",
balancer->s->name);
return NULL;
}
#endif
candidate = (*balancer->lbmethod->finder)(balancer, r);
if (candidate)
candidate->s->elected++;
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
"%s: Unlock failed for find_best_worker()",
balancer->s->name);
}
#endif
if (candidate == NULL) {
        /* All the workers are in error state or disabled.
         * If the balancer has a timeout, sleep for a while
         * and try again to find a worker. The chances are
         * that some other thread will release a connection.
         * By default the timeout is not set, and the server
         * returns HTTP_SERVICE_UNAVAILABLE.
         */
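        /* The wait below only happens when a balancer timeout is
         * configured, e.g. (illustrative):
         *   <Proxy "balancer://mycluster">
         *       ProxySet timeout=10
         *   </Proxy>
         */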
if (balancer->s->timeout) {
            /* XXX: This could perhaps be built using some
             * smarter mechanism, like a thread condition.
             * But since the statuses can come from
             * different children, use the provided algorithm.
             */
apr_interval_time_t timeout = balancer->s->timeout;
apr_interval_time_t step, tval = 0;
            /* Set the timeout to 0 so that we don't
             * end up in an infinite loop
             */
balancer->s->timeout = 0;
step = timeout / 100;
while (tval < timeout) {
apr_sleep(step);
/* Try again */
if ((candidate = find_best_worker(balancer, r)))
break;
tval += step;
}
/* restore the timeout */
balancer->s->timeout = timeout;
}
}
return candidate;
}
static int rewrite_url(request_rec *r, proxy_worker *worker,
char **url)
{
const char *scheme = strstr(*url, "://");
const char *path = NULL;
if (scheme)
path = ap_strchr_c(scheme + 3, '/');
/* we break the URL into host, port, uri */
if (!worker) {
return ap_proxyerror(r, HTTP_BAD_REQUEST, apr_pstrcat(r->pool,
"missing worker. URI cannot be parsed: ", *url,
NULL));
}
*url = apr_pstrcat(r->pool, worker->s->name, path, NULL);
return OK;
}
static void force_recovery(proxy_balancer *balancer, server_rec *s)
{
int i;
int ok = 0;
proxy_worker **worker;
worker = (proxy_worker **)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++, worker++) {
if (!((*worker)->s->status & PROXY_WORKER_IN_ERROR)) {
ok = 1;
break;
}
else {
            /* See if we can recover */
ap_proxy_retry_worker_fn("BALANCER", *worker, s);
if (!((*worker)->s->status & PROXY_WORKER_IN_ERROR)) {
ok = 1;
break;
}
}
}
if (!ok && balancer->s->forcerecovery) {
        /* If all workers are in error state, force the recovery.
         */
worker = (proxy_worker **)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++, worker++) {
++(*worker)->s->retries;
(*worker)->s->status &= ~PROXY_WORKER_IN_ERROR;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01165)
"%s: Forcing recovery for worker (%s)",
balancer->s->name, (*worker)->s->hostname_ex);
}
}
}
static apr_status_t decrement_busy_count(void *worker_)
{
proxy_worker *worker = worker_;
if (worker->s->busy) {
worker->s->busy--;
}
return APR_SUCCESS;
}
static int proxy_balancer_pre_request(proxy_worker **worker,
proxy_balancer **balancer,
request_rec *r,
proxy_server_conf *conf, char **url)
{
int access_status;
proxy_worker *runtime;
char *route = NULL;
const char *sticky = NULL;
apr_status_t rv;
*worker = NULL;
    /* Step 1: check if the url is for us
     * The url we can handle starts with 'balancer://'
     * If a balancer is already provided, skip the search
     * for one, because this is a failover attempt.
     */
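    /* Such URLs typically come from a mapping like (illustrative):
     *   ProxyPass "/app" "balancer://mycluster"
     */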
if (!*balancer &&
!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url, 1)))
return DECLINED;
/* Step 2: Lock the LoadBalancer
* XXX: perhaps we need the process lock here
*/
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
"%s: Lock failed for pre_request", (*balancer)->s->name);
return DECLINED;
}
#endif
/* Step 3: force recovery */
force_recovery(*balancer, r->server);
/* Step 3.5: Update member list for the balancer */
/* TODO: Implement as provider! */
ap_proxy_sync_balancer(*balancer, r->server, conf);
/* Step 4: find the session route */
runtime = find_session_route(*balancer, r, &route, &sticky, url);
if (runtime) {
if ((*balancer)->lbmethod && (*balancer)->lbmethod->updatelbstatus) {
/* Call the LB implementation */
(*balancer)->lbmethod->updatelbstatus(*balancer, runtime, r->server);
}
else { /* Use the default one */
int i, total_factor = 0;
proxy_worker **workers;
            /* We have a sticky load balancer.
             * Update the workers' lb status
             * so that sticky session routes are also
             * taken into account.
             */
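            /* For instance (illustrative numbers): with two usable workers
             * whose lbfactor is 100 each, a sticky request to worker A adds
             * 100 to both lbstatus values and then subtracts total_factor
             * (200) from A, leaving A at -100 and B at +100, so B is
             * favoured for the next non-sticky selection.
             */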
workers = (proxy_worker **)(*balancer)->workers->elts;
for (i = 0; i < (*balancer)->workers->nelts; i++) {
                /* Take into account only the workers that are
                 * neither in error state nor disabled.
                 */
if (PROXY_WORKER_IS_USABLE(*workers)) {
(*workers)->s->lbstatus += (*workers)->s->lbfactor;
total_factor += (*workers)->s->lbfactor;
}
workers++;
}
runtime->s->lbstatus -= total_factor;
}
runtime->s->elected++;
*worker = runtime;
}
else if (route && (*balancer)->s->sticky_force) {
int i, member_of = 0;
proxy_worker **workers;
        /*
         * We have a route provided that doesn't match the
         * balancer name. See if the provided route belongs to a
         * member of the same balancer, in which case return 503.
         */
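        /* Forced stickiness corresponds to the balancer's nofailover
         * parameter, e.g. (illustrative):
         *   ProxySet stickysession=JSESSIONID nofailover=On
         */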
workers = (proxy_worker **)(*balancer)->workers->elts;
for (i = 0; i < (*balancer)->workers->nelts; i++) {
if (*((*workers)->s->route) && strcmp((*workers)->s->route, route) == 0) {
member_of = 1;
break;
}
workers++;
}
if (member_of) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
"%s: All workers are in error state for route (%s)",
(*balancer)->s->name, route);
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
"%s: Unlock failed for pre_request",
(*balancer)->s->name);
}
#endif
return HTTP_SERVICE_UNAVAILABLE;
}
}
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
"%s: Unlock failed for pre_request",
(*balancer)->s->name);
}
#endif
if (!*worker) {
runtime = find_best_worker(*balancer, r);
if (!runtime) {
if ((*balancer)->workers->nelts) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01170)
"%s: All workers are in error state",
(*balancer)->s->name);
} else {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01171)
"%s: No workers in balancer",
(*balancer)->s->name);
}
return HTTP_SERVICE_UNAVAILABLE;
}
if (*(*balancer)->s->sticky && runtime) {
/*
* This balancer has sticky sessions and the client either has not
* supplied any routing information or all workers for this route
* including possible redirect and hotstandby workers are in error
* state, but we have found another working worker for this
* balancer where we can send the request. Thus notice that we have
* changed the route to the backend.
*/
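            /* The flag can be acted upon in the configuration, e.g.
             * (illustrative):
             *   Header set X-Route-Changed "1" env=BALANCER_ROUTE_CHANGED
             */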
apr_table_setn(r->subprocess_env, "BALANCER_ROUTE_CHANGED", "1");
}
*worker = runtime;
}
(*worker)->s->busy++;
apr_pool_cleanup_register(r->pool, *worker, decrement_busy_count,
apr_pool_cleanup_null);
/* Add balancer/worker info to env. */
apr_table_setn(r->subprocess_env,
"BALANCER_NAME", (*balancer)->s->name);
apr_table_setn(r->subprocess_env,
"BALANCER_WORKER_NAME", (*worker)->s->name);
apr_table_setn(r->subprocess_env,
"BALANCER_WORKER_ROUTE", (*worker)->s->route);
    /* Rewrite the url from 'balancer://url'
     * to the 'worker_scheme://worker_hostname[:worker_port]/url'
     * This replaces the balancer's fictional name with the
     * real hostname of the elected worker.
     */
access_status = rewrite_url(r, *worker, url);
/* Add the session route to request notes if present */
if (route) {
apr_table_setn(r->notes, "session-sticky", sticky);
apr_table_setn(r->notes, "session-route", route);
/* Add session info to env. */
apr_table_setn(r->subprocess_env,
"BALANCER_SESSION_STICKY", sticky);
apr_table_setn(r->subprocess_env,
"BALANCER_SESSION_ROUTE", route);
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01172)
"%s: worker (%s) rewritten to %s",
(*balancer)->s->name, (*worker)->s->name, *url);
return access_status;
}
static int proxy_balancer_post_request(proxy_worker *worker,
proxy_balancer *balancer,
request_rec *r,
proxy_server_conf *conf)
{
apr_status_t rv;
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173)
"%s: Lock failed for post_request",
balancer->s->name);
return HTTP_INTERNAL_SERVER_ERROR;
}
#endif
if (!apr_is_empty_array(balancer->errstatuses)
&& !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
int i;
for (i = 0; i < balancer->errstatuses->nelts; i++) {
int val = ((int *)balancer->errstatuses->elts)[i];
if (r->status == val) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01174)
"%s: Forcing worker (%s) into error state "
"due to status code %d matching 'failonstatus' "
"balancer parameter",
balancer->s->name, ap_proxy_worker_name(r->pool, worker),
val);
worker->s->status |= PROXY_WORKER_IN_ERROR;
worker->s->error_time = apr_time_now();
break;
}
}
}
if (balancer->failontimeout
&& !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)
&& (apr_table_get(r->notes, "proxy_timedout")) != NULL) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02460)
"%s: Forcing worker (%s) into error state "
"due to timeout and 'failontimeout' parameter being set",
balancer->s->name, ap_proxy_worker_name(r->pool, worker));
worker->s->status |= PROXY_WORKER_IN_ERROR;
worker->s->error_time = apr_time_now();
}
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
"%s: Unlock failed for post_request", balancer->s->name);
}
#endif
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176)
"proxy_balancer_post_request for (%s)", balancer->s->name);
return OK;
}
static void recalc_factors(proxy_balancer *balancer)
{
int i;
proxy_worker **workers;
/* Recalculate lbfactors */
workers = (proxy_worker **)balancer->workers->elts;
    /* Special case: if there is only one worker, its
     * load factor will always be 100
     */
if (balancer->workers->nelts == 1) {
(*workers)->s->lbstatus = (*workers)->s->lbfactor = 100;
return;
}
for (i = 0; i < balancer->workers->nelts; i++) {
/* Update the status entries */
workers[i]->s->lbstatus = workers[i]->s->lbfactor;
}
}
static apr_status_t lock_remove(void *data)
{
int i;
proxy_balancer *balancer;
server_rec *s = data;
void *sconf = s->module_config;
proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
if (balancer->gmutex) {
apr_global_mutex_destroy(balancer->gmutex);
balancer->gmutex = NULL;
}
}
return(0);
}
/*
 * Compute an ID for a vhost based on what makes it selected by requests.
 * The second and subsequent Host(s)/IP(s):port(s), and the ServerAlias(es),
 * are optional (see make_servers_ids() below).
 */
static const char *make_server_id(server_rec *s, apr_pool_t *p, int full)
{
apr_md5_ctx_t md5_ctx;
unsigned char md5[APR_MD5_DIGESTSIZE];
char host_ip[64]; /* for any IPv[46] string */
server_addr_rec *sar;
int i;
apr_md5_init(&md5_ctx);
for (sar = s->addrs; sar; sar = sar->next) {
host_ip[0] = '\0';
apr_sockaddr_ip_getbuf(host_ip, sizeof host_ip, sar->host_addr);
apr_md5_update(&md5_ctx, (void *)sar->virthost, strlen(sar->virthost));
apr_md5_update(&md5_ctx, (void *)host_ip, strlen(host_ip));
apr_md5_update(&md5_ctx, (void *)&sar->host_port,
sizeof(sar->host_port));
if (!full) {
break;
}
}
if (s->server_hostname) {
apr_md5_update(&md5_ctx, (void *)s->server_hostname,
strlen(s->server_hostname));
}
if (full) {
if (s->names) {
for (i = 0; i < s->names->nelts; ++i) {
const char *name = APR_ARRAY_IDX(s->names, i, char *);
apr_md5_update(&md5_ctx, (void *)name, strlen(name));
}
}
if (s->wild_names) {
for (i = 0; i < s->wild_names->nelts; ++i) {
const char *name = APR_ARRAY_IDX(s->wild_names, i, char *);
apr_md5_update(&md5_ctx, (void *)name, strlen(name));
}
}
}
apr_md5_final(md5, &md5_ctx);
return apr_pescape_hex(p, md5, sizeof md5, 0);
}
/*
 * First try to compute a unique ID for each vhost with minimal criteria,
 * that is the first Host/IP:port and ServerName. For most cases this should
 * be enough and avoids changing the ID unnecessarily across restarts (or
 * stop/start w.r.t. persisted files) for things that this module does not
 * care about.
 *
 * But if that's not enough (collisions), do a second pass for the full monty,
 * that is additionally the other Host(s)/IP(s):port(s) and ServerAlias(es).
 *
 * Finally, for pathological configs where this is still not enough, append
 * a counter to duplicates, because we really want that ID to be unique even
 * if the vhost will never be selected to handle requests at run time; at
 * load time a duplicate may steal the original slotmems (depending on its
 * balancers' configurations), see how mod_slotmem_shm reuses slots/files
 * based solely on this ID and resets them if the sizes don't match.
 */
static apr_array_header_t *make_servers_ids(server_rec *main_s, apr_pool_t *p)
{
server_rec *s = main_s;
apr_array_header_t *ids = apr_array_make(p, 10, sizeof(const char *));
apr_hash_t *dups = apr_hash_make(p);
int idx, *dup, full_monty = 0;
const char *id;
for (idx = 0, s = main_s; s; s = s->next, ++idx) {
id = make_server_id(s, p, 0);
dup = apr_hash_get(dups, id, APR_HASH_KEY_STRING);
apr_hash_set(dups, id, APR_HASH_KEY_STRING,
apr_pmemdup(p, &idx, sizeof(int)));
if (dup) {
full_monty = 1;
APR_ARRAY_IDX(ids, *dup, const char *) = NULL;
APR_ARRAY_PUSH(ids, const char *) = NULL;
}
else {
APR_ARRAY_PUSH(ids, const char *) = id;
}
}
if (full_monty) {
apr_hash_clear(dups);
for (idx = 0, s = main_s; s; s = s->next, ++idx) {
id = APR_ARRAY_IDX(ids, idx, const char *);
if (id) {
/* Preserve non-duplicates */
continue;
}
id = make_server_id(s, p, 1);
if (apr_hash_get(dups, id, APR_HASH_KEY_STRING)) {
id = apr_psprintf(p, "%s_%x", id, idx);
}
else {
apr_hash_set(dups, id, APR_HASH_KEY_STRING, (void *)-1);
}
APR_ARRAY_IDX(ids, idx, const char *) = id;
}
}
return ids;
}
/* post_config hook: */
static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp, server_rec *s)
{
apr_status_t rv;
proxy_server_conf *conf;
ap_slotmem_instance_t *new = NULL;
apr_time_t tstamp;
apr_array_header_t *ids;
int idx;
/* balancer_post_config() will be called twice during startup. So, don't
* set up the static data the 1st time through. */
if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) {
return OK;
}
ap_proxy_retry_worker_fn =
APR_RETRIEVE_OPTIONAL_FN(ap_proxy_retry_worker);
if (!ap_proxy_retry_worker_fn) {
ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02230)
"mod_proxy must be loaded for mod_proxy_balancer");
return !OK;
}
/*
* Get slotmem setups
*/
storage = ap_lookup_provider(AP_SLOTMEM_PROVIDER_GROUP, "shm",
AP_SLOTMEM_PROVIDER_VERSION);
if (!storage) {
ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01177)
"Failed to lookup provider 'shm' for '%s': is "
"mod_slotmem_shm loaded??",
AP_SLOTMEM_PROVIDER_GROUP);
return !OK;
}
ids = make_servers_ids(s, ptemp);
tstamp = apr_time_now();
    /*
     * Go through each vhost and create the shared mem slotmem for
     * each balancer's workers
     */
for (idx = 0; s; ++idx) {
int i,j;
const char *id;
proxy_balancer *balancer;
ap_slotmem_type_t type;
void *sconf = s->module_config;
conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
/*
* During create_proxy_config() we created a dummy id. Now that
* we have identifying info, we can create the real id
*/
id = APR_ARRAY_IDX(ids, idx, const char *);
conf->id = apr_psprintf(pconf, "p%x",
ap_proxy_hashfunc(id, PROXY_HASHFUNC_DEFAULT));
if (conf->bslot) {
/* Shared memory already created for this proxy_server_conf.
*/
s = s->next;
continue;
}
if (conf->bal_persist) {
type = AP_SLOTMEM_TYPE_PERSIST | AP_SLOTMEM_TYPE_CLEARINUSE;
} else {
type = 0;
}
if (conf->balancers->nelts) {
conf->max_balancers = conf->balancers->nelts + conf->bgrowth;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01178) "Doing balancers create: %d, %d (%d)",
(int)ALIGNED_PROXY_BALANCER_SHARED_SIZE,
(int)conf->balancers->nelts, conf->max_balancers);
rv = storage->create(&new, conf->id,
ALIGNED_PROXY_BALANCER_SHARED_SIZE,
conf->max_balancers, type, pconf);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01179) "balancer slotmem_create failed");
return !OK;
}
conf->bslot = new;
}
conf->storage = storage;
/* Initialize shared scoreboard data */
balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
proxy_worker **workers;
proxy_worker *worker;
proxy_balancer_shared *bshm;
const char *sname;
unsigned int index;
/* now that we have the right id, we need to redo the sname field */
ap_pstr2_alnum(pconf, balancer->s->name + sizeof(BALANCER_PREFIX) - 1,
&sname);
sname = apr_pstrcat(pconf, conf->id, "_", sname, NULL);
PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
balancer->max_workers = balancer->workers->nelts + balancer->growth;
/* Create global mutex */
rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
balancer->s->sname, s, pconf, 0);
if (rv != APR_SUCCESS || !balancer->gmutex) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01180)
"mutex creation of %s : %s failed", balancer_mutex_type,
balancer->s->sname);
return HTTP_INTERNAL_SERVER_ERROR;
}
apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
apr_pool_cleanup_null);
/* setup shm for balancers */
bshm = ap_proxy_find_balancershm(storage, conf->bslot, balancer, &index);
if (bshm) {
if ((rv = storage->fgrab(conf->bslot, index)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(02408) "balancer slotmem_fgrab failed");
return !OK;
}
}
else {
if ((rv = storage->grab(conf->bslot, &index)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01181) "balancer slotmem_grab failed");
return !OK;
}
if ((rv = storage->dptr(conf->bslot, index, (void *)&bshm)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01182) "balancer slotmem_dptr failed");
return !OK;
}
}
if ((rv = ap_proxy_share_balancer(balancer, bshm, index)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01183) "Cannot share balancer");
return !OK;
}
/* create slotmem slots for workers */
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01184) "Doing workers create: %s (%s), %d, %d [%u]",
balancer->s->name, balancer->s->sname,
(int)ALIGNED_PROXY_WORKER_SHARED_SIZE,
(int)balancer->max_workers, i);
rv = storage->create(&new, balancer->s->sname,
ALIGNED_PROXY_WORKER_SHARED_SIZE,
balancer->max_workers, type, pconf);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01185) "worker slotmem_create failed");
return !OK;
}
balancer->wslot = new;
balancer->storage = storage;
/* sync all timestamps */
balancer->wupdated = balancer->s->wupdated = tstamp;
            /* now go through each worker */
workers = (proxy_worker **)balancer->workers->elts;
for (j = 0; j < balancer->workers->nelts; j++, workers++) {
proxy_worker_shared *shm;
worker = *workers;
shm = ap_proxy_find_workershm(storage, balancer->wslot, worker, &index);
if (shm) {
if ((rv = storage->fgrab(balancer->wslot, index)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(02409) "worker slotmem_fgrab failed");
return !OK;
}
}
else {
if ((rv = storage->grab(balancer->wslot, &index)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01186) "worker slotmem_grab failed");
return !OK;
}
if ((rv = storage->dptr(balancer->wslot, index, (void *)&shm)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01187) "worker slotmem_dptr failed");
return !OK;
}
}
if ((rv = ap_proxy_share_worker(worker, shm, index)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(01188) "Cannot share worker");
return !OK;
}
worker->s->updated = tstamp;
}
if (conf->bal_persist) {
/* We could have just read-in a persisted config. Force a sync. */
balancer->wupdated--;
ap_proxy_sync_balancer(balancer, s, conf);
}
}
s = s->next;
}
return OK;
}
static void create_radio(const char *name, unsigned int flag, request_rec *r)
{
    ap_rvputs(r, "<td>On <input name='", name, "' id='", name,
              "' value='1' type=radio", flag ? " checked" : "",
              " /> Off <input name='", name, "' id='", name,
              "' value='0' type=radio", flag ? "" : " checked",
              " /></td>\n", NULL);
}
static void push2table(const char *input, apr_table_t *params,
const char *allowed[], apr_pool_t *p)
{
char *args;
char *tok, *val;
char *key;
if (input == NULL) {
return;
}
args = apr_pstrdup(p, input);
key = apr_strtok(args, "&", &tok);
while (key) {
val = strchr(key, '=');
if (val) {
*val++ = '\0';
}
else {
val = "";
}
ap_unescape_url(key);
ap_unescape_url(val);
if (allowed == NULL) { /* allow all */
apr_table_set(params, key, val);
}
else {
const char **ok = allowed;
while (*ok) {
if (strcmp(*ok, key) == 0) {
apr_table_set(params, key, val);
break;
}
ok++;
}
}
key = apr_strtok(NULL, "&", &tok);
}
}
/*
 * Process the parameters and add or update the worker of the balancer
 */
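/*
 * The parameters arrive as query arguments submitted from the
 * balancer-manager page; e.g. (illustrative) w_status_D=1 disables the
 * selected worker and b_tmo=10 sets the balancer timeout to 10 seconds.
 */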
static int balancer_process_balancer_worker(request_rec *r, proxy_server_conf *conf,
proxy_balancer *bsel,
proxy_worker *wsel, int ok2change,
apr_table_t *params)
{
apr_status_t rv;
/* First set the params */
if (wsel && ok2change) {
const char *val;
int was_usable = PROXY_WORKER_IS_USABLE(wsel);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01192) "settings worker params");
if ((val = apr_table_get(params, "w_lf"))) {
int ival;
double fval = atof(val);
ival = fval * 100.0;
if (ival >= 100 && ival <= 10000) {
wsel->s->lbfactor = ival;
if (bsel)
recalc_factors(bsel);
}
}
if ((val = apr_table_get(params, "w_wr"))) {
if (*val && strlen(val) < sizeof(wsel->s->route))
strcpy(wsel->s->route, val);
else
*wsel->s->route = '\0';
}
if ((val = apr_table_get(params, "w_rr"))) {
if (*val && strlen(val) < sizeof(wsel->s->redirect))
strcpy(wsel->s->redirect, val);
else
*wsel->s->redirect = '\0';
}
        /*
         * TODO: Look for all 'w_status_#' keys and then loop through
         * them keyed on the # character, since the character == the flag
         */
if ((val = apr_table_get(params, "w_status_I"))) {
ap_proxy_set_wstatus(PROXY_WORKER_IGNORE_ERRORS_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_N"))) {
ap_proxy_set_wstatus(PROXY_WORKER_DRAIN_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_D"))) {
ap_proxy_set_wstatus(PROXY_WORKER_DISABLED_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_H"))) {
ap_proxy_set_wstatus(PROXY_WORKER_HOT_STANDBY_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_R"))) {
ap_proxy_set_wstatus(PROXY_WORKER_HOT_SPARE_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_S"))) {
ap_proxy_set_wstatus(PROXY_WORKER_STOPPED_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_status_C"))) {
ap_proxy_set_wstatus(PROXY_WORKER_HC_FAIL_FLAG, atoi(val), wsel);
}
if ((val = apr_table_get(params, "w_ls"))) {
int ival = atoi(val);
if (ival >= 0 && ival <= 99) {
wsel->s->lbset = ival;
}
}
if ((val = apr_table_get(params, "w_hi"))) {
apr_interval_time_t hci;
if (ap_timeout_parameter_parse(val, &hci, "ms") == APR_SUCCESS) {
if (hci >= AP_WD_TM_SLICE) {
wsel->s->interval = hci;
}
}
}
if ((val = apr_table_get(params, "w_hp"))) {
int ival = atoi(val);
if (ival >= 1) {
wsel->s->passes = ival;
}
}
if ((val = apr_table_get(params, "w_hf"))) {
int ival = atoi(val);
if (ival >= 1) {
wsel->s->fails = ival;
}
}
if ((val = apr_table_get(params, "w_hm"))) {
proxy_hcmethods_t *method = proxy_hcmethods;
for (; method->name; method++) {
if (!ap_cstr_casecmp(method->name, val) && method->implemented)
wsel->s->method = method->method;
}
}
if ((val = apr_table_get(params, "w_hu"))) {
if (*val && strlen(val) < sizeof(wsel->s->hcuri))
strcpy(wsel->s->hcuri, val);
else
*wsel->s->hcuri = '\0';
}
if (hc_valid_expr_f && (val = apr_table_get(params, "w_he"))) {
if (*val && hc_valid_expr_f(r, val) && strlen(val) < sizeof(wsel->s->hcexpr))
strcpy(wsel->s->hcexpr, val);
else
*wsel->s->hcexpr = '\0';
}
/* If the health check method doesn't support an expr, then null it */
if (wsel->s->method == NONE || wsel->s->method == TCP) {
*wsel->s->hcexpr = '\0';
}
/* if enabling, we need to reset all lb params */
if (bsel && !was_usable && PROXY_WORKER_IS_USABLE(wsel)) {
bsel->s->need_reset = 1;
}
}
if (bsel && ok2change) {
const char *val;
int ival;
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01193)
"settings balancer params");
if ((val = apr_table_get(params, "b_lbm"))) {
if ((strlen(val) < (sizeof(bsel->s->lbpname)-1)) &&
strcmp(val, bsel->s->lbpname)) {
proxy_balancer_method *lbmethod;
lbmethod = ap_lookup_provider(PROXY_LBMETHOD, val, "0");
if (lbmethod) {
PROXY_STRNCPY(bsel->s->lbpname, val);
bsel->lbmethod = lbmethod;
bsel->s->wupdated = apr_time_now();
bsel->s->need_reset = 1;
}
}
}
if ((val = apr_table_get(params, "b_tmo"))) {
ival = atoi(val);
            if (ival >= 0 && ival <= 7200) { /* 2 hours enough? */
bsel->s->timeout = apr_time_from_sec(ival);
}
}
if ((val = apr_table_get(params, "b_max"))) {
ival = atoi(val);
if (ival >= 0 && ival <= 99) {
bsel->s->max_attempts = ival;
}
}
if ((val = apr_table_get(params, "b_sforce"))) {
ival = atoi(val);
bsel->s->sticky_force = (ival != 0);
}
if ((val = apr_table_get(params, "b_ss")) && *val) {
if (strlen(val) < (sizeof(bsel->s->sticky_path)-1)) {
if (*val == '-' && *(val+1) == '\0')
*bsel->s->sticky_path = *bsel->s->sticky = '\0';
else {
char *path;
PROXY_STRNCPY(bsel->s->sticky_path, val);
PROXY_STRNCPY(bsel->s->sticky, val);
if ((path = strchr((char *)bsel->s->sticky, '|'))) {
*path++ = '\0';
PROXY_STRNCPY(bsel->s->sticky_path, path);
}
}
}
}
if ((val = apr_table_get(params, "b_wyes")) &&
(*val == '1' && *(val+1) == '\0') &&
(val = apr_table_get(params, "b_nwrkr"))) {
char *ret;
proxy_worker *nworker;
nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
if (!nworker && storage->num_free_slots(bsel->wslot)) {
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
"%s: Lock failed for adding worker",
bsel->s->name);
}
#endif
ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0);
if (!ret) {
unsigned int index;
proxy_worker_shared *shm;
PROXY_COPY_CONF_PARAMS(nworker, conf);
if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195)
"worker slotmem_grab failed");
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
#endif
return HTTP_BAD_REQUEST;
}
if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197)
"worker slotmem_dptr failed");
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
#endif
return HTTP_BAD_REQUEST;
}
if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199)
"Cannot share worker");
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
#endif
return HTTP_BAD_REQUEST;
}
if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201)
"Cannot init worker");
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
#endif
return HTTP_BAD_REQUEST;
}
/* sync all timestamps */
bsel->wupdated = bsel->s->wupdated = nworker->s->updated = apr_time_now();
/* by default, all new workers are disabled */
ap_proxy_set_wstatus(PROXY_WORKER_DISABLED_FLAG, 1, nworker);
} else {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10163)
"%s: failed to add worker %s",
bsel->s->name, val);
#if APR_HAS_THREADS
PROXY_GLOBAL_UNLOCK(bsel);
#endif
return HTTP_BAD_REQUEST;
}
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
"%s: Unlock failed for adding worker",
bsel->s->name);
}
#endif
} else {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10164)
"%s: failed to add worker %s",
bsel->s->name, val);
return HTTP_BAD_REQUEST;
}
}
}
return APR_SUCCESS;
}
/*
 * Builds the page and links to configure via HTML or XML.
 */
static void balancer_display_page(request_rec *r, proxy_server_conf *conf,
proxy_balancer *bsel,
proxy_worker *wsel,
int usexml)
{
const char *action;
proxy_balancer *balancer;
proxy_worker *worker;
proxy_worker **workers;
int i, n;
action = ap_construct_url(r->pool, r->uri, r);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01204) "genning page");
if (usexml) {
char date[APR_RFC822_DATE_LEN];
ap_set_content_type(r, "text/xml");
ap_rputs("\n", r);
ap_rputs("\n", r);
ap_rputs(" \n", r);
balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++) {
ap_rputs(" \n", r);
/* Start proxy_balancer */
ap_rvputs(r, " ", balancer->s->name, "\n", NULL);
if (*balancer->s->sticky) {
ap_rvputs(r, " ", ap_escape_html(r->pool, balancer->s->sticky),
"\n", NULL);
ap_rprintf(r,
" %s\n",
(balancer->s->sticky_force ? "On" : "Off"));
}
ap_rprintf(r,
" %" APR_TIME_T_FMT "\n",
apr_time_sec(balancer->s->timeout));
if (balancer->s->max_attempts_set) {
ap_rprintf(r,
" %d\n",
balancer->s->max_attempts);
}
ap_rvputs(r, " ", balancer->lbmethod->name,
"\n", NULL);
if (*balancer->s->sticky) {
ap_rprintf(r,
" %s\n",
(balancer->s->scolonsep ? "On" : "Off"));
}
/* End proxy_balancer */
ap_rputs(" \n", r);
workers = (proxy_worker **)balancer->workers->elts;
for (n = 0; n < balancer->workers->nelts; n++) {
worker = *workers;
/* Start proxy_worker */
ap_rputs(" \n", r);
ap_rvputs(r, " ", ap_proxy_worker_name(r->pool, worker),
"\n", NULL);
ap_rvputs(r, " ", worker->s->scheme,
"\n", NULL);
ap_rvputs(r, " ", worker->s->hostname_ex,
"\n", NULL);
ap_rprintf(r, " %.2f\n",
(float)(worker->s->lbfactor)/100.0);
ap_rprintf(r,
" %d\n",
worker->s->port);
ap_rprintf(r, " %d\n",
worker->s->min);
ap_rprintf(r, " %d\n",
worker->s->smax);
ap_rprintf(r, " %d\n",
worker->s->hmax);
ap_rprintf(r,
" %" APR_TIME_T_FMT "\n",
apr_time_sec(worker->s->ttl));
if (worker->s->timeout_set) {
ap_rprintf(r,
" %" APR_TIME_T_FMT "\n",
apr_time_sec(worker->s->timeout));
}
if (worker->s->acquire_set) {
ap_rprintf(r,
" %" APR_TIME_T_FMT "\n",
apr_time_msec(worker->s->acquire));
}
if (worker->s->recv_buffer_size_set) {
ap_rprintf(r,
" %" APR_SIZE_T_FMT "\n",
worker->s->recv_buffer_size);
}
if (worker->s->io_buffer_size_set) {
ap_rprintf(r,
" %" APR_SIZE_T_FMT "\n",
worker->s->io_buffer_size);
}
if (worker->s->keepalive_set) {
ap_rprintf(r,
" %s\n",
(worker->s->keepalive ? "On" : "Off"));
}
/* Begin proxy_worker_stat */
ap_rputs(" ", r);
ap_rputs(ap_proxy_parse_wstatus(r->pool, worker), r);
ap_rputs("\n", r);
if ((worker->s->error_time > 0) && apr_rfc822_date(date, worker->s->error_time) == APR_SUCCESS) {
ap_rvputs(r, " ", date,
"\n", NULL);
}
ap_rprintf(r,
" %d\n",
worker->s->retries);
ap_rprintf(r,
" %d\n",
worker->s->lbstatus);
ap_rprintf(r,
" %.2f\n",
(float)(worker->s->lbfactor)/100.0);
ap_rprintf(r,
" %" APR_OFF_T_FMT "\n",
worker->s->transferred);
ap_rprintf(r,
" %" APR_OFF_T_FMT "\n",
worker->s->read);
ap_rprintf(r,
" %" APR_SIZE_T_FMT "\n",
worker->s->elected);
ap_rvputs(r, " ",
ap_escape_html(r->pool, worker->s->route),
"\n", NULL);
ap_rvputs(r, " ",
ap_escape_html(r->pool, worker->s->redirect),
"\n", NULL);
ap_rprintf(r,
" %" APR_SIZE_T_FMT "\n",
worker->s->busy);
ap_rprintf(r, " %d\n",
worker->s->lbset);
/* End proxy_worker_stat */
if (!ap_cstr_casecmp(worker->s->scheme, "ajp")) {
ap_rputs(" ", r);
switch (worker->s->flush_packets) {
case flush_off:
ap_rputs("Off", r);
break;
case flush_on:
ap_rputs("On", r);
break;
case flush_auto:
ap_rputs("Auto", r);
break;
}
ap_rputs("\n", r);
if (worker->s->flush_packets == flush_auto) {
ap_rprintf(r,
" %d\n",
worker->s->flush_wait);
}
if (worker->s->ping_timeout_set) {
ap_rprintf(r,
" %" APR_TIME_T_FMT "",
apr_time_msec(worker->s->ping_timeout));
}
}
if (worker->s->disablereuse_set) {
ap_rprintf(r,
" %s\n",
(worker->s->disablereuse ? "On" : "Off"));
}
if (worker->s->conn_timeout_set) {
ap_rprintf(r,
" %" APR_TIME_T_FMT "\n",
apr_time_msec(worker->s->conn_timeout));
}
if (worker->s->retry_set) {
ap_rprintf(r,
" %" APR_TIME_T_FMT "\n",
apr_time_sec(worker->s->retry));
}
ap_rputs(" \n", r);
++workers;
}
ap_rputs(" \n", r);
ap_rputs(" \n", r);
++balancer;
}
ap_rputs(" \n", r);
ap_rputs("", r);
}
    else {
        ap_set_content_type(r, "text/html; charset=ISO-8859-1");
        ap_rputs(DOCTYPE_HTML_3_2
                 "<html><head><title>Balancer Manager</title>\n", r);
        ap_rputs("</head>\n<body>\n", r);
        ap_rputs("<h1>Load Balancer Manager for ", r);
        ap_rvputs(r, ap_escape_html(r->pool, ap_get_server_name(r)),
                  "</h1>\n\n", NULL);
        ap_rvputs(r, "<dl><dt>Server Version: ",
                  ap_get_server_description(), "</dt>\n", NULL);
        ap_rvputs(r, "<dt>Server Built: ",
                  ap_get_server_built(), "</dt>\n", NULL);
        ap_rvputs(r, "<dt>Balancer changes will ", conf->bal_persist ? "" : "NOT ",
                  "be persisted on restart.</dt>", NULL);
        ap_rvputs(r, "<dt>Balancers are ", conf->inherit ? "" : "NOT ",
                  "inherited from main server.</dt>", NULL);
        ap_rvputs(r, "<dt>ProxyPass settings are ", conf->ppinherit ? "" : "NOT ",
                  "inherited from main server.</dt>", NULL);
        ap_rputs("</dl>\n", r);
balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++) {
ap_rputs("\n
", r);
/* the below is a safe cast, since the number of slots total will
* never be more than max_workers, which is restricted to int */
ap_rprintf(r, "
%d [%d Used]
\n", balancer->max_workers,
balancer->max_workers - (int)storage->num_free_slots(balancer->wslot));
if (*balancer->s->sticky) {
if (strcmp(balancer->s->sticky, balancer->s->sticky_path)) {
ap_rvputs(r, "