/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/
/* * Register our mutex type before the config is read so we * can adjust the mutex settings using the Mutex directive.
*/ staticint balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp)
{
/* do syntactic check. * We break the URL into host, port, path
*/
err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port); if (err) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01157) "error parsing URL %s: %s",
url, err); return HTTP_BAD_REQUEST;
}
/* The canon_handler hooks are run per the BalancerMember in * balancer_fixup(), keep the original/raw path for now.
*/
r->filename = apr_pstrcat(r->pool, "proxy:" BALANCER_PREFIX,
host, "/", url, NULL);
for (i = 0; i < balancer->workers->nelts; i++) { int worker_is_initialized;
proxy_worker *worker = *workers;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01158) "Looking at %s -> %s initialized?", balancer->s->name,
ap_proxy_worker_name(p, worker));
worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(worker); if (!worker_is_initialized) {
ap_proxy_initialize_worker(worker, s, p);
}
++workers;
}
/* Set default number of attempts to the number of * workers.
*/ if (!balancer->s->max_attempts_set && balancer->workers->nelts > 1) {
balancer->s->max_attempts = balancer->workers->nelts - 1;
balancer->s->max_attempts_set = 1;
}
}
/*
 * Retrieve the value of the path parameter with the given name,
 * e.g. 'JSESSIONID=12345...N'.  The value ends at the next path
 * delimiter ('?' or '&', plus ';' when scolon_sep is set).
 * Returns a pool-allocated copy of the value, or NULL when the
 * parameter is absent or has an empty value.
 */
static char *get_path_param(apr_pool_t *pool, char *url,
                            const char *name, int scolon_sep)
{
    char *cursor;
    char *delims = scolon_sep ? ";?&" : "?&";

    for (cursor = strstr(url, name); cursor != NULL;
         cursor = strstr(cursor + 1, name)) {
        /* Skip past the name; only a match followed by '=' counts. */
        cursor += strlen(name);
        if (*cursor != '=') {
            continue;
        }
        ++cursor;
        if (*cursor != '\0') {
            /* Session path parameter found; copy and cut at a delimiter. */
            char *state;
            return apr_strtok(apr_pstrdup(pool, cursor), delims, &state);
        }
    }
    return NULL;
}
if ((cookies = apr_table_get(r->headers_in, "Cookie"))) { for (start_cookie = ap_strstr_c(cookies, name); start_cookie;
start_cookie = ap_strstr_c(start_cookie + 1, name)) { if (start_cookie == cookies ||
start_cookie[-1] == ';' ||
start_cookie[-1] == ',' ||
isspace(start_cookie[-1])) {
start_cookie += strlen(name); while(*start_cookie && isspace(*start_cookie))
++start_cookie; if (*start_cookie++ == '=' && *start_cookie) { /* * Session cookie was found, get its value
*/ char *end_cookie, *cookie;
cookie = apr_pstrdup(r->pool, start_cookie); if ((end_cookie = strchr(cookie, ';')) != NULL)
*end_cookie = '\0'; if((end_cookie = strchr(cookie, ',')) != NULL)
*end_cookie = '\0'; return cookie;
}
}
}
} return NULL;
}
/*
 * Find the balancer member whose configured route matches 'route'.
 * Regular members are preferred; hot standbys are only considered in a
 * second pass.  For an unusable match, a retry is attempted first and,
 * failing that, the member's redirect worker is resolved recursively
 * (bounded by the member count to avoid redirect cycles).
 */
static proxy_worker *find_route_worker(proxy_balancer *balancer,
                                       const char *route, request_rec *r,
                                       int recursion)
{
    int pass;

    /* Pass 0 inspects regular members only, pass 1 the hot standbys. */
    for (pass = 0; pass < 2; pass++) {
        int idx;
        proxy_worker **cur = (proxy_worker **)balancer->workers->elts;
        for (idx = 0; idx < balancer->workers->nelts; idx++, cur++) {
            proxy_worker *member = *cur;
            int is_standby = PROXY_WORKER_IS_STANDBY(member);
            if ((pass == 0) ? is_standby : !is_standby) {
                continue;
            }
            if (!*(member->s->route)
                || strcmp(member->s->route, route) != 0) {
                continue;
            }
            if (PROXY_WORKER_IS_USABLE(member)) {
                return member;
            }
            /*
             * The member is in error state: run a retry on it.  It is
             * marked operational again once the retry timeout elapsed.
             * It might still be unusable, but we try anyway.
             */
            ap_proxy_retry_worker_fn("BALANCER", member, r->server);
            if (PROXY_WORKER_IS_USABLE(member)) {
                return member;
            }
            /*
             * Still unusable (in error or disabled).  If a redirection is
             * configured, fall back to that redirect worker so the member
             * can be removed from the balancer safely (this presumes some
             * kind of session replication between the two backends).  The
             * recursion bound keeps us from looping through redirect
             * cycles across all balancer members.
             */
            if (*member->s->redirect
                && recursion < balancer->workers->nelts) {
                proxy_worker *target =
                    find_route_worker(balancer, member->s->redirect,
                                      r, recursion + 1);
                if (target && !PROXY_WORKER_IS_USABLE(target)) {
                    /* Same retry logic as above for the redirect target. */
                    ap_proxy_retry_worker_fn("BALANCER", target, r->server);
                }
                if (target && PROXY_WORKER_IS_USABLE(target)) {
                    return target;
                }
            }
        }
    }
    return NULL;
}
if (!*balancer->s->sticky) return NULL; /* * The route might be contained in the query string and *url is not * supposed to contain the query string. Hence add it temporarily if * present.
*/ if (r->args) {
url_with_qs = apr_pstrcat(r->pool, *url, "?", r->args, NULL);
} else {
url_with_qs = *url;
} /* Try to find the sticky route inside url */
*route = get_path_param(r->pool, url_with_qs, balancer->s->sticky_path, balancer->s->scolonsep); if (*route) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01159) "Found value %s for stickysession %s",
*route, balancer->s->sticky_path);
*sticky_used = balancer->s->sticky_path;
} else {
*route = get_cookie_param(r, balancer->s->sticky); if (*route) {
*sticky_used = balancer->s->sticky;
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01160) "Found value %s for stickysession %s",
*route, balancer->s->sticky);
}
} /* * If we found a value for stickysession, find the first '.' (or whatever * sticky_separator is set to) within. Everything after '.' (if present) * is our route.
*/ if ((*route) && (balancer->s->sticky_separator != 0) && ((*route = strchr(*route, balancer->s->sticky_separator)) != NULL ))
(*route)++; if ((*route) && (**route)) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01161) "Found route %s", *route); /* We have a route in path or in cookie * Find the worker that has this route defined.
*/
worker = find_route_worker(balancer, *route, r, 1); if (worker && strcmp(*route, worker->s->route)) { /* * Notice that the route of the worker chosen is different from * the route supplied by the client.
*/
apr_table_setn(r->subprocess_env, "BALANCER_ROUTE_CHANGED", "1");
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01162) "Route changed from %s to %s",
*route, worker->s->route);
} return worker;
} else return NULL;
}
if (candidate == NULL) { /* All the workers are in error state or disabled. * If the balancer has a timeout sleep for a while * and try again to find the worker. The chances are * that some other thread will release a connection. * By default the timeout is not set, and the server * returns SERVER_BUSY.
*/ if (balancer->s->timeout) { /* XXX: This can perhaps be built using some * smarter mechanism, like thread_cond. * But since the statuses can come from * different children, use the provided algo.
*/
apr_interval_time_t timeout = balancer->s->timeout;
apr_interval_time_t step, tval = 0; /* Set the timeout to 0 so that we don't * end in infinite loop
*/
balancer->s->timeout = 0;
step = timeout / 100; while (tval < timeout) {
apr_sleep(step); /* Try again */ if ((candidate = find_best_worker(balancer, r))) break;
tval += step;
} /* restore the timeout */
balancer->s->timeout = timeout;
}
}
/* Build the proxy URL from the worker URL and the actual path */
path = strstr(*url, "://"); if (path) {
path = ap_strchr_c(path + 3, '/');
}
r->filename = apr_pstrcat(r->pool, "proxy:", worker->s->name_ex, path, NULL);
/*
 * If every member of the balancer is in error state (and none can be
 * brought back by a retry), optionally force all of them back into
 * rotation so the balancer does not stay dead forever.  Controlled by
 * the balancer's forcerecovery flag.
 */
static void force_recovery(proxy_balancer *balancer, server_rec *s)
{
    int idx;
    int any_usable = 0;
    proxy_worker **members = (proxy_worker **)balancer->workers->elts;

    for (idx = 0; idx < balancer->workers->nelts; idx++, members++) {
        if (!((*members)->s->status & PROXY_WORKER_IN_ERROR)) {
            any_usable = 1;
            break;
        }
        /* In error state: see whether a retry brings it back. */
        ap_proxy_retry_worker_fn("BALANCER", *members, s);
        if (!((*members)->s->status & PROXY_WORKER_IN_ERROR)) {
            any_usable = 1;
            break;
        }
    }

    if (any_usable || !balancer->s->forcerecovery) {
        return;
    }

    /* All members down: clear the error flag on every one of them. */
    members = (proxy_worker **)balancer->workers->elts;
    for (idx = 0; idx < balancer->workers->nelts; idx++, members++) {
        ++(*members)->s->retries;
        (*members)->s->status &= ~PROXY_WORKER_IN_ERROR;
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01165)
                     "%s: Forcing recovery for worker (%s:%d)",
                     balancer->s->name, (*members)->s->hostname_ex,
                     (int)(*members)->s->port);
    }
}
*worker = NULL; /* Step 1: check if the url is for us * The url we can handle starts with 'balancer://' * If balancer is already provided skip the search * for balancer, because this is failover attempt.
*/ if (!*balancer &&
(ap_cstr_casecmpn(*url, BALANCER_PREFIX, sizeof(BALANCER_PREFIX) - 1)
|| !(*balancer = ap_proxy_get_balancer(r->pool, conf, *url, 1)))) return DECLINED;
/* Step 2: Lock the LoadBalancer * XXX: perhaps we need the process lock here
*/ #if APR_HAS_THREADS if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166) "%s: Lock failed for pre_request", (*balancer)->s->name); return DECLINED;
} #endif
/* Step 3: force recovery */
force_recovery(*balancer, r->server);
/* Step 3.5: Update member list for the balancer */ /* TODO: Implement as provider! */
ap_proxy_sync_balancer(*balancer, r->server, conf);
/* Step 4: find the session route */
runtime = find_session_route(*balancer, r, &route, &sticky, url); if (runtime) { if ((*balancer)->lbmethod && (*balancer)->lbmethod->updatelbstatus) { /* Call the LB implementation */
(*balancer)->lbmethod->updatelbstatus(*balancer, runtime, r->server);
} else { /* Use the default one */ int i, total_factor = 0;
proxy_worker **workers; /* We have a sticky load balancer * Update the workers status * so that even session routes get * into account.
*/
workers = (proxy_worker **)(*balancer)->workers->elts; for (i = 0; i < (*balancer)->workers->nelts; i++) { /* Take into calculation only the workers that are * not in error state or not disabled.
*/ if (PROXY_WORKER_IS_USABLE(*workers)) {
(*workers)->s->lbstatus += (*workers)->s->lbfactor;
total_factor += (*workers)->s->lbfactor;
}
workers++;
}
runtime->s->lbstatus -= total_factor;
}
runtime->s->elected++;
*worker = runtime;
} elseif (route && (*balancer)->s->sticky_force) { int i, member_of = 0;
proxy_worker **workers; /* * We have a route provided that doesn't match the * balancer name. See if the provider route is the * member of the same balancer in which case return 503
*/
workers = (proxy_worker **)(*balancer)->workers->elts; for (i = 0; i < (*balancer)->workers->nelts; i++) { if (*((*workers)->s->route) && strcmp((*workers)->s->route, route) == 0) {
member_of = 1; break;
}
workers++;
} if (member_of) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167) "%s: All workers are in error state for route (%s)",
(*balancer)->s->name, route); #if APR_HAS_THREADS if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168) "%s: Unlock failed for pre_request",
(*balancer)->s->name);
} #endif return HTTP_SERVICE_UNAVAILABLE;
}
}
#if APR_HAS_THREADS if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169) "%s: Unlock failed for pre_request",
(*balancer)->s->name);
} #endif if (!*worker) {
runtime = find_best_worker(*balancer, r); if (!runtime) { if ((*balancer)->workers->nelts) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01170) "%s: All workers are in error state",
(*balancer)->s->name);
} else {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01171) "%s: No workers in balancer",
(*balancer)->s->name);
}
return HTTP_SERVICE_UNAVAILABLE;
} if (*(*balancer)->s->sticky && runtime) { /* * This balancer has sticky sessions and the client either has not * supplied any routing information or all workers for this route * including possible redirect and hotstandby workers are in error * state, but we have found another working worker for this * balancer where we can send the request. Thus notice that we have * changed the route to the backend.
*/
apr_table_setn(r->subprocess_env, "BALANCER_ROUTE_CHANGED", "1");
}
*worker = runtime;
}
/* Add balancer/worker info to env. */
apr_table_setn(r->subprocess_env, "BALANCER_NAME", (*balancer)->s->name);
apr_table_setn(r->subprocess_env, "BALANCER_WORKER_NAME", (*worker)->s->name_ex);
apr_table_setn(r->subprocess_env, "BALANCER_WORKER_ROUTE", (*worker)->s->route);
/* Rewrite the url from 'balancer://url' * to the 'worker_scheme://worker_hostname[:worker_port]/url' * This replaces the balancers fictional name with the real * hostname of the elected worker and canonicalizes according * to the worker scheme (calls canon_handler hooks).
*/
access_status = balancer_fixup(r, *worker, url);
/* Add the session route to request notes if present */ if (route) {
apr_table_setn(r->notes, "session-sticky", sticky);
apr_table_setn(r->notes, "session-route", route);
/* Add session info to env. */
apr_table_setn(r->subprocess_env, "BALANCER_SESSION_STICKY", sticky);
apr_table_setn(r->subprocess_env, "BALANCER_SESSION_ROUTE", route);
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01172) "%s: worker (%s) rewritten to %s",
(*balancer)->s->name, (*worker)->s->name_ex, *url);
/*
 * Reset each member's lbstatus to its configured lbfactor.
 * A balancer with a single member is special-cased: that member
 * always carries the full load, so both values are pinned to 100.
 */
static void recalc_factors(proxy_balancer *balancer)
{
    proxy_worker **members = (proxy_worker **)balancer->workers->elts;
    int count = balancer->workers->nelts;
    int idx;

    if (count == 1) {
        members[0]->s->lbfactor = 100;
        members[0]->s->lbstatus = 100;
        return;
    }
    for (idx = 0; idx < count; idx++) {
        /* Update the status entries */
        members[idx]->s->lbstatus = members[idx]->s->lbfactor;
    }
}
balancer = (proxy_balancer *)conf->balancers->elts; for (i = 0; i < conf->balancers->nelts; i++, balancer++) { if (balancer->gmutex) {
apr_global_mutex_destroy(balancer->gmutex);
balancer->gmutex = NULL;
}
} return(0);
}
/*
 * Compute an ID for a vhost based on what makes it selected by requests:
 * its Host(s)/IP(s):port(s) and ServerName.  With full != 0, the second
 * and subsequent addresses plus the ServerAlias(es) are mixed in as well
 * (see make_servers_ids() below).  Returns a pool-allocated hex string
 * of the MD5 digest.
 */
static const char *make_server_id(server_rec *s, apr_pool_t *p, int full)
{
    apr_md5_ctx_t hash;
    unsigned char digest[APR_MD5_DIGESTSIZE];
    char hex[2 * APR_MD5_DIGESTSIZE + 1];
    char addr_buf[64]; /* large enough for any IPv[46] string */
    server_addr_rec *sar;
    int i;

    apr_md5_init(&hash);

    /* Mix in the listening address(es): virtual host name, resolved IP
     * and port.  Unless full is set, only the first address counts. */
    for (sar = s->addrs; sar; sar = sar->next) {
        addr_buf[0] = '\0';
        apr_sockaddr_ip_getbuf(addr_buf, sizeof addr_buf, sar->host_addr);
        apr_md5_update(&hash, (void *)sar->virthost, strlen(sar->virthost));
        apr_md5_update(&hash, (void *)addr_buf, strlen(addr_buf));
        apr_md5_update(&hash, (void *)&sar->host_port,
                       sizeof(sar->host_port));
        if (!full) {
            break;
        }
    }

    if (s->server_hostname) {
        apr_md5_update(&hash, (void *)s->server_hostname,
                       strlen(s->server_hostname));
    }

    if (full) {
        /* Also account for every ServerAlias, exact and wildcard. */
        if (s->names) {
            for (i = 0; i < s->names->nelts; ++i) {
                const char *alias = APR_ARRAY_IDX(s->names, i, char *);
                apr_md5_update(&hash, (void *)alias, strlen(alias));
            }
        }
        if (s->wild_names) {
            for (i = 0; i < s->wild_names->nelts; ++i) {
                const char *alias = APR_ARRAY_IDX(s->wild_names, i, char *);
                apr_md5_update(&hash, (void *)alias, strlen(alias));
            }
        }
    }

    apr_md5_final(digest, &hash);
    ap_bin2hex(digest, APR_MD5_DIGESTSIZE, hex);
    return apr_pstrmemdup(p, hex, sizeof(hex) - 1);
}
/* * First try to compute an unique ID for each vhost with minimal criteria, * that is the first Host/IP:port and ServerName. For most cases this should * be enough and avoids changing the ID unnecessarily across restart (or * stop/start w.r.t. persisted files) for things that this module does not * care about. * * But if it's not enough (collisions) do a second pass for the full monty, * that is additionally the other Host(s)/IP(s):port(s) and ServerAlias(es). * * Finally, for pathological configs where this is still not enough, let's * append a counter to duplicates, because we really want that ID to be unique * even if the vhost will never be selected to handle requests at run time, at * load time a duplicate may steal the original slotmems (depending on its * balancers' configurations), see how mod_slotmem_shm reuses slots/files based * solely on this ID and resets them if the sizes don't match.
*/ static apr_array_header_t *make_servers_ids(server_rec *main_s, apr_pool_t *p)
{
server_rec *s = main_s;
apr_array_header_t *ids = apr_array_make(p, 10, sizeof(constchar *));
apr_hash_t *dups = apr_hash_make(p); int idx, *dup, full_monty = 0; constchar *id;
for (idx = 0, s = main_s; s; s = s->next, ++idx) {
id = make_server_id(s, p, 0);
dup = apr_hash_get(dups, id, APR_HASH_KEY_STRING);
apr_hash_set(dups, id, APR_HASH_KEY_STRING,
apr_pmemdup(p, &idx, sizeof(int))); if (dup) {
full_monty = 1;
APR_ARRAY_IDX(ids, *dup, constchar *) = NULL;
APR_ARRAY_PUSH(ids, constchar *) = NULL;
} else {
APR_ARRAY_PUSH(ids, constchar *) = id;
}
} if (full_monty) {
apr_hash_clear(dups); for (idx = 0, s = main_s; s; s = s->next, ++idx) {
id = APR_ARRAY_IDX(ids, idx, constchar *); if (id) { /* Preserve non-duplicates */ continue;
}
id = make_server_id(s, p, 1); if (apr_hash_get(dups, id, APR_HASH_KEY_STRING)) {
id = apr_psprintf(p, "%s_%x", id, idx);
} else {
apr_hash_set(dups, id, APR_HASH_KEY_STRING, (void *)-1);
}
APR_ARRAY_IDX(ids, idx, constchar *) = id;
}
}
/* balancer_post_config() will be called twice during startup. So, don't
* set up the static data the 1st time through. */ if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) { return OK;
}
ap_proxy_retry_worker_fn =
APR_RETRIEVE_OPTIONAL_FN(ap_proxy_retry_worker); if (!ap_proxy_retry_worker_fn) {
ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02230) "mod_proxy must be loaded for mod_proxy_balancer"); return !OK;
}
/* * Get slotmem setups
*/
storage = ap_lookup_provider(AP_SLOTMEM_PROVIDER_GROUP, "shm",
AP_SLOTMEM_PROVIDER_VERSION); if (!storage) {
ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01177) "Failed to lookup provider 'shm' for '%s': is " "mod_slotmem_shm loaded??",
AP_SLOTMEM_PROVIDER_GROUP); return !OK;
}
ids = make_servers_ids(s, ptemp);
tstamp = apr_time_now(); /* * Go thru each Vhost and create the shared mem slotmem for * each balancer's workers
*/ for (idx = 0; s; ++idx) { int i,j; constchar *id;
proxy_balancer *balancer;
ap_slotmem_type_t type; void *sconf = s->module_config;
conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module); /* * During create_proxy_config() we created a dummy id. Now that * we have identifying info, we can create the real id
*/
id = APR_ARRAY_IDX(ids, idx, constchar *);
conf->id = apr_psprintf(pconf, "p%x",
ap_proxy_hashfunc(id, PROXY_HASHFUNC_DEFAULT)); if (conf->bslot) { /* Shared memory already created for this proxy_server_conf.
*/
s = s->next; continue;
} if (conf->bal_persist) {
type = AP_SLOTMEM_TYPE_PERSIST | AP_SLOTMEM_TYPE_CLEARINUSE;
} else {
type = 0;
} if (conf->balancers->nelts) {
conf->max_balancers = conf->balancers->nelts + conf->bgrowth;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01178) "Doing balancers create: %d, %d (%d)",
(int)ALIGNED_PROXY_BALANCER_SHARED_SIZE,
(int)conf->balancers->nelts, conf->max_balancers);
/* Initialize shared scoreboard data */
balancer = (proxy_balancer *)conf->balancers->elts; for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
proxy_worker **workers;
proxy_worker *worker;
proxy_balancer_shared *bshm; constchar *sname; unsignedint index;
/* now that we have the right id, we need to redo the sname field */
ap_pstr2_alnum(pconf, balancer->s->name + sizeof(BALANCER_PREFIX) - 1,
&sname);
sname = apr_pstrcat(pconf, conf->id, "_", sname, NULL);
PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
key = apr_strtok(args, "&", &tok); while (key) {
val = strchr(key, '='); if (val) {
*val++ = '\0';
} else {
val = "";
}
ap_unescape_url(key);
ap_unescape_url(val); /* hcuri, worker name, balancer name, at least are escaped when building the form, so twice */
ap_unescape_url(val); if (allowed == NULL) { /* allow all */
apr_table_set(params, key, val);
} else { constchar **ok = allowed; while (*ok) { if (strcmp(*ok, key) == 0) {
apr_table_set(params, key, val); break;
}
ok++;
}
}
key = apr_strtok(NULL, "&", &tok);
}
}
/* Returns non-zero if the Referer: header value passed matches the
* host of the request. */ staticint safe_referer(request_rec *r, constchar *ref)
{
apr_uri_t uri;
if (apr_uri_parse(r->pool, ref, &uri) || !uri.hostname) return 0;
/* * Process the parameters and add or update the worker of the * balancer. Must only be called if the nonce has been validated to * match, to avoid XSS attacks.
*/ staticint balancer_process_balancer_worker(request_rec *r, proxy_server_conf *conf,
proxy_balancer *bsel,
proxy_worker *wsel,
apr_table_t *params)
{
apr_status_t rv; /* First set the params */ if (wsel) { constchar *val; int was_usable = PROXY_WORKER_IS_USABLE(wsel);