/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/
/* * mod_cache_socache: Shared Object Cache Based HTTP 1.1 Cache. * * Flow to Find the entry: * Incoming client requests URI /foo/bar/baz * Fetch URI key (may contain Format #1 or Format #2) * If format #1 (Contains a list of Vary Headers): * Use each header name (from .header) with our request values (headers_in) to * regenerate key using HeaderName+HeaderValue+.../foo/bar/baz * re-read in key (must be format #2) * * Format #1: * apr_uint32_t format; * apr_time_t expire; * apr_array_t vary_headers (delimited by CRLF) * * Format #2: * cache_socache_info_t (first sizeof(apr_uint32_t) bytes is the format) * entity name (sobj->name) [length is in cache_socache_info_t->name_len] * r->headers_out (delimited by CRLF) * CRLF * r->headers_in (delimited by CRLF) * CRLF
*/
/* * cache_socache_object_t * Pointed to by cache_object_t::vobj
*/ typedefstruct cache_socache_object_t
{
apr_pool_t *pool; /* pool */ unsignedchar *buffer; /* the cache buffer */
apr_size_t buffer_len; /* size of the buffer */
apr_bucket_brigade *body; /* brigade containing the body, if any */
apr_table_t *headers_in; /* Input headers to save */
apr_table_t *headers_out; /* Output headers to save */
cache_socache_info_t socache_info; /* Header information. */
apr_size_t body_offset; /* offset to the start of the body */
apr_off_t body_length; /* length of the cached entity body */
apr_time_t expire; /* when to expire the entry */
constchar *name; /* Requested URI without vary bits - suitable for mortals. */ constchar *key; /* On-disk prefix; URI with Vary bits (if present) */
apr_off_t offset; /* Max size to set aside */
apr_time_t timeout; /* Max time to set aside */ unsignedint newbody :1; /* whether a new body is present */ unsignedint done :1; /* Is the attempt to cache complete? */
} cache_socache_object_t;
typedefstruct cache_socache_dir_conf
{
apr_off_t max; /* maximum file size for cached files */
apr_time_t maxtime; /* maximum expiry time */
apr_time_t mintime; /* minimum expiry time */
apr_off_t readsize; /* maximum data to attempt to cache in one go */
apr_time_t readtime; /* maximum time taken to cache in one go */ unsignedint max_set :1; unsignedint maxtime_set :1; unsignedint mintime_set :1; unsignedint readsize_set :1; unsignedint readtime_set :1;
} cache_socache_dir_conf;
/* TODO: * - Handle multiple-value headers better. (sort them?) * - Handle Case in-sensitive Values better. * This isn't the end of the world, since it just lowers the cache * hit rate, but it would be nice to fix. * * The majority are case insensitive if they are values (encoding etc). * Most of rfc2616 is case insensitive on header contents. * * So the better solution may be to identify headers which should be * treated case-sensitive? * HTTP URI's (3.2.3) [host and scheme are insensitive] * HTTP method (5.1.1) * HTTP-date values (3.3.1) * 3.7 Media Types [excerpt] * The type, subtype, and parameter attribute names are case- * insensitive. Parameter values might or might not be case-sensitive, * depending on the semantics of the parameter name. * 14.20 Expect [excerpt] * Comparison of expectation values is case-insensitive for unquoted * tokens (including the 100-continue token), and is case-sensitive for * quoted-string expectation-extensions.
*/
for (i = 0, k = 0; i < varray->nelts; i++) {
header = apr_table_get(headers, elts[i]); if (!header) {
header = "";
}
iov[k].iov_base = (char*) elts[i];
iov[k].iov_len = strlen(elts[i]);
k++;
iov[k].iov_base = (char*) header;
iov[k].iov_len = strlen(header);
k++;
}
iov[k].iov_base = (char*) oldkey;
iov[k].iov_len = strlen(oldkey);
k++;
/* we don't support caching of range requests (yet) */ /* TODO: but we could */ if (r->status == HTTP_PARTIAL_CONTENT) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02345) "URL %s partial content response not cached",
key); return DECLINED;
}
/* * We have a chicken and egg problem. We don't know until we * attempt to store_headers just how big the response will be * and whether it will fit in the cache limits set. But we * need to make a decision now as to whether we plan to try. * If we make the wrong decision, we could prevent another * cache implementation, such as cache_disk, from getting the * opportunity to cache, and that would be unfortunate. * * In a series of tests, from cheapest to most expensive, * decide whether or not to ignore this attempt to cache, * with a small margin just to be sure.
*/ if (len < 0) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02346) "URL '%s' had no explicit size, ignoring", key); return DECLINED;
} if (len > dconf->max) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02347) "URL '%s' body larger than limit, ignoring " "(%" APR_OFF_T_FMT " > %" APR_OFF_T_FMT ")",
key, len, dconf->max); return DECLINED;
}
/* estimate the total cached size, given current headers */
total = len + sizeof(cache_socache_info_t) + strlen(key); if (APR_SUCCESS != store_table(r->headers_out, NULL, dconf->max, &total)
|| APR_SUCCESS != store_table(r->headers_in, NULL, dconf->max,
&total)) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02348) "URL '%s' estimated headers size larger than limit, ignoring " "(%" APR_SIZE_T_FMT " > %" APR_OFF_T_FMT ")",
key, total, dconf->max); return DECLINED;
}
if (total >= dconf->max) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02349) "URL '%s' body and headers larger than limit, ignoring " "(%" APR_OFF_T_FMT " > %" APR_OFF_T_FMT ")",
key, len, dconf->max); return DECLINED;
}
/* Allocate and initialize cache_object_t and cache_socache_object_t */
h->cache_obj = obj = apr_pcalloc(r->pool, sizeof(*obj));
obj->vobj = sobj = apr_pcalloc(r->pool, sizeof(*sobj));
if (!conf->provider || !conf->provider->socache_instance) { return DECLINED;
}
/* Create and init the cache object */
obj = apr_pcalloc(r->pool, sizeof(cache_object_t));
sobj = apr_pcalloc(r->pool, sizeof(cache_socache_object_t));
info = &(obj->info);
/* Create a temporary pool for the buffer, and destroy it if something * goes wrong so we don't have large buffers of unused memory hanging * about for the lifetime of the response.
*/
apr_pool_create(&sobj->pool, r->pool);
apr_pool_tag(sobj->pool, "mod_cache_socache (open_entity)");
/* Store it away so we can get it later. */
info->status = sobj->socache_info.status;
info->date = sobj->socache_info.date;
info->expire = sobj->socache_info.expire;
info->request_time = sobj->socache_info.request_time;
info->response_time = sobj->socache_info.response_time;
/* Call routine to read the header lines/status line */ if (APR_SUCCESS != read_table(h, r, h->resp_hdrs, sobj->buffer, buffer_len,
&slider)) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(02364) "Cache entry for key '%s' response headers unreadable, removing", nkey); goto fail;
} if (APR_SUCCESS != read_table(h, r, h->req_hdrs, sobj->buffer, buffer_len,
&slider)) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, APLOGNO(02365) "Cache entry for key '%s' request headers unreadable, removing", nkey); goto fail;
}
/* Retrieve the body if we have one */
len = buffer_len - slider; if (len > 0) {
apr_bucket *e; /* Create the body brigade later concatenated to the output filters' * brigade by recall_body(). Since sobj->buffer (the data) point to * sobj->pool (a subpool of r->pool), be safe by using a pool bucket * which can morph to heap if sobj->pool is destroyed while the bucket * is still alive. But if sobj->pool gets destroyed while the bucket is * still in sobj->body (i.e. recall_body() was never called), we don't * need to morph to something just about to be freed, so a pre_cleanup * will take care of cleaning up sobj->body before this happens (and is * a noop otherwise).
*/
sobj->body = apr_brigade_create(sobj->pool, r->connection->bucket_alloc);
apr_pool_pre_cleanup_register(sobj->pool, sobj, sobj_body_pre_cleanup);
e = apr_bucket_pool_create((constchar *) sobj->buffer + slider, len,
sobj->pool, r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(sobj->body, e);
}
/* make the configuration stick */
h->cache_obj = obj;
obj->vobj = sobj;
return OK;
fail: if (socache_mutex) {
apr_status_t status = apr_global_mutex_lock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02366) "could not acquire lock, ignoring: %s", obj->key);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return DECLINED;
}
} if (nkey) {
conf->provider->socache_provider->remove(
conf->provider->socache_instance, r->server,
(unsignedchar *) nkey, strlen(nkey), r->pool);
} if (socache_mutex) {
apr_status_t status = apr_global_mutex_unlock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02367) "could not release lock, ignoring: %s", obj->key);
}
}
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return DECLINED;
}
/*
 * Forget the entity associated with this cache handle.
 *
 * Clearing h->cache_obj is all that is needed: with no object attached,
 * the next lookup for this URL starts from scratch. Always returns OK.
 */
static int remove_entity(cache_handle_t *h)
{
    /* Null out the cache object pointer so next time we start from scratch */
    h->cache_obj = NULL;
    return OK;
}
/* Remove the key from the cache */ if (socache_mutex) {
apr_status_t status = apr_global_mutex_lock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02368) "could not acquire lock, ignoring: %s", sobj->key);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return DECLINED;
}
}
conf->provider->socache_provider->remove(conf->provider->socache_instance,
r->server, (unsignedchar *) sobj->key, strlen(sobj->key), r->pool); if (socache_mutex) {
apr_status_t status = apr_global_mutex_unlock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02369) "could not release lock, ignoring: %s", sobj->key);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return DECLINED;
}
}
return OK;
}
/*
 * Recall the cached response/request headers for this entity.
 *
 * The headers were already parsed into the handle during open_entity,
 * so this provider callback is intentionally a no-op.
 */
static apr_status_t recall_headers(cache_handle_t *h, request_rec *r)
{
    /* we recalled the headers during open_entity, so do nothing */
    return APR_SUCCESS;
}
if (slider + socache_info->name_len >= sobj->buffer_len) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02374) "cache buffer too small for name: %s",
sobj->name);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return APR_EGENERAL;
}
memcpy(sobj->buffer + slider, sobj->name, socache_info->name_len);
slider += socache_info->name_len;
if (sobj->headers_out) { if (APR_SUCCESS != store_table(sobj->headers_out, sobj->buffer,
sobj->buffer_len, &slider)) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02375) "out-headers didn't fit in buffer: %s", sobj->name);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return APR_EGENERAL;
}
}
/* Parse the vary header and dump those fields from the headers_in. */ /* TODO: Make call to the same thing cache_select calls to crack Vary. */ if (sobj->headers_in) { if (APR_SUCCESS != store_table(sobj->headers_in, sobj->buffer,
sobj->buffer_len, &slider)) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02376) "in-headers didn't fit in buffer %s",
sobj->key);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return APR_EGENERAL;
}
}
/* are we done completely? if so, pass any trailing buckets right through */ if (sobj->done || !sobj->pool) {
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(out, e); continue;
}
/* have we seen eos yet? */ if (APR_BUCKET_IS_EOS(e)) {
seen_eos = 1;
sobj->done = 1;
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(out, e); break;
}
/* honour flush buckets, we'll get called again */ if (APR_BUCKET_IS_FLUSH(e)) {
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(out, e); break;
}
/* metadata buckets are preserved as is */ if (APR_BUCKET_IS_METADATA(e)) {
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(out, e); continue;
}
/* read the bucket, write to the cache */
rv = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
APR_BUCKET_REMOVE(e);
APR_BRIGADE_INSERT_TAIL(out, e); if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02377) "Error when reading bucket for URL %s",
h->cache_obj->key); /* Remove the intermediate cache file and return non-APR_SUCCESS */
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return rv;
}
/* don't write empty buckets to the cache */ if (!length) { continue;
}
/* have we reached the limit of how much we're prepared to write in one * go? If so, leave, we'll get called again. This prevents us from trying * to swallow too much data at once, or taking so long to write the data * the client times out.
*/
sobj->offset -= length; if (sobj->offset <= 0) {
sobj->offset = 0; break;
} if ((dconf->readtime && apr_time_now() > sobj->timeout)) {
sobj->timeout = 0; break;
}
}
/* Was this the final bucket? If yes, perform sanity checks.
*/ if (seen_eos) { constchar *cl_header;
apr_off_t cl;
if (r->connection->aborted || r->no_cache) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02380) "Discarding body for URL %s " "because connection has been aborted.",
h->cache_obj->key);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return APR_EGENERAL;
}
if (socache_mutex) {
apr_status_t status = apr_global_mutex_lock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02384) "could not acquire lock, ignoring: %s", obj->key);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return status;
}
}
rv = conf->provider->socache_provider->store(
conf->provider->socache_instance, r->server,
(unsignedchar *) sobj->key, strlen(sobj->key), sobj->expire,
sobj->buffer, sobj->body_offset + sobj->body_length, sobj->pool); if (socache_mutex) {
apr_status_t status = apr_global_mutex_unlock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02385) "could not release lock, ignoring: %s", obj->key);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return status;
}
} if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r, APLOGNO(02386) "could not write to cache, ignoring: %s", sobj->key); goto fail;
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02387) "commit_entity: Headers and body for URL %s cached for maximum of %d seconds.",
sobj->name, (apr_uint32_t)apr_time_sec(sobj->expire - r->request_time));
apr_pool_destroy(sobj->pool);
sobj->pool = NULL;
return APR_SUCCESS;
fail: /* For safety, remove any existing entry on failure, just in case it could not * be revalidated successfully.
*/ if (socache_mutex) {
apr_status_t status = apr_global_mutex_lock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02388) "could not acquire lock, ignoring: %s", obj->key);
apr_pool_destroy(sobj->pool);
sobj->pool = NULL; return rv;
}
}
conf->provider->socache_provider->remove(conf->provider->socache_instance,
r->server, (unsignedchar *) sobj->key, strlen(sobj->key), r->pool); if (socache_mutex) {
apr_status_t status = apr_global_mutex_unlock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02389) "could not release lock, ignoring: %s", obj->key);
}
}
/* Argument is of form 'name:args' or just 'name'. */
sep = ap_strchr_c(arg, ':'); if (sep) {
name = apr_pstrmemdup(cmd->pool, arg, sep - arg);
sep++;
provider->args = sep;
} else {
name = arg;
}
provider->socache_provider = ap_lookup_provider(AP_SOCACHE_PROVIDER_GROUP,
name, AP_SOCACHE_PROVIDER_VERSION); if (provider->socache_provider == NULL) {
err = apr_psprintf(cmd->pool, "Unknown socache provider '%s'. Maybe you need " "to load the appropriate socache module " "(mod_socache_%s?)", name, name);
} return err;
}
if (apr_strtoff(&dconf->max, arg, NULL, 10) != APR_SUCCESS
|| dconf->max < 1024 || dconf->max > APR_UINT32_MAX) { return"CacheSocacheMaxSize argument must be a integer representing " "the max size of a cached entry (headers and body), at least 1024 " "and at most " APR_STRINGIFY(APR_UINT32_MAX);
}
dconf->max_set = 1; return NULL;
}
if (apr_strtoff(&seconds, arg, NULL, 10) != APR_SUCCESS || seconds < 0) { return"CacheSocacheMaxTime argument must be the maximum amount of time in seconds to cache an entry.";
}
dconf->maxtime = apr_time_from_sec(seconds);
dconf->maxtime_set = 1; return NULL;
}
if (apr_strtoff(&seconds, arg, NULL, 10) != APR_SUCCESS || seconds < 0) { return"CacheSocacheMinTime argument must be the minimum amount of time in seconds to cache an entry.";
}
dconf->mintime = apr_time_from_sec(seconds);
dconf->mintime_set = 1; return NULL;
}
if (apr_strtoff(&dconf->readsize, arg, NULL, 10) != APR_SUCCESS
|| dconf->readsize < 0) { return"CacheSocacheReadSize argument must be a non-negative integer representing the max amount of data to cache in go.";
}
dconf->readsize_set = 1; return NULL;
}
if (apr_strtoff(&milliseconds, arg, NULL, 10) != APR_SUCCESS
|| milliseconds < 0) { return"CacheSocacheReadTime argument must be a non-negative integer representing the max amount of time taken to cache in go.";
}
dconf->readtime = apr_time_from_msec(milliseconds);
dconf->readtime_set = 1; return NULL;
}
if (socache_mutex) {
status = apr_global_mutex_lock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02816) "could not acquire lock for cache status");
}
}
if (status != APR_SUCCESS) { if (!(flags & AP_STATUS_SHORT)) {
ap_rputs("No cache status data available\n", r);
} else {
ap_rputs("NotAvailable\n", r);
}
} else {
conf->provider->socache_provider->status(conf->provider->socache_instance,
r, flags);
}
if (socache_mutex && status == APR_SUCCESS) {
status = apr_global_mutex_unlock(socache_mutex); if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02817) "could not release lock for cache status");
}
}
for (s = base_server; s; s = s->next) {
cache_socache_conf *conf =
ap_get_module_config(s->module_config, &cache_socache_module);
if (!conf->provider) { continue;
}
if (!socache_mutex && conf->provider->socache_provider->flags
& AP_SOCACHE_FLAG_NOTMPSAFE) {
rv = ap_global_mutex_create(&socache_mutex, NULL, cache_socache_id,
NULL, s, pconf, 0); if (rv != APR_SUCCESS) {
ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02391) "failed to create %s mutex", cache_socache_id); return 500; /* An HTTP status would be a misnomer! */
}
apr_pool_cleanup_register(pconf, NULL, remove_lock,
apr_pool_cleanup_null);
}
errmsg = conf->provider->socache_provider->create(
&conf->provider->socache_instance, conf->provider->args, ptmp,
pconf); if (errmsg) {
ap_log_perror(APLOG_MARK, APLOG_CRIT, 0, plog,
APLOGNO(02392) "%s", errmsg); return 500; /* An HTTP status would be a misnomer! */
}
rv = conf->provider->socache_provider->init(
conf->provider->socache_instance, cache_socache_id,
&socache_hints, s, pconf); if (rv != APR_SUCCESS) {
ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02393) "failed to initialise %s cache", cache_socache_id); return 500; /* An HTTP status would be a misnomer! */
}
apr_pool_cleanup_register(pconf, (void *) s, destroy_cache,
apr_pool_cleanup_null);
}
return OK;
}
/*
 * Child-init hook: re-attach the global socache mutex in each child
 * process. If no mutex was created at post_config (i.e. the provider is
 * thread-safe), there is nothing to do. A failure to init the mutex is
 * logged at CRIT but not fatal to the child.
 */
static void socache_child_init(apr_pool_t *p, server_rec *s)
{
    const char *lock;
    apr_status_t rv;
    if (!socache_mutex) {
        return; /* don't waste the overhead of creating mutex & cache */
    }
    lock = apr_global_mutex_lockfile(socache_mutex);
    rv = apr_global_mutex_child_init(&socache_mutex, lock, p);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(02394)
                "failed to initialise mutex in child_init");
    }
}
staticconst command_rec cache_socache_cmds[] =
{
AP_INIT_TAKE1("CacheSocache", set_cache_socache, NULL, RSRC_CONF, "The shared object cache to store cache files"),
AP_INIT_TAKE1("CacheSocacheMaxTime", set_cache_maxtime, NULL, RSRC_CONF | ACCESS_CONF, "The maximum cache expiry age to cache a document in seconds"),
AP_INIT_TAKE1("CacheSocacheMinTime", set_cache_mintime, NULL, RSRC_CONF | ACCESS_CONF, "The minimum cache expiry age to cache a document in seconds"),
AP_INIT_TAKE1("CacheSocacheMaxSize", set_cache_max, NULL, RSRC_CONF | ACCESS_CONF, "The maximum cache entry size (headers and body) to cache a document"),
AP_INIT_TAKE1("CacheSocacheReadSize", set_cache_readsize, NULL, RSRC_CONF | ACCESS_CONF, "The maximum quantity of data to attempt to read and cache in one go"),
AP_INIT_TAKE1("CacheSocacheReadTime", set_cache_readtime, NULL, RSRC_CONF | ACCESS_CONF, "The maximum time taken to attempt to read and cache in go"),
{ NULL }
};
/*
 * (Trailing disclaimer from the web page this file was scraped from,
 * translated from German and commented out so the file stays valid C:)
 *
 * The information on this web page has been carefully compiled to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the coloured syntax highlighting is still experimental.
 */