Changes with Apache 2.0.51
+ *) Backport all mod_cache, mod_mem_cache, mod_disk_cache and
+ related cache utilities from the Apache 2.1 branch.
+ [Bill Stoddard]
*) mod_autoindex: Don't truncate the directory listing if a stat()
call fails (for instance on a >2Gb file). PR 17357.
#include "mod_cache.h"
-APR_HOOK_STRUCT(
- APR_HOOK_LINK(remove_url)
- APR_HOOK_LINK(create_entity)
- APR_HOOK_LINK(open_entity)
-)
-
extern APR_OPTIONAL_FN_TYPE(ap_cache_generate_key) *cache_generate_key;
extern module AP_MODULE_DECLARE_DATA cache_module;
* delete all URL entities from the cache
*
*/
-int cache_remove_url(request_rec *r, const char *types, char *url)
+int cache_remove_url(request_rec *r, char *url)
{
- const char *next = types;
- const char *type;
+ cache_provider_list *list;
apr_status_t rv;
char *key;
+ cache_request_rec *cache = (cache_request_rec *)
+ ap_get_module_config(r->request_config, &cache_module);
rv = cache_generate_key(r,r->pool,&key);
if (rv != APR_SUCCESS) {
return rv;
}
+ list = cache->providers;
+
/* for each specified cache type, delete the URL */
- while(next) {
- type = ap_cache_tokstr(r->pool, next, &next);
- cache_run_remove_url(type, key);
+ while(list) {
+ list->provider->remove_url(key);
+ list = list->next;
}
return OK;
}
* decide whether or not it wants to cache this particular entity.
* If the size is unknown, a size of -1 should be set.
*/
-int cache_create_entity(request_rec *r, const char *types, char *url, apr_off_t size)
+int cache_create_entity(request_rec *r, char *url, apr_off_t size)
{
+ cache_provider_list *list;
cache_handle_t *h = apr_pcalloc(r->pool, sizeof(cache_handle_t));
- const char *next = types;
- const char *type;
char *key;
apr_status_t rv;
cache_request_rec *cache = (cache_request_rec *)
if (rv != APR_SUCCESS) {
return rv;
}
+
+ list = cache->providers;
    /* for each specified cache provider, try to create the entity */
- while (next) {
- type = ap_cache_tokstr(r->pool, next, &next);
- switch (rv = cache_run_create_entity(h, r, type, key, size)) {
+ while (list) {
+ switch (rv = list->provider->create_entity(h, r, key, size)) {
case OK: {
cache->handle = h;
+ cache->provider = list->provider;
+ cache->provider_name = list->provider_name;
return OK;
}
case DECLINED: {
+ list = list->next;
continue;
}
default: {
return DECLINED;
}
-/*
- * remove a specific URL entity from the cache
- *
- * The specific entity referenced by the cache_handle is removed
- * from the cache, and the cache_handle is closed.
- */
-/* XXX Don't think we need to pass in request_rec or types ... */
-int cache_remove_entity(request_rec *r, const char *types, cache_handle_t *h)
-{
- h->remove_entity(h);
- return 1;
-}
-
/*
* select a specific URL entity in the cache
*
* This function returns OK if successful, DECLINED if no
* cached entity fits the bill.
*/
-int cache_select_url(request_rec *r, const char *types, char *url)
+int cache_select_url(request_rec *r, char *url)
{
- const char *next = types;
- const char *type;
+ cache_provider_list *list;
apr_status_t rv;
cache_handle_t *h;
char *key;
/* go through the cache types till we get a match */
h = cache->handle = apr_palloc(r->pool, sizeof(cache_handle_t));
- while (next) {
- type = ap_cache_tokstr(r->pool, next, &next);
- switch ((rv = cache_run_open_entity(h, r, type, key))) {
+ list = cache->providers;
+
+ while (list) {
+ switch ((rv = list->provider->open_entity(h, r, key))) {
case OK: {
char *vary = NULL;
const char *varyhdr = NULL;
- if (cache_read_entity_headers(h, r) != APR_SUCCESS) {
+ if (list->provider->recall_headers(h, r) != APR_SUCCESS) {
/* TODO: Handle this error */
return DECLINED;
}
+ r->filename = apr_pstrdup(r->pool, h->cache_obj->info.filename);
+
/*
* Check Content-Negotiation - Vary
*
return DECLINED;
}
}
+ cache->provider = list->provider;
+ cache->provider_name = list->provider_name;
return OK;
}
case DECLINED: {
/* try again with next cache type */
+ list = list->next;
continue;
}
default: {
return DECLINED;
}
-apr_status_t cache_write_entity_headers(cache_handle_t *h,
- request_rec *r,
- cache_info *info)
-{
- return (h->write_headers(h, r, info));
-}
-apr_status_t cache_write_entity_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
-{
- return (h->write_body(h, r, b));
-}
-
-apr_status_t cache_read_entity_headers(cache_handle_t *h, request_rec *r)
-{
- apr_status_t rv;
- cache_info *info = &(h->cache_obj->info);
-
- /* Build the header table from info in the info struct */
- rv = h->read_headers(h, r);
- if (rv != APR_SUCCESS) {
- return rv;
- }
-
- r->filename = apr_pstrdup(r->pool, info->filename );
-
- return APR_SUCCESS;
-}
-apr_status_t cache_read_entity_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *b)
-{
- return (h->read_body(h, p, b));
-}
-
apr_status_t cache_generate_key_default( request_rec *r, apr_pool_t*p, char**key )
{
if (r->hostname) {
}
return APR_SUCCESS;
}
-
-APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(cache, CACHE, int, create_entity,
- (cache_handle_t *h, request_rec *r, const char *type,
- const char *urlkey, apr_off_t len),
- (h, r, type,urlkey,len),DECLINED)
-APR_IMPLEMENT_EXTERNAL_HOOK_RUN_FIRST(cache, CACHE, int, open_entity,
- (cache_handle_t *h, request_rec *r, const char *type,
- const char *urlkey),(h,r,type,urlkey),
- DECLINED)
-APR_IMPLEMENT_EXTERNAL_HOOK_RUN_ALL(cache, CACHE, int, remove_url,
- (const char *type, const char *urlkey),
- (type,urlkey),OK,DECLINED)
-
-
#include "mod_cache.h"
-
+#include <ap_provider.h>
/* -------------------------------------------------------------- */
apr_table_get(r->headers_in, "If-None-Match") ||
apr_table_get(r->headers_in, "If-Modified-Since") ||
apr_table_get(r->headers_in, "If-Unmodified-Since")) {
-
return 1;
}
return 0;
}
-
-/* remove other filters from filter stack */
-CACHE_DECLARE(void) ap_cache_reset_output_filters(request_rec *r)
-{
- ap_filter_t *f = r->output_filters;
-
- while (f) {
- if (!strcasecmp(f->frec->name, "CORE") ||
- !strcasecmp(f->frec->name, "CONTENT_LENGTH") ||
- !strcasecmp(f->frec->name, "HTTP_HEADER")) {
- f = f->next;
- continue;
- }
- else {
- ap_remove_output_filter(f);
- f = f->next;
- }
- }
-}
-
-CACHE_DECLARE(const char *)ap_cache_get_cachetype(request_rec *r,
+CACHE_DECLARE(cache_provider_list *)ap_cache_get_providers(request_rec *r,
cache_server_conf *conf,
const char *url)
{
- const char *type = NULL;
+ cache_provider_list *providers = NULL;
int i;
/* we can't cache if there's no URL */
+ /* Is this case even possible?? */
if (!url) return NULL;
/* loop through all the cacheenable entries */
for (i = 0; i < conf->cacheenable->nelts; i++) {
struct cache_enable *ent =
(struct cache_enable *)conf->cacheenable->elts;
- const char *thisurl = ent[i].url;
- const char *thistype = ent[i].type;
- if ((thisurl) && !strncasecmp(thisurl, url, strlen(thisurl))) {
- if (!type) {
- type = thistype;
+ if ((ent[i].url) && !strncasecmp(url, ent[i].url, ent[i].urllen)) {
+ /* Fetch from global config and add to the list. */
+ cache_provider *provider;
+ provider = ap_lookup_provider(CACHE_PROVIDER_GROUP, ent[i].type,
+ "0");
+ if (!provider) {
+ /* Log an error! */
}
else {
- type = apr_pstrcat(r->pool, type, ",", thistype, NULL);
+ cache_provider_list *newp;
+ newp = apr_pcalloc(r->pool, sizeof(cache_provider_list));
+ newp->provider_name = ent[i].type;
+ newp->provider = provider;
+
+ if (!providers) {
+ providers = newp;
+ }
+ else {
+ cache_provider_list *last = providers;
+
+ while (last->next) {
+ last = last->next;
+ }
+ last->next = newp;
+ }
}
}
}
- /* then loop through all the cachedisable entries */
- /* Looking for urls that contain the full cachedisable url and possibly more. */
- /* This means we are disabling cachedisable url and below... */
+ /* then loop through all the cachedisable entries
+ * Looking for urls that contain the full cachedisable url and possibly
+ * more.
+ * This means we are disabling cachedisable url and below...
+ */
for (i = 0; i < conf->cachedisable->nelts; i++) {
struct cache_disable *ent =
(struct cache_disable *)conf->cachedisable->elts;
- const char *thisurl = ent[i].url;
- if ((thisurl) && !strncasecmp(thisurl, url, strlen(thisurl))) {
- type = NULL;
+ if ((ent[i].url) && !strncasecmp(url, ent[i].url, ent[i].urllen)) {
+ /* Stop searching now. */
+ return NULL;
}
}
- return type;
+ return providers;
}
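
   For illustration only (not part of this patch), the prefix match above
   corresponds to configuration along these lines:

       CacheEnable disk /images/
       CacheDisable /images/private/

   With such directives the stored urllen is the length of the configured
   prefix, so the strncasecmp() above selects the "disk" provider for any
   request path beginning with /images/, while a matching CacheDisable
   prefix short-circuits the search and returns no providers at all.
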
/* do a HTTP/1.1 age calculation */
-CACHE_DECLARE(apr_int64_t) ap_cache_current_age(cache_info *info, const apr_time_t age_value,
+CACHE_DECLARE(apr_int64_t) ap_cache_current_age(cache_info *info,
+ const apr_time_t age_value,
apr_time_t now)
{
apr_time_t apparent_age, corrected_received_age, response_delay,
CACHE_DECLARE(int) ap_cache_check_freshness(cache_request_rec *cache,
request_rec *r)
{
- apr_int64_t age, maxage_req, maxage_cresp, maxage, smaxage, maxstale, minfresh;
+ apr_int64_t age, maxage_req, maxage_cresp, maxage, smaxage, maxstale;
+ apr_int64_t minfresh;
int age_in_errhdr = 0;
const char *cc_cresp, *cc_ceresp, *cc_req;
const char *agestr = NULL;
* We now want to check if our cached data is still fresh. This depends
* on a few things, in this order:
*
- * - RFC2616 14.9.4 End to end reload, Cache-Control: no-cache no-cache in
+ * - RFC2616 14.9.4 End to end reload, Cache-Control: no-cache. no-cache in
* either the request or the cached response means that we must
* revalidate the request unconditionally, overriding any expiration
* mechanism. It's equivalent to max-age=0,must-revalidate.
age = ap_cache_current_age(info, age_c, r->request_time);
/* extract s-maxage */
- if (cc_cresp && ap_cache_liststr(r->pool, cc_cresp, "s-maxage", &val))
+ if (cc_cresp && ap_cache_liststr(r->pool, cc_cresp, "s-maxage", &val)) {
smaxage = apr_atoi64(val);
+ }
else if (cc_ceresp && ap_cache_liststr(r->pool, cc_ceresp, "s-maxage", &val)) {
smaxage = apr_atoi64(val);
}
- else
+ else {
smaxage = -1;
+ }
/* extract max-age from request */
- if (cc_req && ap_cache_liststr(r->pool, cc_req, "max-age", &val))
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "max-age", &val)) {
maxage_req = apr_atoi64(val);
- else
+ }
+ else {
maxage_req = -1;
+ }
/* extract max-age from response */
- if (cc_cresp && ap_cache_liststr(r->pool, cc_cresp, "max-age", &val))
+ if (cc_cresp && ap_cache_liststr(r->pool, cc_cresp, "max-age", &val)) {
maxage_cresp = apr_atoi64(val);
+ }
else if (cc_ceresp && ap_cache_liststr(r->pool, cc_ceresp, "max-age", &val)) {
maxage_cresp = apr_atoi64(val);
}
else
+ {
maxage_cresp = -1;
+ }
/*
* if both maxage request and response, the smaller one takes priority
*/
- if (-1 == maxage_req)
+ if (-1 == maxage_req) {
maxage = maxage_cresp;
- else if (-1 == maxage_cresp)
+ }
+ else if (-1 == maxage_cresp) {
maxage = maxage_req;
- else
+ }
+ else {
maxage = MIN(maxage_req, maxage_cresp);
+ }
/* extract max-stale */
- if (cc_req && ap_cache_liststr(r->pool, cc_req, "max-stale", &val))
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "max-stale", &val)) {
maxstale = apr_atoi64(val);
- else
+ }
+ else {
maxstale = 0;
+ }
/* extract min-fresh */
- if (cc_req && ap_cache_liststr(r->pool, cc_req, "min-fresh", &val))
+ if (cc_req && ap_cache_liststr(r->pool, cc_req, "min-fresh", &val)) {
minfresh = apr_atoi64(val);
- else
+ }
+ else {
minfresh = 0;
+ }
/* override maxstale if must-revalidate or proxy-revalidate */
if (maxstale && ((cc_cresp &&
/* add warning if maxstale overrode freshness calculation */
if (!(((smaxage != -1) && age < smaxage) ||
((maxage != -1) && age < maxage) ||
- (info->expire != APR_DATE_BAD && (info->expire - info->date) > age))) {
+ (info->expire != APR_DATE_BAD &&
+ (info->expire - info->date) > age))) {
/* make sure we don't stomp on a previous warning */
if ((warn_head == NULL) ||
((warn_head != NULL) && (ap_strstr_c(warn_head, "110") == NULL))) {
}
return 0; /* Cache object is stale */
}
-/*
+
+/*
* list is a comma-separated list of case-insensitive tokens, with
* optional whitespace around the tokens.
 * The function returns 1 if the token val is found in the list, or 0
}
/* return each comma separated token, one at a time */
-CACHE_DECLARE(const char *)ap_cache_tokstr(apr_pool_t *p, const char *list, const char **str)
+CACHE_DECLARE(const char *)ap_cache_tokstr(apr_pool_t *p, const char *list,
+ const char **str)
{
apr_size_t i;
const char *s;
val[i + 22 - k] = '\0';
}
-CACHE_DECLARE(char *)generate_name(apr_pool_t *p, int dirlevels, int dirlength, const char *name)
+CACHE_DECLARE(char *)generate_name(apr_pool_t *p, int dirlevels,
+ int dirlength, const char *name)
{
char hashfile[66];
cache_hash(name, hashfile, dirlevels, dirlength);
/* Handles for cache filters, resolved at startup to eliminate
* a name-to-function mapping on each request
*/
-static ap_filter_rec_t *cache_in_filter_handle;
+static ap_filter_rec_t *cache_save_filter_handle;
static ap_filter_rec_t *cache_out_filter_handle;
static ap_filter_rec_t *cache_conditional_filter_handle;
* If no:
 *   check whether we're allowed to try to cache it
* If yes:
- * add CACHE_IN filter
+ * add CACHE_SAVE filter
* If No:
* oh well.
*/
static int cache_url_handler(request_rec *r, int lookup)
{
apr_status_t rv;
- const char *cc_in, *pragma, *auth;
- apr_uri_t uri = r->parsed_uri;
- char *url = r->unparsed_uri;
- apr_size_t urllen;
- char *path = uri.path;
- const char *types;
- cache_info *info = NULL;
+ const char *pragma, *auth;
+ apr_uri_t uri;
+ char *url;
+ char *path;
+ cache_provider_list *providers;
+ cache_info *info;
cache_request_rec *cache;
cache_server_conf *conf;
+ apr_bucket_brigade *out;
- conf = (cache_server_conf *) ap_get_module_config(r->server->module_config,
- &cache_module);
-
- /* we don't handle anything but GET */
+ /* Delay initialization until we know we are handling a GET */
if (r->method_number != M_GET) {
return DECLINED;
}
+ uri = r->parsed_uri;
+ url = r->unparsed_uri;
+ path = uri.path;
+ info = NULL;
+
+ conf = (cache_server_conf *) ap_get_module_config(r->server->module_config,
+ &cache_module);
+
/*
* Which cache module (if any) should handle this request?
*/
- if (!(types = ap_cache_get_cachetype(r, conf, path))) {
- return DECLINED;
- }
-
- urllen = strlen(url);
- if (urllen > MAX_URL_LENGTH) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: URL exceeds length threshold: %s", url);
- return DECLINED;
- }
- /* DECLINE urls ending in / ??? EGP: why? */
- if (url[urllen-1] == '/') {
+ if (!(providers = ap_cache_get_providers(r, conf, path))) {
return DECLINED;
}
/* make space for the per request config */
- cache = (cache_request_rec *) ap_get_module_config(r->request_config,
+ cache = (cache_request_rec *) ap_get_module_config(r->request_config,
&cache_module);
if (!cache) {
cache = apr_pcalloc(r->pool, sizeof(cache_request_rec));
ap_set_module_config(r->request_config, &cache_module, cache);
}
- /* save away the type */
- cache->types = types;
+ /* save away the possible providers */
+ cache->providers = providers;
/*
* Are we allowed to serve cached info at all?
*/
/* find certain cache controlling headers */
- cc_in = apr_table_get(r->headers_in, "Cache-Control");
pragma = apr_table_get(r->headers_in, "Pragma");
auth = apr_table_get(r->headers_in, "Authorization");
"%s, but we know better and are ignoring it", url);
}
else {
- if (ap_cache_liststr(NULL, cc_in, "no-store", NULL) ||
- ap_cache_liststr(NULL, pragma, "no-cache", NULL) || (auth != NULL)) {
- /* delete the previously cached file */
- cache_remove_url(r, cache->types, url);
-
+ if (ap_cache_liststr(NULL, pragma, "no-cache", NULL) ||
+ auth != NULL) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: no-store forbids caching of %s", url);
+ "cache: no-cache or authorization forbids caching "
+ "of %s", url);
return DECLINED;
}
}
/*
* Try to serve this request from the cache.
*
- * If no existing cache file
- * add cache_in filter
- * If stale cache file
- * If conditional request
- * add cache_in filter
- * If non-conditional request
- * fudge response into a conditional
- * add cache_conditional filter
- * If fresh cache file
- * clear filter stack
- * add cache_out filter
+ * If no existing cache file (DECLINED)
+ * add cache_save filter
+ * If cached file (OK)
+ * If fresh cache file
+ * clear filter stack
+ * add cache_out filter
+ * return OK
+ * If stale cache file
+ * add cache_conditional filter (which updates cache)
*/
- rv = cache_select_url(r, cache->types, url);
- if (DECLINED == rv) {
- if (!lookup) {
- /* no existing cache file */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: no cache - add cache_in filter and DECLINE");
- /* add cache_in filter to cache this request */
- ap_add_output_filter_handle(cache_in_filter_handle, NULL, r,
- r->connection);
+ rv = cache_select_url(r, url);
+ if (rv != OK) {
+ if (rv == DECLINED) {
+ if (!lookup) {
+ /* add cache_save filter to cache this request */
+ ap_add_output_filter_handle(cache_save_filter_handle, NULL, r,
+ r->connection);
+ }
+ }
+ else {
+ /* error */
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "cache: error returned while checking for cached "
+ "file by %s cache", cache->provider_name);
}
return DECLINED;
}
- else if (OK == rv) {
- /* RFC2616 13.2 - Check cache object expiration */
- cache->fresh = ap_cache_check_freshness(cache, r);
- if (cache->fresh) {
- /* fresh data available */
- apr_bucket_brigade *out;
- conn_rec *c = r->connection;
-
- if (lookup) {
- return OK;
- }
- rv = ap_meets_conditions(r);
- if (rv != OK) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: fresh cache - returning status %d", rv);
- return rv;
- }
- /*
- * Not a conditionl request. Serve up the content
- */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: fresh cache - add cache_out filter and "
- "handle request");
-
- /* We are in the quick handler hook, which means that no output
- * filters have been set. So lets run the insert_filter hook.
- */
- ap_run_insert_filter(r);
- ap_add_output_filter_handle(cache_out_filter_handle, NULL,
- r, r->connection);
-
- /* kick off the filter stack */
- out = apr_brigade_create(r->pool, c->bucket_alloc);
- if (APR_SUCCESS
- != (rv = ap_pass_brigade(r->output_filters, out))) {
- ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
- "cache: error returned while trying to return %s "
- "cached data",
- cache->type);
- return rv;
- }
- return OK;
- }
- else {
- if (!r->err_headers_out) {
- r->err_headers_out = apr_table_make(r->pool, 3);
- }
- /* stale data available */
- if (lookup) {
- return DECLINED;
- }
+ /* We have located a suitable cache file now. */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: stale cache - test conditional");
- /* if conditional request */
- if (ap_cache_request_is_conditional(r)) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
- r->server,
- "cache: conditional - add cache_in filter and "
- "DECLINE");
- /* Why not add CACHE_CONDITIONAL? */
- ap_add_output_filter_handle(cache_in_filter_handle, NULL,
- r, r->connection);
-
- return DECLINED;
- }
- /* else if non-conditional request */
- else {
- /* Temporarily hack this to work the way it had been. Its broken,
- * but its broken the way it was before. I'm working on figuring
- * out why the filter add in the conditional filter doesn't work. pjr
- *
- * info = &(cache->handle->cache_obj->info);
- *
- * Uncomment the above when the code in cache_conditional_filter_handle
- * is properly fixed... pjr
- */
-
- /* fudge response into a conditional */
- if (info && info->etag) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
- r->server,
- "cache: nonconditional - fudge conditional "
- "by etag");
- /* if we have a cached etag */
- apr_table_set(r->headers_in, "If-None-Match", info->etag);
- }
- else if (info && info->lastmods) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
- r->server,
- "cache: nonconditional - fudge conditional "
- "by lastmod");
- /* if we have a cached IMS */
- apr_table_set(r->headers_in,
- "If-Modified-Since",
- info->lastmods);
- }
- else {
- /* something else - pretend there was no cache */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
- r->server,
- "cache: nonconditional - no cached "
- "etag/lastmods - add cache_in and DECLINE");
-
- ap_add_output_filter_handle(cache_in_filter_handle, NULL,
- r, r->connection);
-
- return DECLINED;
- }
- /* add cache_conditional filter */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
- r->server,
- "cache: nonconditional - add cache_conditional "
- "and DECLINE");
- ap_add_output_filter_handle(cache_conditional_filter_handle,
- NULL,
- r,
- r->connection);
+ /* RFC2616 13.2 - Check cache object expiration */
+ cache->fresh = ap_cache_check_freshness(cache, r);
+
+ /* What we have in our cache isn't fresh. */
+ if (!cache->fresh) {
+ /* If our stale cached response was conditional... */
+ if (!lookup && ap_cache_request_is_conditional(r)) {
+ info = &(cache->handle->cache_obj->info);
- return DECLINED;
+ /* fudge response into a conditional */
+ if (info && info->etag) {
+ /* if we have a cached etag */
+ apr_table_set(r->headers_in, "If-None-Match", info->etag);
+ }
+ else if (info && info->lastmods) {
+ /* if we have a cached IMS */
+ apr_table_set(r->headers_in, "If-Modified-Since",
+ info->lastmods);
}
}
- }
- else {
- /* error */
- ap_log_error(APLOG_MARK, APLOG_ERR, rv,
- r->server,
- "cache: error returned while checking for cached file by "
- "%s cache",
- cache->type);
+
+ /* Add cache_conditional_filter to see if we can salvage
+ * later.
+ */
+ ap_add_output_filter_handle(cache_conditional_filter_handle,
+ NULL, r, r->connection);
return DECLINED;
}
+
+ /* fresh data available */
+
+ info = &(cache->handle->cache_obj->info);
+
+ if (info && info->lastmod) {
+ ap_update_mtime(r, info->lastmod);
+ }
+
+ rv = ap_meets_conditions(r);
+ if (rv != OK) {
+ /* Return cached status. */
+ return rv;
+ }
+
+ /* If we're a lookup, we can exit now instead of serving the content. */
+ if (lookup) {
+ return OK;
+ }
+
+ /* Serve up the content */
+
+ /* We are in the quick handler hook, which means that no output
+     * filters have been set. So let's run the insert_filter hook.
+ */
+ ap_run_insert_filter(r);
+ ap_add_output_filter_handle(cache_out_filter_handle, NULL,
+ r, r->connection);
+
+ /* kick off the filter stack */
+ out = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ rv = ap_pass_brigade(r->output_filters, out);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+ "cache: error returned while trying to return %s "
+ "cached data",
+ cache->provider_name);
+ return rv;
+ }
+
+ return OK;
}
/*
ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r->server,
"cache: running CACHE_OUT filter");
- /* cache_read_entity_headers() was called in cache_select_url() */
- cache_read_entity_body(cache->handle, r->pool, bb);
+ /* recall_body() was called in cache_select_url() */
+ cache->provider->recall_body(cache->handle, r->pool, bb);
/* This filter is done once it has served up its content */
ap_remove_output_filter(f);
* If response HTTP_NOT_MODIFIED
* replace ourselves with cache_out filter
* Otherwise
- * replace ourselves with cache_in filter
+ * replace ourselves with cache_save filter
*/
static int cache_conditional_filter(ap_filter_t *f, apr_bucket_brigade *in)
f->r, f->r->connection);
}
else {
- /* replace ourselves with CACHE_IN filter */
- ap_add_output_filter_handle(cache_in_filter_handle, NULL,
+ /* replace ourselves with CACHE_SAVE filter */
+ ap_add_output_filter_handle(cache_save_filter_handle, NULL,
f->r, f->r->connection);
}
ap_remove_output_filter(f);
/*
- * CACHE_IN filter
+ * CACHE_SAVE filter
* ---------------
*
* Decide whether or not this content should be cached.
*
*/
-static int cache_in_filter(ap_filter_t *f, apr_bucket_brigade *in)
+static int cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in)
{
int rv;
int date_in_errhdr = 0;
cache_request_rec *cache;
cache_server_conf *conf;
char *url = r->unparsed_uri;
- const char *cc_out, *cl;
+ const char *cc_in, *cc_out, *cl;
const char *exps, *lastmods, *dates, *etag;
apr_time_t exp, date, lastmod, now;
apr_off_t size;
apr_pool_t *p;
/* check first whether running this filter has any point or not */
- if(r->no_cache) {
+ /* If the user has Cache-Control: no-store from RFC 2616, don't store! */
+ cc_in = apr_table_get(r->headers_in, "Cache-Control");
+ if (r->no_cache || ap_cache_liststr(NULL, cc_in, "no-store", NULL)) {
ap_remove_output_filter(f);
return ap_pass_brigade(f->next, in);
}
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
- "cache: running CACHE_IN filter");
-
/* Setup cache_request_rec */
- cache = (cache_request_rec *) ap_get_module_config(r->request_config, &cache_module);
+ cache = (cache_request_rec *) ap_get_module_config(r->request_config,
+ &cache_module);
if (!cache) {
+ /* user likely configured CACHE_SAVE manually; they should really use
+ * mod_cache configuration to do that
+ */
cache = apr_pcalloc(r->pool, sizeof(cache_request_rec));
ap_set_module_config(r->request_config, &cache_module, cache);
}
/* pass the brigades into the cache, then pass them
* up the filter stack
*/
- rv = cache_write_entity_body(cache->handle, r, in);
+ rv = cache->provider->store_body(cache->handle, r, in);
if (rv != APR_SUCCESS) {
ap_remove_output_filter(f);
}
* BillS Asks.. Why do we need to make this call to remove_url?
* leave it in for now..
*/
- cache_remove_url(r, cache->types, url);
+ cache_remove_url(r, url);
/* remove this filter from the chain */
ap_remove_output_filter(f);
cl = apr_table_get(r->headers_out, "Content-Length");
}
if (cl) {
+#if 0
+ char *errp;
+ if (apr_strtoff(&size, cl, &errp, 10) || *errp || size < 0) {
+ cl = NULL; /* parse error, see next 'if' block */
+ }
+#else
size = apr_atoi64(cl);
+ if (size < 0) {
+ cl = NULL;
+ }
+#endif
}
- else {
+
+ if (!cl) {
/* if we don't get the content-length, see if we have all the
* buckets and use their length to calculate the size
*/
int all_buckets_here=0;
int unresolved_length = 0;
size=0;
- APR_BRIGADE_FOREACH(e, in) {
+ for (e = APR_BRIGADE_FIRST(in);
+ e != APR_BRIGADE_SENTINEL(in);
+ e = APR_BUCKET_NEXT(e))
+ {
if (APR_BUCKET_IS_EOS(e)) {
all_buckets_here=1;
break;
*/
/* no cache handle, create a new entity */
if (!cache->handle) {
- rv = cache_create_entity(r, cache->types, url, size);
+ rv = cache_create_entity(r, url, size);
}
/* pre-existing cache handle and 304, make entity fresh */
else if (r->status == HTTP_NOT_MODIFIED) {
* with this one
*/
else {
- cache_remove_entity(r, cache->types, cache->handle);
- rv = cache_create_entity(r, cache->types, url, size);
+ cache->provider->remove_entity(cache->handle);
+ rv = cache_create_entity(r, url, size);
}
if (rv != OK) {
/*
* Write away header information to cache.
*/
- rv = cache_write_entity_headers(cache->handle, r, info);
+ rv = cache->provider->store_headers(cache->handle, r, info);
if (rv == APR_SUCCESS) {
- rv = cache_write_entity_body(cache->handle, r, in);
+ rv = cache->provider->store_body(cache->handle, r, in);
}
if (rv != APR_SUCCESS) {
ap_remove_output_filter(f);
new = apr_array_push(conf->cacheenable);
new->type = type;
new->url = url;
+ new->urllen = strlen(url);
return NULL;
}
&cache_module);
new = apr_array_push(conf->cachedisable);
new->url = url;
+ new->urllen = strlen(url);
return NULL;
}
* Make them AP_FTYPE_CONTENT for now.
* XXX ianhH:they should run AFTER all the other content filters.
*/
- cache_in_filter_handle =
- ap_register_output_filter("CACHE_IN",
- cache_in_filter,
+ cache_save_filter_handle =
+ ap_register_output_filter("CACHE_SAVE",
+ cache_save_filter,
NULL,
AP_FTYPE_CONTENT_SET-1);
/* CACHE_OUT must go into the filter chain before SUBREQ_CORE to
#include <arpa/inet.h>
#endif
-/* USE_ATOMICS should be replaced with the appropriate APR feature macro */
-#define USE_ATOMICS
-#ifdef USE_ATOMICS
#include "apr_atomic.h"
-#endif
#ifndef MAX
#define MAX(a,b) ((a) > (b) ? (a) : (b))
/* default completion is 60% */
#define DEFAULT_CACHE_COMPLETION (60)
-#define MAX_URL_LENGTH 1024
#define MSEC_ONE_DAY ((apr_time_t)(86400*APR_USEC_PER_SEC)) /* one day, in microseconds */
#define MSEC_ONE_HR ((apr_time_t)(3600*APR_USEC_PER_SEC)) /* one hour, in microseconds */
#define MSEC_ONE_MIN ((apr_time_t)(60*APR_USEC_PER_SEC)) /* one minute, in microseconds */
struct cache_enable {
const char *url;
const char *type;
+ apr_size_t urllen;
};
struct cache_disable {
const char *url;
+ apr_size_t urllen;
};
/* static information about the local cache */
void *vobj; /* Opaque portion (specific to the cache implementation) of the cache object */
apr_size_t count; /* Number of body bytes written to the cache so far */
int complete;
-#ifdef USE_ATOMICS
- apr_atomic_t refcount;
-#else
- apr_size_t refcount;
-#endif
+ apr_uint32_t refcount;
apr_size_t cleanup;
};
typedef struct cache_handle cache_handle_t;
+
+#define CACHE_PROVIDER_GROUP "cache"
+
+typedef struct {
+ int (*remove_entity) (cache_handle_t *h);
+ apr_status_t (*store_headers)(cache_handle_t *h, request_rec *r, cache_info *i);
+ apr_status_t (*store_body)(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+ apr_status_t (*recall_headers) (cache_handle_t *h, request_rec *r);
+ apr_status_t (*recall_body) (cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+ int (*create_entity) (cache_handle_t *h, request_rec *r,
+ const char *urlkey, apr_off_t len);
+ int (*open_entity) (cache_handle_t *h, request_rec *r,
+ const char *urlkey);
+ int (*remove_url) (const char *urlkey);
+} cache_provider;
+
+/* A linked-list of cache providers. */
+typedef struct cache_provider_list cache_provider_list;
+
+struct cache_provider_list {
+ const char *provider_name;
+ const cache_provider *provider;
+ cache_provider_list *next;
+};
+
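
   As an illustrative sketch only (not part of this patch): instead of the
   old CACHE hooks, each backend now hands mod_cache a cache_provider table
   through the ap_provider API. Using the static callback names from the
   mod_disk_cache hunks further down, and a registration function name
   invented here for illustration, that could look roughly like this:

   #include "mod_cache.h"      /* cache_provider, CACHE_PROVIDER_GROUP */
   #include "ap_provider.h"    /* ap_register_provider() */

   /* Members must appear in the same order as the cache_provider typedef
    * above. The callbacks are the static functions defined in the
    * mod_disk_cache hunks later in this patch.
    */
   static const cache_provider cache_disk_provider =
   {
       &remove_entity,
       &store_headers,
       &store_body,
       &recall_headers,
       &recall_body,
       &create_entity,
       &open_entity,
       &remove_url,
   };

   /* Hypothetical register_hooks callback for the backend module. */
   static void disk_cache_register_hook(apr_pool_t *p)
   {
       /* "disk" is the name a CacheEnable directive selects; "0" is the
        * provider version that ap_cache_get_providers() asks for via
        * ap_lookup_provider(CACHE_PROVIDER_GROUP, ent[i].type, "0").
        */
       ap_register_provider(p, CACHE_PROVIDER_GROUP, "disk", "0",
                            &cache_disk_provider);
   }
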
struct cache_handle {
cache_object_t *cache_obj;
- int (*remove_entity) (cache_handle_t *h);
- apr_status_t (*write_headers)(cache_handle_t *h, request_rec *r, cache_info *i);
- apr_status_t (*write_body)(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
- apr_status_t (*read_headers) (cache_handle_t *h, request_rec *r);
- apr_status_t (*read_body) (cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
apr_table_t *req_hdrs; /* These are the original request headers */
};
/* per request cache information */
typedef struct {
- const char *types; /* the types of caches allowed */
- const char *type; /* the type of cache selected */
+ cache_provider_list *providers; /* possible cache providers */
+ const cache_provider *provider; /* current cache provider */
+ const char *provider_name; /* current cache provider name */
    int fresh;               /* is the entity fresh? */
cache_handle_t *handle; /* current cache handle */
    int in_checked;          /* CACHE_SAVE must cache the entity */
int dirlength,
const char *name);
CACHE_DECLARE(int) ap_cache_request_is_conditional(request_rec *r);
-CACHE_DECLARE(void) ap_cache_reset_output_filters(request_rec *r);
-CACHE_DECLARE(const char *)ap_cache_get_cachetype(request_rec *r, cache_server_conf *conf, const char *url);
+CACHE_DECLARE(cache_provider_list *)ap_cache_get_providers(request_rec *r, cache_server_conf *conf, const char *url);
CACHE_DECLARE(int) ap_cache_liststr(apr_pool_t *p, const char *list,
const char *key, char **val);
CACHE_DECLARE(const char *)ap_cache_tokstr(apr_pool_t *p, const char *list, const char **str);
/**
* cache_storage.c
*/
-int cache_remove_url(request_rec *r, const char *types, char *url);
-int cache_create_entity(request_rec *r, const char *types, char *url, apr_off_t size);
-int cache_remove_entity(request_rec *r, const char *types, cache_handle_t *h);
-int cache_select_url(request_rec *r, const char *types, char *url);
+int cache_remove_url(request_rec *r, char *url);
+int cache_create_entity(request_rec *r, char *url, apr_off_t size);
+int cache_select_url(request_rec *r, char *url);
apr_status_t cache_generate_key_default( request_rec *r, apr_pool_t*p, char**key );
/**
* create a key for the cache based on the request record
*/
const char* cache_create_key( request_rec*r );
-apr_status_t cache_write_entity_headers(cache_handle_t *h, request_rec *r, cache_info *info);
-apr_status_t cache_write_entity_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *bb);
-
-apr_status_t cache_read_entity_headers(cache_handle_t *h, request_rec *r);
-apr_status_t cache_read_entity_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+/*
+apr_status_t cache_store_entity_headers(cache_handle_t *h, request_rec *r, cache_info *info);
+apr_status_t cache_store_entity_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *bb);
+apr_status_t cache_recall_entity_headers(cache_handle_t *h, request_rec *r);
+apr_status_t cache_recall_entity_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+*/
/* hooks */
#define CACHE_DECLARE_DATA __declspec(dllimport)
#endif
-APR_DECLARE_EXTERNAL_HOOK(cache, CACHE, int, create_entity,
- (cache_handle_t *h, request_rec *r, const char *type,
- const char *urlkey, apr_off_t len))
-APR_DECLARE_EXTERNAL_HOOK(cache, CACHE, int, open_entity,
- (cache_handle_t *h, request_rec *r, const char *type,
- const char *urlkey))
-APR_DECLARE_EXTERNAL_HOOK(cache, CACHE, int, remove_url,
- (const char *type, const char *urlkey))
-
-
-
APR_DECLARE_OPTIONAL_FN(apr_status_t,
ap_cache_generate_key,
(request_rec *r, apr_pool_t*p, char**key ));
(MODCACHE)
ap_cache_request_is_conditional,
- ap_cache_reset_output_filters,
- ap_cache_get_cachetype,
+ ap_cache_get_providers,
ap_cache_liststr,
ap_cache_tokstr,
ap_cache_hex2usec,
ap_cache_usec2hex,
ap_cache_cacheable_hdrs_out,
- generate_name,
- cache_hook_create_entity,
- cache_hook_open_entity,
- cache_hook_remove_url
-
+ generate_name
* limitations under the License.
*/
-#include "mod_cache.h"
#include "apr_file_io.h"
#include "apr_strings.h"
+#include "mod_cache.h"
+#include "ap_provider.h"
#include "util_filter.h"
#include "util_script.h"
#include <unistd.h> /* needed for unlink/link */
#endif
+/* Our on-disk header format is:
+ *
+ * disk_cache_info_t
+ * entity name (dobj->name) [length is in disk_cache_info_t->name_len]
+ * r->headers_out (delimited by CRLF)
+ * CRLF
+ * r->headers_in (delimited by CRLF)
+ * CRLF
+ */
+#define DISK_FORMAT_VERSION 0
+typedef struct {
+ /* Indicates the format of the header struct stored on-disk. */
+ int format;
+ /* The HTTP status code returned for this response. */
+ int status;
+ /* The size of the entity name that follows. */
+ apr_size_t name_len;
+ /* The number of times we've cached this entity. */
+ apr_size_t entity_version;
+ /* Miscellaneous time values. */
+ apr_time_t date;
+ apr_time_t expire;
+ apr_time_t request_time;
+ apr_time_t response_time;
+} disk_cache_info_t;
+
/*
* disk_cache_object_t
* Pointed to by cache_object_t::vobj
    char *tempfile;          /* temp file to hold the content */
#if 0
int dirlevels; /* Number of levels of subdirectories */
- int dirlength; /* Length of subdirectory names */
+ int dirlength; /* Length of subdirectory names */
#endif
char *datafile; /* name of file where the data will go */
char *hdrsfile; /* name of file where the hdrs will go */
+ char *hashfile; /* Computed hash key for this URI */
char *name;
- apr_time_t version; /* update count of the file */
apr_file_t *fd; /* data file */
apr_file_t *hfd; /* headers file */
- apr_off_t file_size; /* File size of the cached data file */
+ apr_off_t file_size; /* File size of the cached data file */
+ disk_cache_info_t disk_info; /* Header information. */
} disk_cache_object_t;
+
/*
* mod_disk_cache configuration
*/
#define DEFAULT_MIN_FILE_SIZE 1
#define DEFAULT_MAX_FILE_SIZE 1000000
#define DEFAULT_CACHE_SIZE 1000000
-
+
typedef struct {
const char* cache_root;
apr_size_t cache_root_len;
apr_time_t gcinterval; /* garbage collection interval, in msec */
int dirlevels; /* Number of levels of subdirectories */
int dirlength; /* Length of subdirectory names */
- int expirychk; /* true if expiry time is observed for cached files */
+ int expirychk; /* true if expiry time is observed for cached files */
    apr_size_t minfs;        /* minimum file size for cached files */
apr_size_t maxfs; /* maximum file size for cached files */
apr_time_t mintm; /* minimum time margin for caching files */
/* Forward declarations */
static int remove_entity(cache_handle_t *h);
-static apr_status_t write_headers(cache_handle_t *h, request_rec *r, cache_info *i);
-static apr_status_t write_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
-static apr_status_t read_headers(cache_handle_t *h, request_rec *r);
-static apr_status_t read_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *i);
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r);
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
/*
* Local static functions
*/
#define CACHE_HEADER_SUFFIX ".header"
#define CACHE_DATA_SUFFIX ".data"
-static char *header_file(apr_pool_t *p, int dirlevels, int dirlength,
- const char *root, const char *name)
+static char *header_file(apr_pool_t *p, disk_cache_conf *conf,
+ disk_cache_object_t *dobj, const char *name)
{
- char *hashfile;
- hashfile = generate_name(p, dirlevels, dirlength, name);
- return apr_pstrcat(p, root, "/", hashfile, CACHE_HEADER_SUFFIX, NULL);
+ if (!dobj->hashfile) {
+ dobj->hashfile = generate_name(p, conf->dirlevels, conf->dirlength,
+ name);
+ }
+ return apr_pstrcat(p, conf->cache_root, "/", dobj->hashfile,
+ CACHE_HEADER_SUFFIX, NULL);
}
-static char *data_file(apr_pool_t *p, int dirlevels, int dirlength,
- const char *root, const char *name)
+static char *data_file(apr_pool_t *p, disk_cache_conf *conf,
+ disk_cache_object_t *dobj, const char *name)
{
- char *hashfile;
- hashfile = generate_name(p, dirlevels, dirlength, name);
- return apr_pstrcat(p, root, "/", hashfile, CACHE_DATA_SUFFIX, NULL);
+ if (!dobj->hashfile) {
+ dobj->hashfile = generate_name(p, conf->dirlevels, conf->dirlength,
+ name);
+ }
+ return apr_pstrcat(p, conf->cache_root, "/", dobj->hashfile,
+ CACHE_DATA_SUFFIX, NULL);
}
static void mkdir_structure(disk_cache_conf *conf, char *file, apr_pool_t *pool)
break;
*p = '\0';
- rv = apr_dir_make(file,
+ rv = apr_dir_make(file,
APR_UREAD|APR_UWRITE|APR_UEXECUTE, pool);
if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
/* XXX */
if (dobj->fd) {
apr_file_flush(dobj->fd);
if (!dobj->datafile) {
- dobj->datafile = data_file(r->pool, conf->dirlevels, conf->dirlength,
- conf->cache_root, h->cache_obj->key);
+ dobj->datafile = data_file(r->pool, conf, dobj, h->cache_obj->key);
}
/* Remove old file with the same name. If remove fails, then
* perhaps we need to create the directory tree where we are
apr_file_close(dobj->fd);
dobj->fd = NULL;
- /* XXX log */
- }
+ /* XXX log */
+ }
- return APR_SUCCESS;
+ return APR_SUCCESS;
}
+static apr_status_t file_cache_errorcleanup(disk_cache_object_t *dobj, request_rec *r)
+{
+ if (dobj->fd) {
+ apr_file_close(dobj->fd);
+ dobj->fd = NULL;
+ }
+ /* Remove the header file, the temporary body file, and a potential old body file */
+ apr_file_remove(dobj->hdrsfile, r->pool);
+ apr_file_remove(dobj->tempfile, r->pool);
+ apr_file_remove(dobj->datafile, r->pool);
-/* These two functions get and put state information into the data
- * file for an ap_cache_el, this state information will be read
- * and written transparent to clients of this module
+ /* Return non-APR_SUCCESS in order to have mod_cache remove the disk_cache filter */
+ return DECLINED;
+}
+
+
+/* This function reads the state information stored at the front of the
+ * header file for an ap_cache_el; that state is read and written
+ * transparently to clients of this module
*/
-static int file_cache_read_mydata(apr_file_t *fd, cache_info *info,
- disk_cache_object_t *dobj)
+static int file_cache_recall_mydata(apr_file_t *fd, cache_info *info,
+ disk_cache_object_t *dobj, request_rec *r)
{
apr_status_t rv;
- char urlbuff[1034]; /* XXX FIXME... THIS IS A POTENTIAL SECURITY HOLE */
- int urllen = sizeof(urlbuff);
- int offset=0;
- char * temp;
+ char *urlbuff;
+ disk_cache_info_t disk_info;
+ apr_size_t len;
/* read the data from the cache file */
- /* format
- * date SP expire SP count CRLF
- * dates are stored as a hex representation of apr_time_t (number of
- * microseconds since 00:00:00 january 1, 1970 UTC)
- */
- rv = apr_file_gets(&urlbuff[0], urllen, fd);
+ len = sizeof(disk_cache_info_t);
+ rv = apr_file_read_full(fd, &disk_info, len, &len);
if (rv != APR_SUCCESS) {
return rv;
}
- if ((temp = strchr(&urlbuff[0], '\n')) != NULL) /* trim off new line character */
- *temp = '\0'; /* overlay it with the null terminator */
-
- if (!apr_date_checkmask(urlbuff, "&&&&&&&&&&&&&&&& &&&&&&&&&&&&&&&& &&&&&&&&&&&&&&&& &&&&&&&&&&&&&&&& &&&&&&&&&&&&&&&&")) {
+ if (disk_info.format != DISK_FORMAT_VERSION) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+                     "cache_disk: URL %s had an on-disk version mismatch",
+ r->uri);
return APR_EGENERAL;
}
- info->date = ap_cache_hex2usec(urlbuff + offset);
- offset += (sizeof(info->date)*2) + 1;
- info->expire = ap_cache_hex2usec(urlbuff + offset);
- offset += (sizeof(info->expire)*2) + 1;
- dobj->version = ap_cache_hex2usec(urlbuff + offset);
- offset += (sizeof(info->expire)*2) + 1;
- info->request_time = ap_cache_hex2usec(urlbuff + offset);
- offset += (sizeof(info->expire)*2) + 1;
- info->response_time = ap_cache_hex2usec(urlbuff + offset);
-
- /* check that we have the same URL */
- rv = apr_file_gets(&urlbuff[0], urllen, fd);
+ /* Store it away so we can get it later. */
+ dobj->disk_info = disk_info;
+
+ info->date = disk_info.date;
+ info->expire = disk_info.expire;
+ info->request_time = disk_info.request_time;
+ info->response_time = disk_info.response_time;
+
+ /* Note that we could optimize this by conditionally doing the palloc
+ * depending upon the size. */
+ urlbuff = apr_palloc(r->pool, disk_info.name_len + 1);
+ len = disk_info.name_len;
+ rv = apr_file_read_full(fd, urlbuff, len, &len);
if (rv != APR_SUCCESS) {
return rv;
}
+ urlbuff[disk_info.name_len] = '\0';
- if ((temp = strchr(&urlbuff[0], '\n')) != NULL) { /* trim off new line character */
- *temp = '\0'; /* overlay it with the null terminator */
- }
-
- if (strncmp(urlbuff, "X-NAME: ", 7) != 0) {
- return APR_EGENERAL;
- }
- if (strcmp(urlbuff + 8, dobj->name) != 0) {
+ /* check that we have the same URL */
+ /* Would strncmp be correct? */
+ if (strcmp(urlbuff, dobj->name) != 0) {
return APR_EGENERAL;
}
-
- return APR_SUCCESS;
-}
-
-static int file_cache_write_mydata(apr_file_t *fd , cache_handle_t *h, request_rec *r)
-{
- apr_status_t rc;
- char *buf;
- apr_size_t amt;
-
- char dateHexS[sizeof(apr_time_t) * 2 + 1];
- char expireHexS[sizeof(apr_time_t) * 2 + 1];
- char verHexS[sizeof(apr_time_t) * 2 + 1];
- char requestHexS[sizeof(apr_time_t) * 2 + 1];
- char responseHexS[sizeof(apr_time_t) * 2 + 1];
- cache_info *info = &(h->cache_obj->info);
- disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
-
- if (!r->headers_out) {
- /* XXX log message */
- return 0;
- }
-
- ap_cache_usec2hex(info->date, dateHexS);
- ap_cache_usec2hex(info->expire, expireHexS);
- ap_cache_usec2hex(dobj->version++, verHexS);
- ap_cache_usec2hex(info->request_time, requestHexS);
- ap_cache_usec2hex(info->response_time, responseHexS);
- buf = apr_pstrcat(r->pool, dateHexS, " ", expireHexS, " ", verHexS, " ", requestHexS, " ", responseHexS, "\n", NULL);
- amt = strlen(buf);
- rc = apr_file_write(fd, buf, &amt);
- if (rc != APR_SUCCESS) {
- /* XXX log message */
- return 0;
- }
- buf = apr_pstrcat(r->pool, "X-NAME: ", dobj->name, "\n", NULL);
- amt = strlen(buf);
- rc = apr_file_write(fd, buf, &amt);
- if (rc != APR_SUCCESS) {
- /* XXX log message */
- return 0;
- }
- return 1;
+ return APR_SUCCESS;
}
/*
*/
#define AP_TEMPFILE "/aptmpXXXXXX"
static int create_entity(cache_handle_t *h, request_rec *r,
- const char *type,
- const char *key,
+ const char *key,
apr_off_t len)
-{
- disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+{
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
&disk_cache_module);
apr_status_t rv;
cache_object_t *obj;
disk_cache_object_t *dobj;
apr_file_t *tmpfile;
- if (strcasecmp(type, "disk")) {
- return DECLINED;
- }
-
if (conf->cache_root == NULL) {
return DECLINED;
}
- if (len < conf->minfs || len > conf->maxfs) {
+ /* If the Content-Length is still unknown, cache anyway */
+ if (len != -1 && (len < conf->minfs || len > conf->maxfs)) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"cache_disk: URL %s failed the size check, "
- "or is incomplete",
+ "or is incomplete",
key);
return DECLINED;
}
/* open temporary file */
dobj->tempfile = apr_pstrcat(r->pool, conf->cache_root, AP_TEMPFILE, NULL);
- rv = apr_file_mktemp(&tmpfile, dobj->tempfile,
+ rv = apr_file_mktemp(&tmpfile, dobj->tempfile,
APR_CREATE | APR_READ | APR_WRITE | APR_EXCL, r->pool);
if (rv == APR_SUCCESS) {
/* Populate the cache handle */
h->cache_obj = obj;
- h->read_body = &read_body;
- h->read_headers = &read_headers;
- h->write_body = &write_body;
- h->write_headers = &write_headers;
- h->remove_entity = &remove_entity;
ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "disk_cache: Caching URL %s", key);
+ "disk_cache: Storing URL %s", key);
}
else {
ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "disk_cache: Could not cache URL %s [%d]", key, rv);
+ "disk_cache: Could not store URL %s [%d]", key, rv);
return DECLINED;
}
return OK;
}
-static int open_entity(cache_handle_t *h, request_rec *r, const char *type, const char *key)
+static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
{
apr_status_t rc;
static int error_logged = 0;
- disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
&disk_cache_module);
- char *data;
- char *headers;
- apr_file_t *fd;
- apr_file_t *hfd;
apr_finfo_t finfo;
cache_object_t *obj;
cache_info *info;
disk_cache_object_t *dobj;
+ int flags;
h->cache_obj = NULL;
/* Look up entity keyed to 'url' */
- if (strcasecmp(type, "disk")) {
- return DECLINED;
- }
-
if (conf->cache_root == NULL) {
if (!error_logged) {
error_logged = 1;
return DECLINED;
}
- data = data_file(r->pool, conf->dirlevels, conf->dirlength,
- conf->cache_root, key);
- headers = header_file(r->pool, conf->dirlevels, conf->dirlength,
- conf->cache_root, key);
+
+ /* Create and init the cache object */
+ h->cache_obj = obj = apr_pcalloc(r->pool, sizeof(cache_object_t));
+ obj->vobj = dobj = apr_pcalloc(r->pool, sizeof(disk_cache_object_t));
+
+ info = &(obj->info);
+ obj->key = (char *) key;
+ dobj->name = (char *) key;
+ dobj->datafile = data_file(r->pool, conf, dobj, key);
+ dobj->hdrsfile = header_file(r->pool, conf, dobj, key);
/* Open the data file */
- rc = apr_file_open(&fd, data, APR_READ|APR_BINARY, 0, r->pool);
+ flags = APR_READ|APR_BINARY;
+#ifdef APR_SENDFILE_ENABLED
+ flags |= APR_SENDFILE_ENABLED;
+#endif
+ rc = apr_file_open(&dobj->fd, dobj->datafile, flags, 0, r->pool);
if (rc != APR_SUCCESS) {
/* XXX: Log message */
return DECLINED;
}
/* Open the headers file */
- rc = apr_file_open(&hfd, headers, APR_READ|APR_BINARY, 0, r->pool);
+ flags = APR_READ|APR_BINARY|APR_BUFFERED;
+ rc = apr_file_open(&dobj->hfd, dobj->hdrsfile, flags, 0, r->pool);
if (rc != APR_SUCCESS) {
/* XXX: Log message */
return DECLINED;
}
- /* Create and init the cache object */
- h->cache_obj = obj = apr_pcalloc(r->pool, sizeof(cache_object_t));
- obj->vobj = dobj = apr_pcalloc(r->pool, sizeof(disk_cache_object_t));
-
- info = &(obj->info);
- obj->key = (char *) key;
- dobj->name = (char *) key;
- dobj->fd = fd;
- dobj->hfd = hfd;
- dobj->datafile = data;
- dobj->hdrsfile = headers;
-
- rc = apr_file_info_get(&finfo, APR_FINFO_SIZE, fd);
+ rc = apr_file_info_get(&finfo, APR_FINFO_SIZE, dobj->fd);
if (rc == APR_SUCCESS) {
dobj->file_size = finfo.size;
}
-
+
/* Read the bytes to setup the cache_info fields */
- rc = file_cache_read_mydata(hfd, info, dobj);
+ rc = file_cache_recall_mydata(dobj->hfd, info, dobj, r);
if (rc != APR_SUCCESS) {
/* XXX log message */
return DECLINED;
}
/* Initialize the cache_handle callback functions */
- h->read_body = &read_body;
- h->read_headers = &read_headers;
- h->write_body = &write_body;
- h->write_headers = &write_headers;
- h->remove_entity = &remove_entity;
-
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "disk_cache: Serving Cached URL %s", dobj->name);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Recalled cached URL info header %s", dobj->name);
return OK;
}
-static int remove_entity(cache_handle_t *h)
+static int remove_entity(cache_handle_t *h)
{
/* Null out the cache object pointer so next time we start from scratch */
h->cache_obj = NULL;
return OK;
}
+static int remove_url(const char *key)
+{
+ /* XXX: Delete file from cache! */
+ return OK;
+}
+
/*
* Reads headers from a buffer and returns an array of headers.
* Returns NULL on file error
* This routine tries to deal with too long lines and continuation lines.
- * @@@: XXX: FIXME: currently the headers are passed thru un-merged.
+ * @@@: XXX: FIXME: currently the headers are passed thru un-merged.
* Is that okay, or should they be collapsed where possible?
*/
-static apr_status_t read_headers(cache_handle_t *h, request_rec *r)
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r)
{
- apr_status_t rv;
- char urlbuff[1034];
- int urllen = sizeof(urlbuff);
disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
apr_table_t * tmp;
/* This case should not happen... */
- if (!dobj->fd || !dobj->hfd) {
+ if (!dobj->hfd) {
/* XXX log message */
- return APR_NOTFOUND;
+ return APR_NOTFOUND;
}
if(!r->headers_out) {
r->headers_out = apr_table_make(r->pool, 20);
}
-
+
/*
- * Call routine to read the header lines/status line
+ * Call routine to read the header lines/status line
*/
+ r->status = dobj->disk_info.status;
ap_scan_script_header_err(r, dobj->hfd, NULL);
-
- apr_table_setn(r->headers_out, "Content-Type",
- ap_make_content_type(r, r->content_type));
- rv = apr_file_gets(&urlbuff[0], urllen, dobj->hfd); /* Read status */
- if (rv != APR_SUCCESS) {
- /* XXX log message */
- return rv;
- }
-
- r->status = atoi(urlbuff); /* Save status line into request rec */
-
- /* Read and ignore the status line (This request might result in a
- * 304, so we don't necessarily want to retransmit a 200 from the cache.)
- */
- rv = apr_file_gets(&urlbuff[0], urllen, dobj->hfd);
- if (rv != APR_SUCCESS) {
- /* XXX log message */
- return rv;
- }
+ apr_table_setn(r->headers_out, "Content-Type",
+ ap_make_content_type(r, r->content_type));
h->req_hdrs = apr_table_make(r->pool, 20);
-
+
/*
- * Call routine to read the header lines/status line
+ * Call routine to read the header lines/status line
+ *
+     * Note that ap_scan_script_header_err() writes the scanned headers
+     * into r->err_headers_out, so we must set the real one aside.
*/
tmp = r->err_headers_out;
r->err_headers_out = h->req_hdrs;
- rv = apr_file_gets(&urlbuff[0], urllen, dobj->hfd); /* Read status */
ap_scan_script_header_err(r, dobj->hfd, NULL);
r->err_headers_out = tmp;
-
+
apr_file_close(dobj->hfd);
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "disk_cache: Served headers for URL %s", dobj->name);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Recalled headers for URL %s", dobj->name);
return APR_SUCCESS;
}
-static apr_status_t read_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
{
apr_bucket *e;
disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;
return APR_SUCCESS;
}
-static apr_status_t write_headers(cache_handle_t *h, request_rec *r, cache_info *info)
+static apr_status_t store_table(apr_file_t *fd, apr_table_t *table)
{
- disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ int i;
+ apr_status_t rv;
+ struct iovec iov[4];
+ apr_size_t amt;
+ apr_table_entry_t *elts;
+
+ elts = (apr_table_entry_t *) apr_table_elts(table)->elts;
+ for (i = 0; i < apr_table_elts(table)->nelts; ++i) {
+ if (elts[i].key != NULL) {
+ iov[0].iov_base = elts[i].key;
+ iov[0].iov_len = strlen(elts[i].key);
+ iov[1].iov_base = ": ";
+ iov[1].iov_len = sizeof(": ") - 1;
+ iov[2].iov_base = elts[i].val;
+ iov[2].iov_len = strlen(elts[i].val);
+ iov[3].iov_base = CRLF;
+ iov[3].iov_len = sizeof(CRLF) - 1;
+
+ rv = apr_file_writev(fd, (const struct iovec *) &iov, 4,
+ &amt);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+ }
+ iov[0].iov_base = CRLF;
+ iov[0].iov_len = sizeof(CRLF) - 1;
+ rv = apr_file_writev(fd, (const struct iovec *) &iov, 1,
+ &amt);
+ return rv;
+}
+
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *info)
+{
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
&disk_cache_module);
apr_status_t rv;
- char *buf;
- char statusbuf[8];
apr_size_t amt;
disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;
apr_file_t *hfd = dobj->hfd;
if (!hfd) {
+ disk_cache_info_t disk_info;
+ struct iovec iov[2];
+
if (!dobj->hdrsfile) {
- dobj->hdrsfile = header_file(r->pool,
- conf->dirlevels,
- conf->dirlength,
- conf->cache_root,
+ dobj->hdrsfile = header_file(r->pool, conf, dobj,
h->cache_obj->key);
}
-
+
/* This is flaky... we need to manage the cache_info differently */
h->cache_obj->info = *info;
-
+
/* Remove old file with the same name. If remove fails, then
* perhaps we need to create the directory tree where we are
* about to write the new headers file.
hfd = dobj->hfd;
dobj->name = h->cache_obj->key;
- file_cache_write_mydata(dobj->hfd, h, r);
+ disk_info.format = DISK_FORMAT_VERSION;
+ disk_info.date = info->date;
+ disk_info.expire = info->expire;
+ disk_info.entity_version = dobj->disk_info.entity_version++;
+ disk_info.request_time = info->request_time;
+ disk_info.response_time = info->response_time;
+
+ disk_info.name_len = strlen(dobj->name);
+ disk_info.status = r->status;
+
+ /* This case only occurs when the content is generated locally */
+ if (!r->status_line) {
+ r->status_line = ap_get_status_line(r->status);
+ }
+
+ iov[0].iov_base = (void*)&disk_info;
+ iov[0].iov_len = sizeof(disk_cache_info_t);
+ iov[1].iov_base = dobj->name;
+ iov[1].iov_len = disk_info.name_len;
+
+ rv = apr_file_writev(hfd, (const struct iovec *) &iov, 2, &amt);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
if (r->headers_out) {
- int i;
- apr_table_t* headers_out = ap_cache_cacheable_hdrs_out(r->pool, r->headers_out);
- apr_table_entry_t *elts = (apr_table_entry_t *) apr_table_elts(headers_out)->elts;
- for (i = 0; i < apr_table_elts(headers_out)->nelts; ++i) {
- if (elts[i].key != NULL) {
- buf = apr_pstrcat(r->pool, elts[i].key, ": ", elts[i].val, CRLF, NULL);
- amt = strlen(buf);
- apr_file_write(hfd, buf, &amt);
- }
+ apr_table_t *headers_out;
+
+ headers_out = ap_cache_cacheable_hdrs_out(r->pool, r->headers_out);
+
+ rv = store_table(hfd, headers_out);
+ if (rv != APR_SUCCESS) {
+ return rv;
}
- buf = apr_pstrcat(r->pool, CRLF, NULL);
- amt = strlen(buf);
- apr_file_write(hfd, buf, &amt);
-
+
/* This case only occurs when the content is generated locally */
if (!apr_table_get(r->headers_out, "Content-Type") && r->content_type) {
- apr_table_setn(r->headers_out, "Content-Type",
+ apr_table_setn(r->headers_out, "Content-Type",
ap_make_content_type(r, r->content_type));
}
}
- sprintf(statusbuf,"%d", r->status);
- buf = apr_pstrcat(r->pool, statusbuf, CRLF, NULL);
- amt = strlen(buf);
- apr_file_write(hfd, buf, &amt);
- /* This case only occurs when the content is generated locally */
- if (!r->status_line) {
- r->status_line = ap_get_status_line(r->status);
- }
- buf = apr_pstrcat(r->pool, r->status_line, "\n", NULL);
- amt = strlen(buf);
- apr_file_write(hfd, buf, &amt);
- buf = apr_pstrcat(r->pool, CRLF, NULL);
- amt = strlen(buf);
- apr_file_write(hfd, buf, &amt);
-
- /* Parse the vary header and dump those fields from the headers_in. */
- /* Make call to the same thing cache_select_url calls to crack Vary. */
- /* @@@ Some day, not today. */
+ /* Parse the vary header and dump those fields from the headers_in. */
+ /* Make call to the same thing cache_select_url calls to crack Vary. */
+ /* @@@ Some day, not today. */
if (r->headers_in) {
- int i;
- apr_table_entry_t *elts = (apr_table_entry_t *) apr_table_elts(r->headers_in)->elts;
- for (i = 0; i < apr_table_elts(r->headers_in)->nelts; ++i) {
- if (elts[i].key != NULL) {
- buf = apr_pstrcat(r->pool, elts[i].key, ": ", elts[i].val, CRLF, NULL);
- amt = strlen(buf);
- apr_file_write(hfd, buf, &amt);
- }
+ rv = store_table(hfd, r->headers_in);
+ if (rv != APR_SUCCESS) {
+ return rv;
}
- buf = apr_pstrcat(r->pool, CRLF, NULL);
- amt = strlen(buf);
- apr_file_write(hfd, buf, &amt);
}
apr_file_close(hfd); /* flush and close */
}
/* XXX log message */
}
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "disk_cache: Caching headers for URL %s", dobj->name);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Stored headers for URL %s", dobj->name);
return APR_SUCCESS;
}
-static apr_status_t write_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
+
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
{
apr_bucket *e;
apr_status_t rv;
disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
+ disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
+ &disk_cache_module);
if (!dobj->fd) {
- rv = apr_file_open(&dobj->fd, dobj->tempfile,
+ rv = apr_file_open(&dobj->fd, dobj->tempfile,
APR_WRITE | APR_CREATE | APR_BINARY| APR_TRUNCATE | APR_BUFFERED,
APR_UREAD | APR_UWRITE, r->pool);
if (rv != APR_SUCCESS) {
return rv;
}
+ dobj->file_size = 0;
}
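+    /* Copy each bucket of the brigade into the temporary body file, keeping
+     * a running total in dobj->file_size so the configured maximum object
+     * size (conf->maxfs) can be enforced while the body is streamed. */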
- APR_BRIGADE_FOREACH(e, b) {
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
const char *str;
apr_size_t length;
apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
- apr_file_write(dobj->fd, str, &length);
+ if (apr_file_write(dobj->fd, str, &length) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "cache_disk: Error when writing cache file for URL %s",
+ h->cache_obj->key);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ return file_cache_errorcleanup(dobj, r);
+ }
+ dobj->file_size += length;
+ if (dobj->file_size > conf->maxfs) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache_disk: URL %s failed the size check (%lu>%lu)",
+ h->cache_obj->key, (unsigned long)dobj->file_size, (unsigned long)conf->maxfs);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ return file_cache_errorcleanup(dobj, r);
+ }
}
+
+    /* Was this the final bucket? If yes, close the body file and run the sanity checks */
if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(b))) {
- file_cache_el_final(h, r); /* Link to the perm file, and close the descriptor */
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
- "disk_cache: Cached body for URL %s", dobj->name);
+ if (h->cache_obj->info.len <= 0) {
+ /* XXX Fixme: file_size isn't constrained by size_t. */
+ h->cache_obj->info.len = dobj->file_size;
+ }
+ else if (h->cache_obj->info.len != dobj->file_size) {
+ /* "Content-Length" and actual content disagree in size. Log that. */
+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+ "disk_cache: URL %s failed the size check (%lu != %lu)",
+ h->cache_obj->key,
+ (unsigned long)h->cache_obj->info.len,
+ (unsigned long)dobj->file_size);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ return file_cache_errorcleanup(dobj, r);
+ }
+ if (dobj->file_size < conf->minfs) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache_disk: URL %s failed the size check (%lu<%lu)",
+ h->cache_obj->key, (unsigned long)dobj->file_size, (unsigned long)conf->minfs);
+ /* Remove the intermediate cache file and return non-APR_SUCCESS */
+ return file_cache_errorcleanup(dobj, r);
+ }
+ /* All checks were fine. Move tempfile to final destination */
+ file_cache_el_final(h, r); /* Link to the perm file, and close the descriptor */
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "disk_cache: Body for URL %s cached.", dobj->name);
}
- return APR_SUCCESS;
+ return APR_SUCCESS;
}
static void *create_config(apr_pool_t *p, server_rec *s)
static const char
*set_cache_root(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
conf->cache_root = arg;
conf->cache_root_len = strlen(arg);
static const char
*set_cache_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
conf->space = atoi(arg);
return NULL;
*set_cache_gcint(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
/*
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
*/
/* XXX */
/*
* Consider eliminating the next two directives in favor of
* Ian's prime number hash...
- * key = hash_fn( r->uri)
- * filename = "/key % prime1 /key %prime2/key %prime3"
+ * key = hash_fn( r->uri)
+ * filename = "/key % prime1 /key %prime2/key %prime3"
*/
static const char
*set_cache_dirlevels(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
int val = atoi(arg);
if (val < 1)
static const char
*set_cache_dirlength(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
int val = atoi(arg);
if (val < 1)
static const char
*set_cache_exchk(cmd_parms *parms, void *in_struct_ptr, int flag)
{
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
conf->expirychk = flag;
static const char
*set_cache_minfs(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
conf->minfs = atoi(arg);
return NULL;
static const char
*set_cache_maxfs(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
conf->maxfs = atoi(arg);
return NULL;
*set_cache_minetm(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
/* XXX
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
*/
return NULL;
*set_cache_gctime(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
/* XXX
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
*/
return NULL;
*add_cache_gcclean(cmd_parms *parms, void *in_struct_ptr, const char *arg, const char *arg1)
{
/* XXX
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
*/
return NULL;
*add_cache_gcclnun(cmd_parms *parms, void *in_struct_ptr, const char *arg, const char *arg1)
{
/* XXX
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
*/
return NULL;
*set_cache_maxgcmem(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
/* XXX
- disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
+ disk_cache_conf *conf = ap_get_module_config(parms->server->module_config,
&disk_cache_module);
*/
return NULL;
{NULL}
};
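+/* Function table for the "disk" cache provider; the initializer order must
+ * match the cache_provider structure declared in mod_cache.h. */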
+static const cache_provider cache_disk_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_entity,
+ &open_entity,
+ &remove_url,
+};
+
static void disk_cache_register_hook(apr_pool_t *p)
{
/* cache initializer */
- cache_hook_create_entity(create_entity, NULL, NULL, APR_HOOK_MIDDLE);
- cache_hook_open_entity(open_entity, NULL, NULL, APR_HOOK_MIDDLE);
-/* cache_hook_remove_entity(remove_entity, NULL, NULL, APR_HOOK_MIDDLE); */
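+    /* The cache hooks are replaced by a named provider: mod_cache resolves
+     * "disk" through the provider API (ap_lookup_provider() on
+     * CACHE_PROVIDER_GROUP) instead of broadcasting to every hooked module. */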
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "disk", "0",
+ &cache_disk_provider);
}
module AP_MODULE_DECLARE_DATA disk_cache_module = {
#include "mod_cache.h"
#include "cache_pqueue.h"
#include "cache_cache.h"
+#include "ap_provider.h"
#include "ap_mpm.h"
#include "apr_thread_mutex.h"
#if APR_HAVE_UNISTD_H
long priority; /**< the priority of this entry */
long total_refs; /**< total number of references this entry has had */
-#ifdef USE_ATOMICS
- apr_atomic_t pos; /**< the position of this entry in the cache */
-#else
- apr_ssize_t pos;
-#endif
+ apr_uint32_t pos; /**< the position of this entry in the cache */
} mem_cache_object_t;
typedef struct {
apr_thread_mutex_t *lock;
cache_cache_t *cache_cache;
- apr_size_t cache_size;
- apr_size_t object_cnt;
/* Fields set by config directives */
apr_size_t min_cache_object_size; /* in bytes */
/* Forward declarations */
static int remove_entity(cache_handle_t *h);
-static apr_status_t write_headers(cache_handle_t *h, request_rec *r, cache_info *i);
-static apr_status_t write_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
-static apr_status_t read_headers(cache_handle_t *h, request_rec *r);
-static apr_status_t read_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *i);
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r);
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
static void cleanup_cache_object(cache_object_t *obj);
cache_object_t *obj = (cache_object_t *)a;
mem_cache_object_t *mobj = obj->vobj;
-#ifdef USE_ATOMICS
apr_atomic_set(&mobj->pos, pos);
-#else
- mobj->pos = pos;
-#endif
}
static apr_ssize_t memcache_get_pos(void *a)
{
cache_object_t *obj = (cache_object_t *)a;
mem_cache_object_t *mobj = obj->vobj;
-#ifdef USE_ATOMICS
return apr_atomic_read(&mobj->pos);
-#else
- return mobj->pos;
-#endif
}
static apr_size_t memcache_cache_get_size(void*a)
* now. Increment the refcount before setting cleanup to avoid a race
* condition. A similar pattern is used in remove_url()
*/
-#ifdef USE_ATOMICS
apr_atomic_inc(&obj->refcount);
-#else
- obj->refcount++;
-#endif
obj->cleanup = 1;
-#ifdef USE_ATOMICS
if (!apr_atomic_dec(&obj->refcount)) {
cleanup_cache_object(obj);
}
-#else
- obj->refcount--;
- if (!obj->refcount) {
- cleanup_cache_object(obj);
- }
-#endif
}
/*
* functions return a 'negative' score since priority queues
* object needs to be removed from the cache then cleaned up.
*/
if (!obj->complete) {
- mem_cache_object_t *mobj = (mem_cache_object_t *) obj->vobj;
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
}
*/
if (!obj->cleanup) {
cache_remove(sconf->cache_cache, obj);
- sconf->object_cnt--;
- sconf->cache_size -= mobj->m_len;
obj->cleanup = 1;
}
if (sconf->lock) {
}
/* Cleanup the cache object */
-#ifdef USE_ATOMICS
if (!apr_atomic_dec(&obj->refcount)) {
if (obj->cleanup) {
cleanup_cache_object(obj);
}
}
-#else
- if (sconf->lock) {
- apr_thread_mutex_lock(sconf->lock);
- }
- obj->refcount--;
- /* If the object is marked for cleanup and the refcount
- * has dropped to zero, cleanup the object
- */
- if ((obj->cleanup) && (!obj->refcount)) {
- cleanup_cache_object(obj);
- }
- if (sconf->lock) {
- apr_thread_mutex_unlock(sconf->lock);
- }
-#endif
return APR_SUCCESS;
}
static apr_status_t cleanup_cache_mem(void *sconfv)
while (obj) {
/* Iterate over the cache and clean up each entry */
        /* Free the object if the refcount == 0 */
-#ifdef USE_ATOMICS
apr_atomic_inc(&obj->refcount);
obj->cleanup = 1;
if (!apr_atomic_dec(&obj->refcount)) {
-#else
- obj->cleanup = 1;
- if (!obj->refcount) {
-#endif
cleanup_cache_object(obj);
}
obj = cache_pop(co->cache_cache);
sconf->max_cache_object_size = DEFAULT_MAX_CACHE_OBJECT_SIZE;
/* Number of objects in the cache */
sconf->max_object_cnt = DEFAULT_MAX_OBJECT_CNT;
- sconf->object_cnt = 0;
/* Size of the cache in bytes */
sconf->max_cache_size = DEFAULT_MAX_CACHE_SIZE;
- sconf->cache_size = 0;
sconf->cache_cache = NULL;
sconf->cache_remove_algorithm = memcache_gdsf_algorithm;
sconf->max_streaming_buffer_size = DEFAULT_MAX_STREAMING_BUFFER_SIZE;
return sconf;
}
-static int create_entity(cache_handle_t *h, request_rec *r,
- const char *type,
- const char *key,
- apr_off_t len)
+static int create_entity(cache_handle_t *h, cache_type_e type_e,
+ request_rec *r, const char *key, apr_off_t len)
{
cache_object_t *obj, *tmp_obj;
mem_cache_object_t *mobj;
- cache_type_e type_e;
apr_size_t key_len;
- if (!strcasecmp(type, "mem")) {
- type_e = CACHE_TYPE_HEAP;
- }
- else if (!strcasecmp(type, "fd")) {
- type_e = CACHE_TYPE_FILE;
- }
- else {
- return DECLINED;
- }
-
if (len == -1) {
/* Caching a streaming response. Assume the response is
* less than or equal to max_streaming_buffer_size. We will
- * correct all the cache size counters in write_body once
+ * correct all the cache size counters in store_body once
         * we know exactly how much we are caching.
*/
len = sconf->max_streaming_buffer_size;
}
/* Finish initing the cache object */
-#ifdef USE_ATOMICS
apr_atomic_set(&obj->refcount, 1);
-#else
- obj->refcount = 1;
-#endif
mobj->total_refs = 1;
obj->complete = 0;
obj->cleanup = 0;
if (!tmp_obj) {
cache_insert(sconf->cache_cache, obj);
- sconf->object_cnt++;
- /* Safe cast: Must fit in cache_size or alloc would have failed */
- sconf->cache_size += (apr_size_t)len;
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
/* Populate the cache handle */
h->cache_obj = obj;
- h->read_body = &read_body;
- h->read_headers = &read_headers;
- h->write_body = &write_body;
- h->write_headers = &write_headers;
- h->remove_entity = &remove_entity;
return OK;
}
-static int open_entity(cache_handle_t *h, request_rec *r, const char *type, const char *key)
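+/* Provider-facing wrappers around the shared create_entity(): "mem" caches
+ * the object on the heap (CACHE_TYPE_HEAP) while "fd" caches an open file
+ * descriptor (CACHE_TYPE_FILE). */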
+static int create_mem_entity(cache_handle_t *h, request_rec *r,
+ const char *key, apr_off_t len)
+{
+ return create_entity(h, CACHE_TYPE_HEAP, r, key, len);
+}
+
+static int create_fd_entity(cache_handle_t *h, request_rec *r,
+ const char *key, apr_off_t len)
+{
+ return create_entity(h, CACHE_TYPE_FILE, r, key, len);
+}
+
+static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
{
cache_object_t *obj;
/* Look up entity keyed to 'url' */
- if (strcasecmp(type, "mem") && strcasecmp(type, "fd")) {
- return DECLINED;
- }
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
}
if (obj) {
if (obj->complete) {
request_rec *rmain=r, *rtmp;
-#ifdef USE_ATOMICS
apr_atomic_inc(&obj->refcount);
-#else
- obj->refcount++;
-#endif
/* cache is worried about overall counts, not 'open' ones */
cache_update(sconf->cache_cache, obj);
}
/* Initialize the cache_handle */
- h->read_body = &read_body;
- h->read_headers = &read_headers;
- h->write_body = &write_body;
- h->write_headers = &write_headers;
- h->remove_entity = &remove_entity;
h->cache_obj = obj;
- h->req_hdrs = NULL; /* Pick these up in read_headers() */
+ h->req_hdrs = NULL; /* Pick these up in recall_headers() */
return OK;
}
* hash table.
*/
if (!obj->cleanup) {
- mem_cache_object_t *mobj = (mem_cache_object_t *) obj->vobj;
cache_remove(sconf->cache_cache, obj);
- sconf->object_cnt--;
- sconf->cache_size -= mobj->m_len;
obj->cleanup = 1;
ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, "gcing a cache entry");
}
return APR_SUCCESS;
}
/* Define request processing hook handlers */
-static int remove_url(const char *type, const char *key)
+static int remove_url(const char *key)
{
cache_object_t *obj;
- if (strcasecmp(type, "mem") && strcasecmp(type, "fd")) {
- return DECLINED;
- }
/* Order of the operations is important to avoid race conditions.
     * First, remove the object from the cache. Remember, all additions and
     * deletions from the cache are protected by sconf->lock.
mem_cache_object_t *mobj;
cache_remove(sconf->cache_cache, obj);
mobj = (mem_cache_object_t *) obj->vobj;
- sconf->object_cnt--;
- sconf->cache_size -= mobj->m_len;
-#ifdef USE_ATOMICS
/* Refcount increment in this case MUST be made under
* protection of the lock
*/
apr_atomic_inc(&obj->refcount);
-#else
- if (!obj->refcount) {
- cleanup_cache_object(obj);
- obj = NULL;
- }
-#endif
if (obj) {
obj->cleanup = 1;
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
}
-#ifdef USE_ATOMICS
if (obj) {
if (!apr_atomic_dec(&obj->refcount)) {
cleanup_cache_object(obj);
}
}
-#endif
return OK;
}
-static apr_status_t read_headers(cache_handle_t *h, request_rec *r)
+static apr_status_t recall_headers(cache_handle_t *h, request_rec *r)
{
int rc;
mem_cache_object_t *mobj = (mem_cache_object_t*) h->cache_obj->vobj;
return rc;
}
-static apr_status_t read_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
+static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
{
apr_bucket *b;
mem_cache_object_t *mobj = (mem_cache_object_t*) h->cache_obj->vobj;
}
-static apr_status_t write_headers(cache_handle_t *h, request_rec *r, cache_info *info)
+static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *info)
{
cache_object_t *obj = h->cache_obj;
mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
return APR_SUCCESS;
}
-static apr_status_t write_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
+static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
{
apr_status_t rv;
cache_object_t *obj = h->cache_obj;
* - the brigade is complete &&
* - the file_bucket is the last data bucket in the brigade
*/
- APR_BRIGADE_FOREACH(e, b) {
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
if (APR_BUCKET_IS_EOS(e)) {
eos = 1;
}
if (rv != APR_SUCCESS) {
return rv;
}
- apr_file_unset_inherit(tmpfile);
+ apr_file_inherit_unset(tmpfile);
apr_os_file_get(&(mobj->fd), tmpfile);
/* Open for business */
cur = (char*) mobj->m + obj->count;
    /* Iterate across the brigade and populate the cache storage */
- APR_BRIGADE_FOREACH(e, b) {
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
const char *s;
apr_size_t len;
(cache_object_t *) cache_find(sconf->cache_cache, obj->key);
if (tmp_obj) {
cache_remove(sconf->cache_cache, tmp_obj);
- sconf->object_cnt--;
- sconf->cache_size -= mobj->m_len;
tmp_obj->cleanup = 1;
if (!tmp_obj->refcount) {
cleanup_cache_object(tmp_obj);
}
mobj->m_len = obj->count;
cache_insert(sconf->cache_cache, obj);
- sconf->cache_size -= (mobj->m_len - obj->count);
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
}
static const char *set_max_streaming_buffer(cmd_parms *parms, void *dummy,
const char *arg)
{
- apr_off_t val;
+#if 0
char *err;
- val = (apr_off_t)strtol(arg, &err, 10);
- if (*err != 0) {
+ if (apr_strtoff(&sconf->max_streaming_buffer_size, arg, &err, 10) || *err) {
return "MCacheMaxStreamingBuffer value must be a number";
}
- sconf->max_streaming_buffer_size = val;
+#else
+ sconf->max_streaming_buffer_size = apr_atoi64(arg);
+#endif
return NULL;
}
{NULL}
};
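+/* As in mod_disk_cache, the initializer order must match the cache_provider
+ * structure in mod_cache.h; the "mem" and "fd" tables differ only in their
+ * create_entity entry. */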
+static const cache_provider cache_mem_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_mem_entity,
+ &open_entity,
+ &remove_url,
+};
+
+static const cache_provider cache_fd_provider =
+{
+ &remove_entity,
+ &store_headers,
+ &store_body,
+ &recall_headers,
+ &recall_body,
+ &create_fd_entity,
+ &open_entity,
+ &remove_url,
+};
+
static void register_hooks(apr_pool_t *p)
{
ap_hook_post_config(mem_cache_post_config, NULL, NULL, APR_HOOK_MIDDLE);
/* cache initializer */
/* cache_hook_init(cache_mem_init, NULL, NULL, APR_HOOK_MIDDLE); */
+ /*
cache_hook_create_entity(create_entity, NULL, NULL, APR_HOOK_MIDDLE);
cache_hook_open_entity(open_entity, NULL, NULL, APR_HOOK_MIDDLE);
cache_hook_remove_url(remove_url, NULL, NULL, APR_HOOK_MIDDLE);
+ */
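+    /* Register both in-memory flavours as providers; they are selected by
+     * name via CacheEnable (e.g. "CacheEnable mem /"). */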
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "mem", "0",
+ &cache_mem_provider);
+ ap_register_provider(p, CACHE_PROVIDER_GROUP, "fd", "0",
+ &cache_fd_provider);
}
module AP_MODULE_DECLARE_DATA mem_cache_module =