return NULL;
entry = eb32_entry(node, struct cache_entry, eb);
- if (entry->expire > now.tv_sec)
+ if (entry->expire > now.tv_sec) {
return entry;
- else
+ } else {
eb32_delete(node);
+ entry->eb.key = 0;
+ }
return NULL;
}
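The hunk above (entry_exist() in src/cache.c) establishes the invariant the rest of the patch relies on: eb.key == 0 means "this entry is not linked in the lookup tree", so every path that unlinks an entry must zero the key right after eb32_delete(). Below is a minimal sketch of that pattern, assuming the standalone ebtree API (the eb32tree.h include path and the EB_ROOT_UNIQUE initializer); demo_entry, demo_tree and demo_lookup are illustrative names, not HAProxy's:

#include <time.h>
#include <eb32tree.h>                  /* assumed include path */

struct demo_entry {
	struct eb32_node eb;           /* eb.key == 0 <=> not in the tree */
	time_t expire;
};

static struct eb_root demo_tree = EB_ROOT_UNIQUE;

/* Look up <key>; an expired entry is evicted on the spot and marked
 * detached, mirroring entry_exist() above. */
static struct demo_entry *demo_lookup(unsigned int key, time_t now_sec)
{
	struct eb32_node *node = eb32_lookup(&demo_tree, key);
	struct demo_entry *entry;

	if (!node)
		return NULL;

	entry = eb32_entry(node, struct demo_entry, eb);
	if (entry->expire > now_sec)
		return entry;

	eb32_delete(node);             /* unlink the expired entry... */
	entry->eb.key = 0;             /* ...and record that it is unlinked */
	return NULL;
}

Note that the scheme only works if 0 is never a legitimate key; the patch evidently reserves it as the "unset" value.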
{
struct cache_st *st = filter->ctx;
struct shared_context *shctx = shctx_ptr((struct cache *)filter->config->conf);
+ struct cache_entry *object;
int ret;
/* disable buffering if too much data (never greater than a buffer size) */
if (len - st->hdrs_len > global.tune.bufsize - global.tune.maxrewrite - st->first_block->len) {
disable_cache:
+ object = (struct cache_entry *)st->first_block->data;
filter->ctx = NULL; /* disable cache */
shctx_lock(shctx);
shctx_row_dec_hot(shctx, st->first_block);
+ object->eb.key = 0;
shctx_unlock(shctx);
pool_free2(pool2_cache_st, st);
} else {
/* does not need to test if the insertion worked; if it
 * doesn't, the blocks will be reused anyway */
shctx_lock(shctx);
- eb32_insert(&cache->entries, &object->eb);
- shctx_unlock(shctx);
-
+ if (eb32_insert(&cache->entries, &object->eb) != &object->eb) {
+ object->eb.key = 0;
+ }
/* remove from the hotlist */
- shctx_lock(shctx);
shctx_row_dec_hot(shctx, st->first_block);
shctx_unlock(shctx);
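Two changes land in this hunk (apparently cache_store_http_end()): the tree insertion and the hotlist release now happen under a single shctx critical section instead of two, and the return value of eb32_insert() is checked. On a unique tree, eb32_insert() returns the node that owns the key after the call, so getting back anything other than &object->eb means another entry already holds the key and the new object was never linked. A sketch, reusing demo_tree/demo_entry from above and assuming the tree was created with EB_ROOT_UNIQUE (which the check implies):

/* Publish <obj> under <key>; returns 1 if it is now in the tree. */
static int demo_publish(struct demo_entry *obj, unsigned int key)
{
	obj->eb.key = key;
	if (eb32_insert(&demo_tree, &obj->eb) != &obj->eb) {
		obj->eb.key = 0;       /* not linked: keep the invariant */
		return 0;
	}
	return 1;
}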
{
if (first == block) {
struct cache_entry *object = (struct cache_entry *)first->data;
- eb32_delete(&object->eb);
+ if (object->eb.key) {
+ eb32_delete(&object->eb);
+ object->eb.key = 0;
+ }
}
}
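This hunk appears to be the shctx reclaim callback, invoked when the shared allocator needs a row back. By the time it runs, the entry may be in any state: never inserted, already evicted by entry_exist(), or still linked. The key test makes the unlink idempotent, which matters because calling eb32_delete() on a node that is not in a tree is unsafe. Same illustrative names as the sketches above:

/* Reclaim a row's entry: unlink it only if it is actually linked.
 * demo_reclaim() stands in for the real callback, whose signature is
 * only partially visible in the patch. */
static void demo_reclaim(struct demo_entry *object)
{
	if (object->eb.key) {          /* linked in the tree? */
		eb32_delete(&object->eb);
		object->eb.key = 0;    /* detached from now on */
	}
}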
if (entry_exist((struct cache *)rule->arg.act.p[0], object)) {
shctx_unlock(shctx);
if (filter->ctx) {
+ object->eb.key = 0;
pool_free2(pool2_cache_st, filter->ctx);
filter->ctx = NULL;
}
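This hunk covers the race the insert check cannot: by the time this stream is ready to store its response, another stream may already have cached a live entry for the same key, so the store is abandoned and the per-stream state (cache_st) is freed. The object here was never linked, so zeroing the key is all the tree bookkeeping needed. An illustrative sketch (demo_ctx and free() stand in for cache_st and pool_free2(), which are HAProxy-specific):

#include <stdlib.h>

struct demo_ctx { int hdrs_len; };     /* stands in for struct cache_st */

/* Abandon a pending store because a live entry already owns the key. */
static void demo_abandon_store(struct demo_entry *obj, struct demo_ctx **ctx)
{
	obj->eb.key = 0;       /* never linked: only the flag to undo */
	free(*ctx);            /* stands in for pool_free2(pool2_cache_st, ...) */
	*ctx = NULL;           /* like filter->ctx = NULL */
}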
out:
/* if the object was not cached, release its blocks */
if (first) {
+ object = (struct cache_entry *)first->data;
+
shctx_lock(shctx);
+ first->len = 0;
+ object->eb.key = 0;
shctx_row_dec_hot(shctx, first);
shctx_unlock(shctx);
}
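The out: path neutralizes a half-built row before handing its blocks back: the row length is reset so a later reuse cannot mistake the partial payload for a stored object, and the key is zeroed to (re)establish the "not linked" state for the reclaim callback; no eb32_delete() is needed because the object was never inserted on this path. A sketch using the shctx calls visible in the patch (shctx_lock(), shctx_row_dec_hot()); the include path and struct shared_block's data/len fields are assumed from HAProxy 1.8:

#include <proto/shctx.h>               /* assumed include path */

/* Release a row whose object will not be cached. */
static void demo_release_row(struct shared_context *shctx,
                             struct shared_block *first)
{
	struct demo_entry *object = (struct demo_entry *)first->data;

	shctx_lock(shctx);
	first->len = 0;                   /* nothing valid in this row */
	object->eb.key = 0;               /* "not in the tree" marker */
	shctx_row_dec_hot(shctx, first);  /* give the blocks back */
	shctx_unlock(shctx);
}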