struct shared_context {
__decl_thread(HA_SPINLOCK_T lock);
struct list avail; /* list for active and free blocks */
- struct list hot; /* list for locked blocks */
unsigned int nbav; /* number of available blocks */
unsigned int max_obj_size; /* maximum object size (in bytes). */
void (*free_block)(struct shared_block *first, struct shared_block *block, void *data);
int extra, int shared);
struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
struct shared_block *last, int data_len);
-void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first);
-void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first);
+void shctx_row_detach(struct shared_context *shctx, struct shared_block *first);
+void shctx_row_reattach(struct shared_context *shctx, struct shared_block *first);
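+
+/* Typical usage pattern (sketch): a row is detached under the shctx lock,
+ * used without holding the lock, then given back under the lock:
+ *
+ *     shctx_lock(shctx);
+ *     shctx_row_detach(shctx, first);
+ *     shctx_unlock(shctx);
+ *     ... read or append data in the row's blocks ...
+ *     shctx_lock(shctx);
+ *     shctx_row_reattach(shctx, first);
+ *     shctx_unlock(shctx);
+ */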
int shctx_row_data_append(struct shared_context *shctx,
struct shared_block *first,
unsigned char *data, int len);
- * so between <head> and the next element after <head>.
+ * so at the end of the row whose first block is <first>, right before
+ * <first> in the circular list.
*/
static inline void shctx_block_append_hot(struct shared_context *shctx,
- struct list *head,
+ struct shared_block *first,
struct shared_block *s)
{
shctx->nbav--;
LIST_DELETE(&s->list);
- LIST_INSERT(head, &s->list);
+ LIST_APPEND(&first->list, &s->list);
}
-static inline void shctx_block_set_hot(struct shared_context *shctx,
- struct shared_block *s)
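+/*
+ * Remove <s> from the list it is linked in, reinitialize it as a
+ * standalone element and return it.
+ */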
+static inline struct shared_block *shctx_block_detach(struct shared_context *shctx,
+ struct shared_block *s)
{
shctx->nbav--;
LIST_DELETE(&s->list);
- LIST_APPEND(&shctx->hot, &s->list);
-}
-
-static inline void shctx_block_set_avail(struct shared_context *shctx,
- struct shared_block *s)
-{
- shctx->nbav++;
- LIST_DELETE(&s->list);
- LIST_APPEND(&shctx->avail, &s->list);
+ LIST_INIT(&s->list);
+ return s;
}
#endif /* __HAPROXY_SHCTX_H */
*/
struct cache_st {
struct shared_block *first_block;
+ struct list detached_head; /* list head for a row detached from the shctx */
};
#define DEFAULT_MAX_SECONDARY_ENTRY 10
* there too, in case of errors */
if (st && st->first_block) {
shctx_lock(shctx);
- shctx_row_dec_hot(shctx, st->first_block);
+ shctx_row_reattach(shctx, st->first_block);
shctx_unlock(shctx);
}
if (st) {
object->eb.key = 0;
cache_wrunlock(cache);
shctx_lock(shctx);
- shctx_row_dec_hot(shctx, st->first_block);
+ shctx_row_reattach(shctx, st->first_block);
shctx_unlock(shctx);
pool_free(pool_head_cache_st, st);
}
/* The whole payload was cached, the entry can now be used. */
object->complete = 1;
- /* remove from the hotlist */
+ /* give the row back to the avail list */
- shctx_row_dec_hot(shctx, st->first_block);
+ shctx_row_reattach(shctx, st->first_block);
shctx_unlock(shctx);
}
/* register the buffer in the filter ctx for filling it with data */
if (cache_ctx) {
cache_ctx->first_block = first;
+ LIST_INIT(&cache_ctx->detached_head);
/* store latest value and expiration time */
object->latest_validation = date.tv_sec;
object->expire = date.tv_sec + effective_maxage;
object->eb.key = 0;
}
shctx_lock(shctx);
- shctx_row_dec_hot(shctx, first);
+ shctx_row_reattach(shctx, first);
shctx_unlock(shctx);
}
struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
struct cache_entry *cache_ptr = ctx->entry;
struct cache *cache = cconf->c.cache;
+ struct shared_context *shctx = shctx_ptr(cache);
struct shared_block *first = block_ptr(cache_ptr);
- shctx_lock(shctx_ptr(cache));
- shctx_row_dec_hot(shctx_ptr(cache), first);
- shctx_unlock(shctx_ptr(cache));
+ shctx_lock(shctx);
+ shctx_row_reattach(shctx, first);
+ shctx_unlock(shctx);
}
struct cache_entry *res, *sec_entry = NULL;
struct cache_flt_conf *cconf = rule->arg.act.p[0];
struct cache *cache = cconf->c.cache;
+ struct shared_context *shctx = shctx_ptr(cache);
struct shared_block *entry_block;
if (res) {
struct appctx *appctx;
entry_block = block_ptr(res);
- shctx_row_inc_hot(shctx_ptr(cache), entry_block);
+ shctx_row_detach(shctx, entry_block);
cache_rdunlock(cache);
- shctx_unlock(shctx_ptr(cache));
+ shctx_unlock(shctx);
/* In case of Vary, we could have multiple entries with the same
* primary hash. We need to calculate the secondary hash in order
* to find the actual entry we want (if it exists). */
if (res->secondary_key_signature) {
if (!http_request_build_secondary_key(s, res->secondary_key_signature)) {
- shctx_lock(shctx_ptr(cache));
+ shctx_lock(shctx);
cache_rdlock(cache);
sec_entry = secondary_entry_exist(cache, res,
s->txn->cache_secondary_hash, 0);
if (sec_entry && sec_entry != res) {
- /* The wrong row was added to the hot list. */
+ /* The wrong row was detached. */
- shctx_row_dec_hot(shctx_ptr(cache), entry_block);
+ shctx_row_reattach(shctx, entry_block);
entry_block = block_ptr(sec_entry);
- shctx_row_inc_hot(shctx_ptr(cache), entry_block);
+ shctx_row_detach(shctx, entry_block);
}
res = sec_entry;
cache_rdunlock(cache);
- shctx_unlock(shctx_ptr(cache));
+ shctx_unlock(shctx);
}
else
res = NULL;
* can't use the cache's entry and must forward the request to
* the server. */
if (!res || !res->complete) {
- shctx_lock(shctx_ptr(cache));
- shctx_row_dec_hot(shctx_ptr(cache), entry_block);
- shctx_unlock(shctx_ptr(cache));
+ shctx_lock(shctx);
+ shctx_row_reattach(shctx, entry_block);
+ shctx_unlock(shctx);
return ACT_RET_CONT;
}
return ACT_RET_CONT;
} else {
s->target = NULL;
- shctx_lock(shctx_ptr(cache));
- shctx_row_dec_hot(shctx_ptr(cache), entry_block);
- shctx_unlock(shctx_ptr(cache));
+ shctx_lock(shctx);
+ shctx_row_reattach(shctx, entry_block);
+ shctx_unlock(shctx);
return ACT_RET_CONT;
}
}
cache_rdunlock(cache);
- shctx_unlock(shctx_ptr(cache));
+ shctx_unlock(shctx);
/* Shared context does not need to be locked while we calculate the
* secondary hash. */
return NULL;
}
- /* Initialize the first block of a new row. */
- if (!first) {
- ret = LIST_NEXT(&shctx->avail, struct shared_block*, list);
- ret->block_count = 0;
- ret->last_append = NULL;
- ret->refcount = 1;
- }
-
list_for_each_entry_safe(block, sblock, &shctx->avail, list) {
/* release callback */
shctx->free_block(block, block, shctx->cb_data);
block->len = 0;
- if (last) {
- shctx_block_append_hot(shctx, &last->list, block);
- last = block;
+ if (ret) {
+ shctx_block_append_hot(shctx, ret, block);
if (!remain) {
first->last_append = block;
remain = 1;
}
- } else
- shctx_block_set_hot(shctx, block);
+ } else {
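+ /* No row head yet: detach the first free block and make it the
+  * head of the new row. */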
+ ret = shctx_block_detach(shctx, block);
+ ret->len = 0;
+ ret->block_count = 0;
+ ret->last_append = NULL;
+ ret->refcount = 1;
+ }
++ret->block_count;
/*
- * if the refcount is 0 move the row to the hot list. Increment the refcount
+ * If the refcount is 0, detach the row from the avail list. Increment the refcount.
*/
-void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first)
+void shctx_row_detach(struct shared_context *shctx, struct shared_block *first)
{
if (first->refcount <= 0) {
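+ /* Unlink the whole row from the avail list: link the element preceding
+  * <first> directly to the one following the row's last reserved block. */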
first->list.p->n = first->last_reserved->list.n;
first->last_reserved->list.n->p = first->list.p;
- /* Reattach to hot list */
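+ /* Close the row on itself so that it forms a standalone circular list */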
first->list.p = &first->last_reserved->list;
- LIST_SPLICE_END_DETACHED(&shctx->hot, &first->list);
+ first->last_reserved->list.n = &first->list;
shctx->nbav -= first->block_count;
}
/*
* Decrement the refcount and move the row to the end of the avail list if it reaches 0.
*/
-void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first)
+void shctx_row_reattach(struct shared_context *shctx, struct shared_block *first)
{
first->refcount--;
BUG_ON(!first->last_reserved);
- /* Detach row from hot list, link first item's prev to last
- * item's next. This allows to use the LIST_SPLICE_END_DETACHED
- * macro. */
- first->list.p->n = first->last_reserved->list.n;
- first->last_reserved->list.n->p = first->list.p;
-
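+ /* The row was closed on itself by shctx_row_detach(), so it can be
+  * spliced back at the end of the avail list. */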
/* Reattach to avail list */
first->list.p = &first->last_reserved->list;
LIST_SPLICE_END_DETACHED(&shctx->avail, &first->list);
return (first->block_count * shctx->block_size - first->len) - len;
block = first->last_append ? first->last_append : first;
- list_for_each_entry_from(block, &shctx->hot, list) {
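+ /* The row is a standalone circular list while detached: iterate from
+  * <block> until we wrap back around to <first>. */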
+ do {
/* end of copy */
if (len <= 0)
break;
len -= remain;
first->len += remain; /* update len in the head of the row */
first->last_append = block;
- }
+
+ block = LIST_ELEM(block->list.n, struct shared_block*, list);
+ } while (block != first);
return len;
}
block = first;
count = 0;
/* Pass through the blocks to copy them */
- list_for_each_entry_from(block, &shctx->hot, list) {
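+ /* Same circular walk as in the append loop: stop after <block_count>
+  * blocks or when wrapping back to <first>. */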
+ do {
if (count >= first->block_count || len <= 0)
break;
dst += size;
len -= size;
start = 0;
- }
+
+ block = LIST_ELEM(block->list.n, struct shared_block*, list);
+ } while (block != first);
return len;
}
shctx->nbav = 0;
LIST_INIT(&shctx->avail);
- LIST_INIT(&shctx->hot);
shctx->block_size = blocksize;
shctx->max_obj_size = maxobjsz == (unsigned int)-1 ? 0 : maxobjsz;
/* NOTE: The row can't be in use because the read & write functions take the lock */
/* release the reserved row */
first->len = 0; /* the len must be reset so that the release callback is not called on the old data */
- shctx_row_dec_hot(ssl_shctx, first);
+ shctx_row_reattach(ssl_shctx, first);
/* replace the previous session already in the tree */
sh_ssl_sess = oldsh_ssl_sess;
/* ignore the previous session data, only use the header */
first = sh_ssl_sess_first_block(sh_ssl_sess);
- shctx_row_inc_hot(ssl_shctx, first);
+ shctx_row_detach(ssl_shctx, first);
first->len = sizeof(struct sh_ssl_sess_hdr);
}
if (shctx_row_data_append(ssl_shctx, first, data, data_len) < 0) {
- shctx_row_dec_hot(ssl_shctx, first);
+ shctx_row_reattach(ssl_shctx, first);
return 0;
}
- shctx_row_dec_hot(ssl_shctx, first);
+ shctx_row_reattach(ssl_shctx, first);
return 1;
}