extern int use_shared_mem;
-#define shctx_lock(shctx) if (use_shared_mem) HA_SPIN_LOCK(SHCTX_LOCK, &shctx->lock)
-#define shctx_unlock(shctx) if (use_shared_mem) HA_SPIN_UNLOCK(SHCTX_LOCK, &shctx->lock)
-
+/* Take the shared context's lock for reading. No-op when the context is
+ * not actually shared between processes (use_shared_mem unset), in which
+ * case no concurrent access is possible and locking would be pure cost. */
+static inline void shctx_rdlock(struct shared_context *shctx)
+{
+ if (use_shared_mem)
+ HA_RWLOCK_RDLOCK(SHCTX_LOCK, &shctx->lock);
+}
+/* Release a read lock previously taken with shctx_rdlock(). Must mirror
+ * its condition: no-op when use_shared_mem is unset so lock/unlock pairs
+ * stay balanced. */
+static inline void shctx_rdunlock(struct shared_context *shctx)
+{
+ if (use_shared_mem)
+ HA_RWLOCK_RDUNLOCK(SHCTX_LOCK, &shctx->lock);
+}
+/* Take the shared context's lock for writing (exclusive). Required around
+ * any operation that modifies the context's block lists (reserve/detach/
+ * reattach). No-op when use_shared_mem is unset. */
+static inline void shctx_wrlock(struct shared_context *shctx)
+{
+ if (use_shared_mem)
+ HA_RWLOCK_WRLOCK(SHCTX_LOCK, &shctx->lock);
+}
+/* Release a write lock previously taken with shctx_wrlock(). Must mirror
+ * its condition: no-op when use_shared_mem is unset so lock/unlock pairs
+ * stay balanced. */
+static inline void shctx_wrunlock(struct shared_context *shctx)
+{
+ if (use_shared_mem)
+ HA_RWLOCK_WRUNLOCK(SHCTX_LOCK, &shctx->lock);
+}
/* List Macros */
/* Everything should be released in the http_end filter, but we need to do it
* there too, in case of errors */
if (st && st->first_block) {
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
shctx_row_reattach(shctx, st->first_block);
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
}
if (st) {
pool_free(pool_head_cache_st, st);
eb32_delete(&object->eb);
object->eb.key = 0;
cache_wrunlock(cache);
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
shctx_row_reattach(shctx, st->first_block);
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
pool_free(pool_head_cache_st, st);
}
}
end:
- shctx_lock(shctx);
+
+ shctx_wrlock(shctx);
fb = shctx_row_reserve_hot(shctx, st->first_block, trash.data);
if (!fb) {
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
goto no_cache;
}
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
ret = shctx_row_data_append(shctx, st->first_block,
(unsigned char *)b_head(&trash), b_data(&trash));
object = (struct cache_entry *)st->first_block->data;
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
/* The whole payload was cached, the entry can now be used. */
object->complete = 1;
/* remove from the hotlist */
shctx_row_reattach(shctx, st->first_block);
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
}
if (st) {
}
cache_wrunlock(cache);
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry));
+ shctx_wrunlock(shctx);
if (!first) {
- shctx_unlock(shctx);
goto out;
}
- shctx_unlock(shctx);
+
/* the received memory is not initialized, we need at least to mark
* the object as not indexed yet.
*/
if (set_secondary_key_encoding(htx, object->secondary_key))
goto out;
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
if (!shctx_row_reserve_hot(shctx, first, trash.data)) {
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
goto out;
}
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
/* cache the headers in a http action because it allows to chose what
* to cache, for example you might want to cache a response before
cache_wrunlock(cache);
object->eb.key = 0;
}
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
shctx_row_reattach(shctx, first);
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
}
return ACT_RET_CONT;
struct shared_context *shctx = shctx_ptr(cache);
struct shared_block *first = block_ptr(cache_ptr);
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
shctx_row_reattach(shctx, first);
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
}
else
_HA_ATOMIC_INC(&px->be_counters.p.http.cache_lookups);
- shctx_lock(shctx_ptr(cache));
cache_rdlock(cache);
res = entry_exist(cache, s->txn->cache_hash, 0);
/* We must not use an entry that is not complete but the check will be
if (res) {
struct appctx *appctx;
entry_block = block_ptr(res);
+ shctx_wrlock(shctx);
shctx_row_detach(shctx, entry_block);
+ shctx_wrunlock(shctx);
cache_rdunlock(cache);
- shctx_unlock(shctx);
/* In case of Vary, we could have multiple entries with the same
* primary hash. We need to calculate the secondary hash in order
* to find the actual entry we want (if it exists). */
if (res->secondary_key_signature) {
if (!http_request_build_secondary_key(s, res->secondary_key_signature)) {
- shctx_lock(shctx);
cache_rdlock(cache);
sec_entry = secondary_entry_exist(cache, res,
s->txn->cache_secondary_hash, 0);
if (sec_entry && sec_entry != res) {
/* The wrong row was added to the hot list. */
+ shctx_wrlock(shctx);
shctx_row_reattach(shctx, entry_block);
entry_block = block_ptr(sec_entry);
shctx_row_detach(shctx, entry_block);
+ shctx_wrunlock(shctx);
}
res = sec_entry;
cache_rdunlock(cache);
- shctx_unlock(shctx);
}
else
res = NULL;
* can't use the cache's entry and must forward the request to
* the server. */
if (!res || !res->complete) {
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
shctx_row_reattach(shctx, entry_block);
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
return ACT_RET_CONT;
}
return ACT_RET_CONT;
} else {
s->target = NULL;
- shctx_lock(shctx);
+ shctx_wrlock(shctx);
shctx_row_reattach(shctx, entry_block);
- shctx_unlock(shctx);
+ shctx_wrunlock(shctx);
return ACT_RET_CONT;
}
}
cache_rdunlock(cache);
- shctx_unlock(shctx);
/* Shared context does not need to be locked while we calculate the
* secondary hash. */
unsigned int i;
struct shared_context *shctx = shctx_ptr(cache);
- shctx_lock(shctx);
next_key = ctx->next_key;
if (!next_key) {
+ shctx_rdlock(shctx);
chunk_printf(buf, "%p: %s (shctx:%p, available blocks:%d)\n", cache, cache->id, shctx_ptr(cache), shctx_ptr(cache)->nbav);
+ shctx_rdunlock(shctx);
if (applet_putchk(appctx, buf) == -1) {
- shctx_unlock(shctx);
goto yield;
}
}
- shctx_unlock(shctx);
ctx->cache = cache;
i2d_SSL_SESSION(sess, &p);
- shctx_lock(ssl_shctx);
+ shctx_wrlock(ssl_shctx);
/* store to cache */
sh_ssl_sess_store(encid, encsess, data_len);
- shctx_unlock(ssl_shctx);
+ shctx_wrunlock(ssl_shctx);
err:
/* reset original length values */
SSL_SESSION_set1_id(sess, encid, sid_length);
}
/* lock cache */
- shctx_lock(ssl_shctx);
+ shctx_wrlock(ssl_shctx);
/* lookup for session */
sh_ssl_sess = sh_ssl_sess_tree_lookup(key);
if (!sh_ssl_sess) {
/* no session found: unlock cache and exit */
- shctx_unlock(ssl_shctx);
+ shctx_wrunlock(ssl_shctx);
_HA_ATOMIC_INC(&global.shctx_misses);
return NULL;
}
shctx_row_data_get(ssl_shctx, first, data, sizeof(struct sh_ssl_sess_hdr), first->len-sizeof(struct sh_ssl_sess_hdr));
- shctx_unlock(ssl_shctx);
+ shctx_wrunlock(ssl_shctx);
/* decode ASN1 session */
p = data;
sid_data = tmpkey;
}
- shctx_lock(ssl_shctx);
+ shctx_wrlock(ssl_shctx);
/* lookup for session */
sh_ssl_sess = sh_ssl_sess_tree_lookup(sid_data);
}
/* unlock cache */
- shctx_unlock(ssl_shctx);
+ shctx_wrunlock(ssl_shctx);
}
/* Set session cache mode to server and disable openssl internal cache.