/* Initialize a shared context able to hold <maxblocks> blocks of <blocksize>
 * bytes each, with <extra> bytes reserved after the context structure for the
 * caller's own data, and <maxobjsz> capping the size of a single stored
 * object. The allocated context is returned via <orig_shctx>.
 * Returns a value <= 0 on error (e.g. SHCTX_E_INIT_LOCK).
 */
int shctx_init(struct shared_context **orig_shctx,
               int maxblocks, int blocksize, unsigned int maxobjsz,
               int extra);

/* Reserve a new row able to hold <data_len> bytes if <last> is NULL, or
 * append blocks to the row whose last block is <last> otherwise.
 */
struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
                                           struct shared_block *last, int data_len);

/* Detach the row starting at block <first> from the hot list. */
void shctx_row_detach(struct shared_context *shctx, struct shared_block *first);
/* Lock functions */
-extern int use_shared_mem;
-
static inline void shctx_rdlock(struct shared_context *shctx)
{
- if (use_shared_mem)
- HA_RWLOCK_RDLOCK(SHCTX_LOCK, &shctx->lock);
+ HA_RWLOCK_RDLOCK(SHCTX_LOCK, &shctx->lock);
}
/* Release the shared context's read lock. */
static inline void shctx_rdunlock(struct shared_context *shctx)
{
	HA_RWLOCK_RDUNLOCK(SHCTX_LOCK, &shctx->lock);
}
/* Take the shared context's lock for writing. */
static inline void shctx_wrlock(struct shared_context *shctx)
{
	HA_RWLOCK_WRLOCK(SHCTX_LOCK, &shctx->lock);
}
/* Release the shared context's write lock. */
static inline void shctx_wrunlock(struct shared_context *shctx)
{
	HA_RWLOCK_WRUNLOCK(SHCTX_LOCK, &shctx->lock);
}
/* List Macros */
list_for_each_entry_safe(cache_config, back, &caches_config, list) {
/* The former trailing 'shared' flag was dropped from shctx_init(); the
 * cache's shared context is now always created shared.
 */
ret_shctx = shctx_init(&shctx, cache_config->maxblocks, CACHE_BLOCKSIZE,
                       cache_config->maxobjsz, sizeof(struct cache));
if (ret_shctx <= 0) {
if (ret_shctx == SHCTX_E_INIT_LOCK)
#include <haproxy/list.h>
#include <haproxy/shctx.h>
-int use_shared_mem = 0;
-
/*
 * Allocate and initialize a shared context of <maxblocks> blocks of
 * <blocksize> bytes, with <extra> bytes reserved for the caller after the
 * context structure. Returns 0 when <maxblocks> is not positive, and a
 * value <= 0 on allocation/lock initialization failure.
 */
int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize,
- unsigned int maxobjsz, int extra, int shared)
+ unsigned int maxobjsz, int extra)
{
int i;
struct shared_context *shctx;
int ret;
void *cur;
- int maptype = MAP_PRIVATE;
+ int maptype = MAP_SHARED;
if (maxblocks <= 0)
return 0;
blocksize = (blocksize + sizeof(void *) - 1) & -sizeof(void *);
extra = (extra + sizeof(void *) - 1) & -sizeof(void *);
- if (shared) {
- maptype = MAP_SHARED;
- use_shared_mem = 1;
- }
-
shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)),
PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
if (!shctx || shctx == MAP_FAILED) {
if (!ssl_shctx && global.tune.sslcachesize) {
/* shctx_init() no longer takes a 'shared' flag: the SSL session cache is
 * always created in shared memory regardless of nbthread.
 */
alloc_ctx = shctx_init(&ssl_shctx, global.tune.sslcachesize,
                       sizeof(struct sh_ssl_sess_hdr) + SHSESS_BLOCK_MIN_SIZE, -1,
                       sizeof(*sh_ssl_sess_tree));
if (alloc_ctx <= 0) {
if (alloc_ctx == SHCTX_E_INIT_LOCK)
ha_alert("Unable to initialize the lock for the shared SSL session cache. You can retry using the global statement 'tune.ssl.force-private-cache' but it could increase CPU usage due to renegotiations if nbproc > 1.\n");