#ifndef SHCTX_H
#define SHCTX_H
+#include <common/mini-clist.h>
#include <types/shctx.h>
-#include <openssl/ssl.h>
#include <stdint.h>
#ifndef USE_PRIVATE_CACHE
#endif
#endif
-
-/* Allocate shared memory context.
- * <size> is the number of allocated blocks into cache (default 128 bytes)
- * A block is large enough to contain a classic session (without client cert)
- * If <size> is set less or equal to 0, ssl cache is disabled.
- * Set <use_shared_memory> to 1 to use a mapped shared memory instead
- * of private. (ignored if compiled with USE_PRIVATE_CACHE=1).
- * Returns: -1 on alloc failure, <size> if it performs context alloc,
- * and 0 if cache is already allocated.
- */
-
-int shared_context_init(struct shared_context **orig_shctx, int size, int shared);
-
-/* Set shared cache callbacks on an ssl context.
- * Set session cache mode to server and disable openssl internal cache.
- * Shared context MUST be firstly initialized */
-void shared_context_set_cache(SSL_CTX *ctx);
-
-
-int shsess_free(struct shared_context *shctx, struct shared_session *shsess);
-
-struct shared_session *shsess_get_next(struct shared_context *shctx, int data_len);
-
-int shsess_store(struct shared_context *shctx, unsigned char *s_id, unsigned char *data, int data_len);
+int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize, int extra, int shared);
+struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx, int data_len);
+void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first);
+void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first);
+int shctx_row_data_append(struct shared_context *shctx,
+ struct shared_block *first, unsigned char *data, int len);
+int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
+ unsigned char *dst, int offset, int len);
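/* Illustrative usage sketch (not part of the patch): how a consumer of the
 * generic API above could store a blob in the cache. The helper name
 * example_store_blob() is hypothetical; it assumes <shctx> was created with
 * shctx_init() and that the caller holds the shared-context lock.
 */
static int example_store_blob(struct shared_context *shctx,
                              unsigned char *blob, int blob_len)
{
	struct shared_block *first;

	/* reserve enough blocks and pin them in the hot list */
	first = shctx_row_reserve_hot(shctx, blob_len);
	if (!first)
		return 0;	/* not enough available blocks */

	/* copy the payload; a negative return means the row is too small */
	if (shctx_row_data_append(shctx, first, blob, blob_len) < 0) {
		shctx_row_dec_hot(shctx, first);
		return 0;
	}

	/* unpin the row: it moves back to the avail list but keeps its data
	 * until its blocks get recycled by a later reservation */
	shctx_row_dec_hot(shctx, first);
	return 1;
}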
/* Lock functions */
/* List Macros */
-#define shblock_unset(s) (s)->n->p = (s)->p; \
- (s)->p->n = (s)->n;
-
-static inline void shblock_set_free(struct shared_context *shctx,
+static inline void shctx_block_set_hot(struct shared_context *shctx,
struct shared_block *s)
{
- shblock_unset(s);
- (s)->n = &shctx->free;
- (s)->p = shctx->free.p;
- shctx->free.p->n = s;
- shctx->free.p = s;
+ shctx->nbav--;
+ LIST_DEL(&s->list);
+ LIST_ADDQ(&shctx->hot, &s->list);
}
-static inline void shblock_set_active(struct shared_context *shctx,
+static inline void shctx_block_set_avail(struct shared_context *shctx,
struct shared_block *s)
{
- shblock_unset(s)
- (s)->n = &shctx->active;
- (s)->p = shctx->active.p;
- shctx->active.p->n = s;
- shctx->active.p = s;
+ shctx->nbav++;
+ LIST_DEL(&s->list);
+ LIST_ADDQ(&shctx->avail, &s->list);
}
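/* Illustrative debug helper (not part of the patch): the two functions above
 * maintain the invariant that <nbav> equals the number of blocks linked in
 * the <avail> list. A hypothetical checker using the mini-clist iterator:
 */
static inline int shctx_example_check_nbav(struct shared_context *shctx)
{
	struct shared_block *b;
	unsigned int n = 0;

	list_for_each_entry(b, &shctx->avail, list)
		n++;
	return n == shctx->nbav;
}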
#endif /* SHCTX_H */
/* ssl shctx macro */
-#define shsess_tree_delete(s) ebmb_delete(&(s)->key);
-
-#define shsess_tree_insert(shctx, s) (struct shared_session *)ebmb_insert(&shctx->active.data.session.key.node.branches, \
- &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH);
-
-#define shsess_tree_lookup(shctx, k) (struct shared_session *)ebmb_lookup(&shctx->active.data.session.key.node.branches, \
- (k), SSL_MAX_SSL_SESSION_ID_LENGTH);
-
+#define sh_ssl_sess_tree_delete(s) ebmb_delete(&(s)->key);
+#define sh_ssl_sess_tree_insert(s) (struct sh_ssl_sess_hdr *)ebmb_insert(sh_ssl_sess_tree, \
+ &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH);
+#define sh_ssl_sess_tree_lookup(k) (struct sh_ssl_sess_hdr *)ebmb_lookup(sh_ssl_sess_tree, \
+ (k), SSL_MAX_SSL_SESSION_ID_LENGTH);
#endif /* _PROTO_SSL_SOCK_H */
/*
#ifndef __TYPES_SHCTX
#define __TYPES_SHCTX
-#include <openssl/ssl.h> /* shared session depend of openssl */
-
#ifndef SHSESS_BLOCK_MIN_SIZE
#define SHSESS_BLOCK_MIN_SIZE 128
#endif
#define SHCTX_E_ALLOC_CACHE -1
#define SHCTX_E_INIT_LOCK -2
-struct shared_session {
- struct ebmb_node key;
- unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH];
- unsigned char data[SHSESS_BLOCK_MIN_SIZE];
-};
+#define SHCTX_F_REMOVING 0x1 /* removing flag, no new entries accepted */
+/* generic shctx struct */
struct shared_block {
- union {
- struct shared_session session;
- unsigned char data[sizeof(struct shared_session)];
- } data;
- short int data_len;
- struct shared_block *p;
- struct shared_block *n;
+ struct list list;
+ short int len; /* data length for the row */
+ short int block_count; /* number of blocks */
+ unsigned int refcount;
+ unsigned char data[0];
};
struct shared_context {
unsigned int waiters;
#endif
#endif
- struct shared_block active;
- struct shared_block free;
+ struct list avail; /* list for active and free blocks */
+ struct list hot; /* list for locked blocks */
+ unsigned int nbav; /* number of available blocks */
+ void (*free_block)(struct shared_block *first, struct shared_block *block);
+ short int block_size;
+ unsigned char data[0];
};
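/* Illustrative layout note (not part of the patch): shctx_init() maps one
 * contiguous area holding the context, the caller's <extra> space, then
 * <maxblocks> fixed-size blocks, each a shared_block header followed by
 * <blocksize> bytes of payload:
 *
 *   [shared_context][extra][hdr|payload][hdr|payload]...[hdr|payload]
 *
 * A hypothetical helper mirroring the size computation used by the mmap()
 * call further down (assuming <stddef.h> for size_t):
 */
static inline size_t shctx_example_map_size(int maxblocks, int blocksize, int extra)
{
	return sizeof(struct shared_context) + extra +
	       maxblocks * (sizeof(struct shared_block) + blocksize);
}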
-extern struct shared_context *ssl_shctx;
-
#endif
int tls_ticket_enc_index;
};
+/* shared ssl session */
+struct sh_ssl_sess_hdr {
+ struct ebmb_node key;
+ unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH];
+};
+
#endif /* _TYPES_SSL_SOCK_H */
#include <sys/mman.h>
#include <arpa/inet.h>
#include <ebmbtree.h>
-
-#include <proto/connection.h>
-#include <proto/shctx.h>
-#include <proto/ssl_sock.h>
-#include <proto/openssl-compat.h>
-
#include <types/global.h>
-#include <types/shctx.h>
-
+#include <common/mini-clist.h>
+#include "proto/shctx.h"
+#include <proto/openssl-compat.h>
#if !defined (USE_PRIVATE_CACHE)
-int use_shared_mem = 0;
-#endif
-/* Tree Macros */
-/* shared session functions */
+int use_shared_mem = 0;
+#endif
-/* Free session blocks, returns number of freed blocks */
-int shsess_free(struct shared_context *shctx, struct shared_session *shsess)
+/*
+ * Reserve a row, put it in the hot list and set its refcount to 1.
+ *
+ * Blocks are taken from the avail list and moved to the hot list.
+ * Returns the first block of the row, or NULL if not enough blocks are available.
+ */
+struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx, int data_len)
{
- struct shared_block *block;
- int ret = 1;
+ struct shared_block *block, *sblock, *ret = NULL, *first;
+ int enough = 0;
+ int freed = 0;
+
+ /* not enough usable blocks */
+ if (data_len > shctx->nbav * shctx->block_size)
+ goto out;
+
+ while (!enough && !LIST_ISEMPTY(&shctx->avail)) {
+ int count = 0;
+ int first_count = 0, first_len = 0;
+
+ first = block = LIST_NEXT(&shctx->avail, struct shared_block *, list);
+ if (ret == NULL)
+ ret = first;
+
+ first_count = first->block_count;
+ first_len = first->len;
+ /*
+ Should never be set to 0.
+ if (first->block_count == 0)
+ first->block_count = 1;
+ */
+
+ list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {
+
+ /* release callback */
+ if (first_len && shctx->free_block)
+ shctx->free_block(first, block);
+
+ block->block_count = 1;
+ block->len = 0;
+
+ freed++;
+ data_len -= shctx->block_size;
+
+ if (data_len > 0)
+ shctx_block_set_hot(shctx, block);
+
+ if (data_len <= 0 && !enough) {
+ shctx_block_set_hot(shctx, block);
+ ret->block_count = freed;
+ ret->refcount = 1;
+ enough = 1;
+ }
- if (((struct shared_block *)shsess)->data_len <= sizeof(shsess->data)) {
- shblock_set_free(shctx, (struct shared_block *)shsess);
- return ret;
- }
- block = ((struct shared_block *)shsess)->n;
- shblock_set_free(shctx, (struct shared_block *)shsess);
- while (1) {
- struct shared_block *next;
-
- if (block->data_len <= sizeof(block->data)) {
- /* last block */
- shblock_set_free(shctx, block);
- ret++;
- break;
+ count++;
+ if (count >= first_count)
+ break;
}
- next = block->n;
- shblock_set_free(shctx, block);
- ret++;
- block = next;
}
+
+out:
return ret;
}
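/* Worked example (not part of the patch): with block_size == 128, reserving
 * a row for 300 bytes of data consumes ceil(300/128) == 3 blocks. On success
 * the first block heads the row with block_count == 3 and refcount == 1, and
 * all three blocks sit in the hot list until released:
 */
static void shctx_example_reserve(struct shared_context *shctx)
{
	struct shared_block *first = shctx_row_reserve_hot(shctx, 300);

	if (first) {
		/* first->block_count == 3, first->refcount == 1 here */
		shctx_row_dec_hot(shctx, first);	/* back to avail */
	}
}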
-/* This function frees enough blocks to store a new session of data_len.
- * Returns a ptr on a free block if it succeeds, or NULL if there are not
- * enough blocks to store that session.
+/*
+ * If the refcount is 0, move the row to the hot list. Increment the refcount.
*/
-struct shared_session *shsess_get_next(struct shared_context *shctx, int data_len)
+void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first)
{
- int head = 0;
- struct shared_block *b;
-
- b = shctx->free.n;
- while (b != &shctx->free) {
- if (!head) {
- data_len -= sizeof(b->data.session.data);
- head = 1;
+ struct shared_block *block, *sblock;
+ int count = 0;
+
+ if (first->refcount <= 0) {
+
+ block = first;
+
+ list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) {
+
+ shctx_block_set_hot(shctx, block);
+
+ count++;
+ if (count >= first->block_count)
+ break;
}
- else
- data_len -= sizeof(b->data.data);
- if (data_len <= 0)
- return &shctx->free.n->data.session;
- b = b->n;
}
- b = shctx->active.n;
- while (b != &shctx->active) {
- int freed;
-
- shsess_tree_delete(&b->data.session);
- freed = shsess_free(shctx, &b->data.session);
- if (!head)
- data_len -= sizeof(b->data.session.data) + (freed-1)*sizeof(b->data.data);
- else
- data_len -= freed*sizeof(b->data.data);
- if (data_len <= 0)
- return &shctx->free.n->data.session;
- b = shctx->active.n;
- }
- return NULL;
+
+ first->refcount++;
}
-/* store a session into the cache
- * s_id : session id padded with zero to SSL_MAX_SSL_SESSION_ID_LENGTH
- * data: asn1 encoded session
- * data_len: asn1 encoded session length
- * Returns 1 id session was stored (else 0)
+/*
+ * Decrement the refcount and move the row to the end of the avail list if it reaches 0.
*/
-int shsess_store(struct shared_context *shctx, unsigned char *s_id, unsigned char *data, int data_len)
+void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first)
{
- struct shared_session *shsess, *oldshsess;
+ struct shared_block *block, *sblock;
+ int count = 0;
- shsess = shsess_get_next(shctx, data_len);
- if (!shsess) {
- /* Could not retrieve enough free blocks to store that session */
- return 0;
- }
+ first->refcount--;
- /* prepare key */
- memcpy(shsess->key_data, s_id, SSL_MAX_SSL_SESSION_ID_LENGTH);
+ if (first->refcount <= 0) {
- /* it returns the already existing node
- or current node if none, never returns null */
- oldshsess = shsess_tree_insert(shctx, shsess);
- if (oldshsess != shsess) {
- /* free all blocks used by old node */
- shsess_free(shctx, oldshsess);
- shsess = oldshsess;
- }
+ block = first;
- ((struct shared_block *)shsess)->data_len = data_len;
- if (data_len <= sizeof(shsess->data)) {
- /* Store on a single block */
- memcpy(shsess->data, data, data_len);
- shblock_set_active(shctx, (struct shared_block *)shsess);
- }
- else {
- unsigned char *p;
- /* Store on multiple blocks */
- int cur_len;
-
- memcpy(shsess->data, data, sizeof(shsess->data));
- p = data + sizeof(shsess->data);
- cur_len = data_len - sizeof(shsess->data);
- shblock_set_active(shctx, (struct shared_block *)shsess);
- while (1) {
- /* Store next data on free block.
- * shsess_get_next guarantees that there are enough
- * free blocks in queue.
- */
- struct shared_block *block;
-
- block = shctx->free.n;
- if (cur_len <= sizeof(block->data)) {
- /* This is the last block */
- block->data_len = cur_len;
- memcpy(block->data.data, p, cur_len);
- shblock_set_active(shctx, block);
+ list_for_each_entry_safe_from(block, sblock, &shctx->hot, list) {
+
+ shctx_block_set_avail(shctx, block);
+
+ count++;
+ if (count >= first->block_count)
break;
- }
- /* Intermediate block */
- block->data_len = cur_len;
- memcpy(block->data.data, p, sizeof(block->data));
- p += sizeof(block->data.data);
- cur_len -= sizeof(block->data.data);
- shblock_set_active(shctx, block);
}
}
- return 1;
}
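/* Illustrative sketch (not part of the patch): typical read path for a row
 * located through some lookup structure while its blocks sit in the avail
 * list. shctx_row_inc_hot() pins the row (re-linking it into <hot> when the
 * refcount was 0) so a later reservation cannot recycle its blocks, and
 * shctx_row_dec_hot() unpins it. The helper name is hypothetical.
 */
static int shctx_example_pinned_read(struct shared_context *shctx,
                                     struct shared_block *first,
                                     unsigned char *dst, int len)
{
	int remain;

	shctx_row_inc_hot(shctx, first);	/* pin the row */
	remain = shctx_row_data_get(shctx, first, dst, 0, len);
	shctx_row_dec_hot(shctx, first);	/* unpin it */
	return remain;				/* 0 when fully copied */
}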
+/*
+ * Append data to the row if there is enough space.
+ * The row must be in the hot list.
+ *
+ * Returns the amount of data left to append (0 on success), or a negative
+ * value telling how much more space is needed to contain the data.
+ */
+int shctx_row_data_append(struct shared_context *shctx, struct shared_block *first, unsigned char *data, int len)
+{
+ int remain, start;
+ int count = 0;
+ struct shared_block *block;
+
+
+ /* not enough room in the row: return the missing space as a negative value */
+ if (len > first->block_count * shctx->block_size - first->len)
+ return (first->block_count * shctx->block_size - first->len) - len;
+
+ /* skipping full buffers, stop at the first buffer with remaining space */
+ block = first;
+ list_for_each_entry_from(block, &shctx->hot, list) {
+ count++;
+
+
+ /* break if there is not enough blocks */
+ if (count > first->block_count)
+ break;
+
+ /* end of copy */
+ if (len <= 0)
+ break;
+
+ /* skip full buffers */
+ if (count * shctx->block_size <= first->len)
+ continue;
+
+ /* remaining space in the current block which is not full */
+ remain = (shctx->block_size * count - first->len) % shctx->block_size;
+ /* if remain == 0, the previous buffers are full, or first->len == 0 */
+ remain = remain ? remain : shctx->block_size;
+
+ /* start must be calculated before remain is modified */
+ start = shctx->block_size - remain;
+
+ /* must not try to copy more than len */
+ remain = MIN(remain, len);
+
+ memcpy(block->data + start, data, remain);
+ data += remain;
+ len -= remain;
+ first->len += remain; /* update len in the head of the row */
+ }
+
+ return len;
+}
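/* Worked example (not part of the patch) of the <remain>/<start> arithmetic
 * above. Assume block_size == 128 and first->len == 200, i.e. one full block
 * plus 72 bytes already written in the second one. When the loop reaches the
 * second block (count == 2):
 */
#include <assert.h>
static void shctx_example_append_offsets(void)
{
	int block_size = 128, first_len = 200, count = 2;
	int remain = (block_size * count - first_len) % block_size;
	int start;

	remain = remain ? remain : block_size;	/* 56 free bytes in block 2 */
	start = block_size - remain;		/* copy resumes at offset 72 */
	assert(remain == 56 && start == 72);
}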
+
+/*
+ * Copy <len> bytes of data from a row of blocks, starting at <offset>.
+ * Returns the amount of data left to copy; 0 means it was fully copied.
+ *
+ * The row must be in the hot list.
+ */
+int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
+ unsigned char *dst, int offset, int len)
+{
+ int count = 0, size = 0, start = -1;
+ struct shared_block *block;
+
+ block = first;
+ count = 0;
+ /* Pass through the blocks to copy them */
+ list_for_each_entry_from(block, &shctx->hot, list) {
+ if (count >= first->block_count || len <= 0)
+ break;
+
+ count++;
+ /* continue until we reach the block that contains the offset */
+ if (count < offset / shctx->block_size + 1)
+ continue;
+
+ /* in the first block copied, data may not begin at offset 0 */
+ if (start == -1)
+ start = offset - (count - 1) * shctx->block_size;
+
+ /* size can be lower than a block when copying the last block */
+ size = MIN(shctx->block_size - start, len);
+
+ memcpy(dst, block->data + start, size);
+ dst += size;
+ len -= size;
+ start = 0;
+ }
+ return len;
+}
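/* Usage sketch (not part of the patch): reading a row back while skipping a
 * fixed-size header stored at its front, the way the SSL session cache does
 * further down with sizeof(struct sh_ssl_sess_hdr). <hdr_size> is an
 * illustrative parameter.
 */
static int shctx_example_read_payload(struct shared_context *shctx,
                                      struct shared_block *first,
                                      unsigned char *dst, int hdr_size)
{
	/* start copying after the header; the payload is the rest of the row */
	return shctx_row_data_get(shctx, first, dst,
	                          hdr_size, first->len - hdr_size);
}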
/* Allocate shared memory context.
- * <size> is maximum cached sessions.
- * If <size> is set to less or equal to 0, ssl cache is disabled.
- * Returns: -1 on alloc failure, <size> if it performs context alloc,
+ * <maxblocks> is the maximum number of blocks.
+ * If <maxblocks> is less than or equal to 0, the cache is disabled.
+ * Returns: -1 on alloc failure, <maxblocks> if it performs context alloc,
* and 0 if cache is already allocated.
*/
-int shared_context_init(struct shared_context **orig_shctx, int size, int shared)
+int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize, int extra, int shared)
{
int i;
struct shared_context *shctx;
pthread_mutexattr_t attr;
#endif
#endif
- struct shared_block *prev,*cur;
+ void *cur;
int maptype = MAP_PRIVATE;
- if (orig_shctx && *orig_shctx)
+ if (maxblocks <= 0)
return 0;
- if (size<=0)
- return 0;
-
- /* Increate size by one to reserve one node for lookup */
- size++;
#ifndef USE_PRIVATE_CACHE
if (shared)
maptype = MAP_SHARED;
#endif
- shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context)+(size*sizeof(struct shared_block)),
+ shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)),
PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
if (!shctx || shctx == MAP_FAILED) {
shctx = NULL;
goto err;
}
+ shctx->nbav = 0;
+
#ifndef USE_PRIVATE_CACHE
if (maptype == MAP_SHARED) {
#ifdef USE_PTHREAD_PSHARED
if (pthread_mutexattr_init(&attr)) {
- munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block)));
+ munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
shctx = NULL;
ret = SHCTX_E_INIT_LOCK;
goto err;
if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) {
pthread_mutexattr_destroy(&attr);
- munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block)));
+ munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
shctx = NULL;
ret = SHCTX_E_INIT_LOCK;
goto err;
if (pthread_mutex_init(&shctx->mutex, &attr)) {
pthread_mutexattr_destroy(&attr);
- munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block)));
+ munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)));
shctx = NULL;
ret = SHCTX_E_INIT_LOCK;
goto err;
}
#endif
- memset(&shctx->active.data.session.key, 0, sizeof(struct ebmb_node));
- memset(&shctx->free.data.session.key, 0, sizeof(struct ebmb_node));
-
- /* No duplicate authorized in tree: */
- shctx->active.data.session.key.node.branches = EB_ROOT_UNIQUE;
-
- cur = &shctx->active;
- cur->n = cur->p = cur;
-
- cur = &shctx->free;
- for (i = 0 ; i < size ; i++) {
- prev = cur;
- cur++;
- prev->n = cur;
- cur->p = prev;
+ LIST_INIT(&shctx->avail);
+ LIST_INIT(&shctx->hot);
+
+ shctx->block_size = blocksize;
+
+ /* init the free blocks after the shared context struct */
+ cur = (void *)shctx + sizeof(struct shared_context) + extra;
+ for (i = 0; i < maxblocks; i++) {
+ struct shared_block *cur_block = (struct shared_block *)cur;
+ cur_block->len = 0;
+ cur_block->refcount = 0;
+ cur_block->block_count = 1;
+ LIST_ADDQ(&shctx->avail, &cur_block->list);
+ shctx->nbav++;
+ cur += sizeof(struct shared_block) + blocksize;
}
- cur->n = &shctx->free;
- shctx->free.p = cur;
-
- ret = size;
+ ret = maxblocks;
err:
*orig_shctx = shctx;
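/* Usage sketch (not part of the patch): creating a shared cache of 20000
 * blocks of 128 bytes, with room for one eb_root in the <extra> area, the
 * way the SSL call site below does. The values are illustrative; struct
 * eb_root comes from the ebtree headers already included above.
 */
static int shctx_example_create(struct shared_context **ctx)
{
	return shctx_init(ctx, 20000, 128, sizeof(struct eb_root), 1);
}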
#define SSL_SOCK_NUM_KEYTYPES 1
#endif
-struct shared_context *ssl_shctx = NULL;
+static struct shared_context *ssl_shctx; /* ssl shared session cache */
+static struct eb_root *sh_ssl_sess_tree; /* ssl shared session tree */
+
+#define sh_ssl_sess_tree_delete(s) ebmb_delete(&(s)->key);
+
+#define sh_ssl_sess_tree_insert(s) (struct sh_ssl_sess_hdr *)ebmb_insert(sh_ssl_sess_tree, \
+ &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH);
+
+#define sh_ssl_sess_tree_lookup(k) (struct sh_ssl_sess_hdr *)ebmb_lookup(sh_ssl_sess_tree, \
+ (k), SSL_MAX_SSL_SESSION_ID_LENGTH);
/*
* This function gives the detail of the SSL error. It is used only
return cfgerr;
}
-/* SSL context callbacks */
+
+static inline void sh_ssl_sess_free_blocks(struct shared_block *first, struct shared_block *block)
+{
+ if (first == block) {
+ struct sh_ssl_sess_hdr *sh_ssl_sess = (struct sh_ssl_sess_hdr *)first->data;
+ if (first->len > 0)
+ sh_ssl_sess_tree_delete(sh_ssl_sess);
+ }
+}
+
+/* return first block from sh_ssl_sess */
+static inline struct shared_block *sh_ssl_sess_first_block(struct sh_ssl_sess_hdr *sh_ssl_sess)
+{
+ return (struct shared_block *)((unsigned char *)sh_ssl_sess - ((struct shared_block *)NULL)->data);
+}
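/* Illustrative note (not part of the patch): the pointer arithmetic above is
 * the classic "container_of" idiom: sh_ssl_sess lives at first->data, so
 * stepping back by the offset of the data member recovers the row head. An
 * equivalent sketch using offsetof(), assuming <stddef.h>:
 */
static inline struct shared_block *example_first_block(struct sh_ssl_sess_hdr *hdr)
{
	return (struct shared_block *)((unsigned char *)hdr -
	                               offsetof(struct shared_block, data));
}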
+
+/* store a session into the cache
+ * s_id : session id padded with zero to SSL_MAX_SSL_SESSION_ID_LENGTH
+ * data: asn1 encoded session
+ * data_len: asn1 encoded session length
+ * Returns 1 if the session was stored (else 0)
+ */
+static int sh_ssl_sess_store(unsigned char *s_id, unsigned char *data, int data_len)
+{
+ struct shared_block *first;
+ struct sh_ssl_sess_hdr *sh_ssl_sess, *oldsh_ssl_sess;
+
+ first = shctx_row_reserve_hot(ssl_shctx, data_len + sizeof(struct sh_ssl_sess_hdr));
+ if (!first) {
+ /* Could not retrieve enough free blocks to store that session */
+ return 0;
+ }
+
+ /* STORE the key in the first elem */
+ sh_ssl_sess = (struct sh_ssl_sess_hdr *)first->data;
+ memcpy(sh_ssl_sess->key_data, s_id, SSL_MAX_SSL_SESSION_ID_LENGTH);
+ first->len = sizeof(struct sh_ssl_sess_hdr);
+
+ /* ebmb_insert() returns the already existing node if the key is
+ already present, or the inserted node otherwise; never NULL */
+ oldsh_ssl_sess = sh_ssl_sess_tree_insert(sh_ssl_sess);
+ if (oldsh_ssl_sess != sh_ssl_sess) {
+ /* NOTE: the row cannot be in use here because read & write accesses are locked */
+ /* release the reserved row */
+ shctx_row_dec_hot(ssl_shctx, first);
+ /* replace the previous session already in the tree */
+ sh_ssl_sess = oldsh_ssl_sess;
+ /* ignore the previous session data, only use the header */
+ first = sh_ssl_sess_first_block(sh_ssl_sess);
+ shctx_row_inc_hot(ssl_shctx, first);
+ first->len = sizeof(struct sh_ssl_sess_hdr);
+ }
+
+ if (shctx_row_data_append(ssl_shctx, first, data, data_len) < 0)
+ return 0;
+
+ return 1;
+}
/* SSL callback used on new session creation */
-int shctx_new_cb(SSL *ssl, SSL_SESSION *sess)
+int sh_ssl_sess_new_cb(SSL *ssl, SSL_SESSION *sess)
{
unsigned char encsess[SHSESS_MAX_DATA_LEN]; /* encoded session */
unsigned char encid[SSL_MAX_SSL_SESSION_ID_LENGTH]; /* encoded id */
memset(encid + sid_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH-sid_length);
shared_context_lock(ssl_shctx);
-
/* store to cache */
- shsess_store(ssl_shctx, encid, encsess, data_len);
-
+ sh_ssl_sess_store(encid, encsess, data_len);
shared_context_unlock(ssl_shctx);
-
err:
/* reset original length values */
SSL_SESSION_set1_id(sess, sid_data, sid_length);
}
/* SSL callback used to look up an existing session when none was found in the internal cache */
-SSL_SESSION *shctx_get_cb(SSL *ssl, __OPENSSL_110_CONST__ unsigned char *key, int key_len, int *do_copy)
+SSL_SESSION *sh_ssl_sess_get_cb(SSL *ssl, __OPENSSL_110_CONST__ unsigned char *key, int key_len, int *do_copy)
{
- struct shared_session *shsess;
+ struct sh_ssl_sess_hdr *sh_ssl_sess;
unsigned char data[SHSESS_MAX_DATA_LEN], *p;
unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
- int data_len;
SSL_SESSION *sess;
+ struct shared_block *first;
global.shctx_lookups++;
shared_context_lock(ssl_shctx);
/* lookup for session */
- shsess = shsess_tree_lookup(ssl_shctx, key);
- if (!shsess) {
+ sh_ssl_sess = sh_ssl_sess_tree_lookup(key);
+ if (!sh_ssl_sess) {
/* no session found: unlock cache and exit */
shared_context_unlock(ssl_shctx);
global.shctx_misses++;
return NULL;
}
- data_len = ((struct shared_block *)shsess)->data_len;
- if (data_len <= sizeof(shsess->data)) {
- /* Session stored on single block */
- memcpy(data, shsess->data, data_len);
- shblock_set_active(ssl_shctx, (struct shared_block *)shsess);
- }
- else {
- /* Session stored on multiple blocks */
- struct shared_block *block;
-
- memcpy(data, shsess->data, sizeof(shsess->data));
- p = data + sizeof(shsess->data);
- block = ((struct shared_block *)shsess)->n;
- shblock_set_active(ssl_shctx, (struct shared_block *)shsess);
- while (1) {
- /* Retrieve data from next block */
- struct shared_block *next;
-
- if (block->data_len <= sizeof(block->data.data)) {
- /* This is the last block */
- memcpy(p, block->data.data, block->data_len);
- p += block->data_len;
- shblock_set_active(ssl_shctx, block);
- break;
- }
- /* Intermediate block */
- memcpy(p, block->data.data, sizeof(block->data.data));
- p += sizeof(block->data.data);
- next = block->n;
- shblock_set_active(ssl_shctx, block);
- block = next;
- }
- }
+ /* sh_ssl_sess is stored in first->data, which sits at the end of the shared_block header */
+ first = sh_ssl_sess_first_block(sh_ssl_sess);
+
+ shctx_row_data_get(ssl_shctx, first, data, sizeof(struct sh_ssl_sess_hdr), first->len-sizeof(struct sh_ssl_sess_hdr));
shared_context_unlock(ssl_shctx);
/* decode ASN1 session */
p = data;
- sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, data_len);
+ sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, first->len-sizeof(struct sh_ssl_sess_hdr));
/* Reset session id and session id context */
if (sess) {
SSL_SESSION_set1_id(sess, key, key_len);
return sess;
}
+
/* SSL callback used to signal that a session is no longer used in the internal cache */
-void shctx_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
+void sh_ssl_sess_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
{
- struct shared_session *shsess;
+ struct sh_ssl_sess_hdr *sh_ssl_sess;
unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
unsigned int sid_length;
const unsigned char *sid_data;
shared_context_lock(ssl_shctx);
/* lookup for session */
- shsess = shsess_tree_lookup(ssl_shctx, sid_data);
- if (shsess) {
+ sh_ssl_sess = sh_ssl_sess_tree_lookup(sid_data);
+ if (sh_ssl_sess) {
/* free session */
- shsess_tree_delete(shsess);
- shsess_free(ssl_shctx, shsess);
+ sh_ssl_sess_tree_delete(sh_ssl_sess);
}
/* unlock cache */
/* Set session cache mode to server and disable openssl internal cache.
* Set shared cache callbacks on an ssl context.
* The shared context MUST be initialized first */
-void shared_context_set_cache(SSL_CTX *ctx)
+void ssl_set_shctx(SSL_CTX *ctx)
{
SSL_CTX_set_session_id_context(ctx, (const unsigned char *)SHCTX_APPNAME, strlen(SHCTX_APPNAME));
SSL_SESS_CACHE_NO_AUTO_CLEAR);
/* Set callbacks */
- SSL_CTX_sess_set_new_cb(ctx, shctx_new_cb);
- SSL_CTX_sess_set_get_cb(ctx, shctx_get_cb);
- SSL_CTX_sess_set_remove_cb(ctx, shctx_remove_cb);
+ SSL_CTX_sess_set_new_cb(ctx, sh_ssl_sess_new_cb);
+ SSL_CTX_sess_set_get_cb(ctx, sh_ssl_sess_get_cb);
+ SSL_CTX_sess_set_remove_cb(ctx, sh_ssl_sess_remove_cb);
}
int ssl_sock_prepare_ctx(struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_conf, SSL_CTX *ctx)
}
#endif
- shared_context_set_cache(ctx);
+ ssl_set_shctx(ctx);
conf_ciphers = (ssl_conf && ssl_conf->ciphers) ? ssl_conf->ciphers : bind_conf->ssl_conf.ciphers;
if (conf_ciphers &&
!SSL_CTX_set_cipher_list(ctx, conf_ciphers)) {
}
}
- alloc_ctx = shared_context_init(&ssl_shctx, global.tune.sslcachesize, (!global_ssl.private_cache && (global.nbproc > 1)) ? 1 : 0);
+ alloc_ctx = shctx_init(&ssl_shctx, global.tune.sslcachesize, sizeof(struct sh_ssl_sess_hdr) + SHSESS_BLOCK_MIN_SIZE, sizeof(*sh_ssl_sess_tree), (!global_ssl.private_cache && (global.nbproc > 1)) ? 1 : 0);
if (alloc_ctx < 0) {
if (alloc_ctx == SHCTX_E_INIT_LOCK)
Alert("Unable to initialize the lock for the shared SSL session cache. You can retry using the global statement 'tune.ssl.force-private-cache' but it could increase CPU usage due to renegotiations if nbproc > 1.\n");
Alert("Unable to allocate SSL session cache.\n");
return -1;
}
+ /* free block callback */
+ ssl_shctx->free_block = sh_ssl_sess_free_blocks;
+ /* init the root tree within the extra space */
+ sh_ssl_sess_tree = (void *)ssl_shctx + sizeof(struct shared_context);
+ *sh_ssl_sess_tree = EB_ROOT_UNIQUE;
err = 0;
/* initialize all certificate contexts */