#include "ks.h"
-#define KS_POOL_MAGIC 0xABACABA /* magic for struct */
-#define BLOCK_MAGIC 0xB1B1007 /* magic for blocks */
-#define FENCE_MAGIC0 (unsigned char)(0xFAU) /* 1st magic mem byte */
-#define FENCE_MAGIC1 (unsigned char)(0xD3U) /* 2nd magic mem byte */
-
-#define FENCE_SIZE 2 /* fence space */
-#define MIN_ALLOCATION (sizeof(ks_pool_free_t)) /* min alloc */
-#define MAX_FREE_SEARCH 10240 /* max size to search */
-#define MAX_FREE_LIST_SEARCH 100 /* max looking for free mem */
-
-#define PRE_MAGIC1 0x33U
-#define PRE_MAGIC2 0xCCU
-
-typedef struct alloc_prefix_s {
- unsigned char m1;
- unsigned long size;
- unsigned char m2;
- unsigned int refs;
- unsigned int padding;
-} alloc_prefix_t;
-
-#define PREFIX_SIZE sizeof(struct alloc_prefix_s)
-//#define DEBUG 1
-/*
- * bitflag tools for Variable and a Flag
- */
-#define BIT_FLAG(x) (1 << (x))
-#define BIT_SET(v,f) (v) |= (f)
-#define BIT_CLEAR(v,f) (v) &= ~(f)
-#define BIT_IS_SET(v,f) ((v) & (f))
-#define BIT_TOGGLE(v,f) (v) ^= (f)
+ //#define DEBUG 1
+
+#define KS_POOL_MAGIC 0xDEADBEEF /* magic for struct */
+
+#define KS_POOL_PREFIX_MAGIC 0xDEADBEEF
+
+#define KS_POOL_FENCE_MAGIC0 (ks_byte_t)(0xFAU) /* 1st magic mem byte */
+#define KS_POOL_FENCE_MAGIC1 (ks_byte_t)(0xD3U) /* 2nd magic mem byte */
+
+#define KS_POOL_FENCE_SIZE 2 /* fence space */
+
+typedef struct ks_pool_prefix_s ks_pool_prefix_t;
+
+struct ks_pool_prefix_s {
+ ks_size_t magic1;
+ ks_size_t size;
+ ks_size_t magic2;
+ ks_size_t refs;
+ ks_pool_prefix_t *prev;
+ ks_pool_prefix_t *next;
+ ks_size_t magic3;
+ ks_pool_cleanup_callback_t cleanup_callback;
+ void *cleanup_arg;
+ ks_size_t magic4;
+ ks_size_t reserved[2];
+};
+
+#define KS_POOL_PREFIX_SIZE sizeof(ks_pool_prefix_t)
#define SET_POINTER(pnt, val) \
do { \
} \
} while(0)
-#define BLOCK_FLAG_USED BIT_FLAG(0) /* block is used */
-#define BLOCK_FLAG_FREE BIT_FLAG(1) /* block is free */
-
-#define DEFAULT_PAGE_MULT 16 /* pagesize = this * getpagesize */
-
-/* How many pages SIZE bytes resides in. We add in the block header. */
-#define PAGES_IN_SIZE(mp_p, size) (((size) + sizeof(ks_pool_block_t) + \
- (mp_p)->mp_page_size - 1) / \
- (mp_p)->mp_page_size)
-#define SIZE_OF_PAGES(mp_p, page_n) ((page_n) * (mp_p)->mp_page_size)
-#define MAX_BITS 30 /* we only can allocate 1gb chunks */
-
-#define MAX_BLOCK_USER_MEMORY(mp_p) ((mp_p)->mp_page_size - \
- sizeof(ks_pool_block_t))
-#define FIRST_ADDR_IN_BLOCK(block_p) (void *)((char *)(block_p) + \
- sizeof(ks_pool_block_t))
-#define MEMORY_IN_BLOCK(block_p) ((char *)(block_p)->mb_bounds_p - \
- ((char *)(block_p) + \
- sizeof(ks_pool_block_t)))
-
-typedef struct ks_pool_cleanup_node_s {
- ks_pool_cleanup_fn_t fn;
- void *ptr;
- void *arg;
- int type;
- struct ks_pool_cleanup_node_s *next;
-} ks_pool_cleanup_node_t;
-
struct ks_pool_s {
- unsigned int mp_magic; /* magic number for struct */
- unsigned int mp_flags; /* flags for the struct */
- unsigned long mp_alloc_c; /* number of allocations */
- unsigned long mp_user_alloc; /* user bytes allocated */
- unsigned long mp_max_alloc; /* maximum user bytes allocated */
- unsigned int mp_page_c; /* number of pages allocated */
- unsigned int mp_max_pages; /* maximum number of pages to use */
- unsigned int mp_page_size; /* page-size of our system */
- off_t mp_top; /* top of our allocations in fd */
- ks_pool_log_func_t mp_log_func; /* log callback function */
- void *mp_min_p; /* min address in pool for checks */
- void *mp_bounds_p; /* max address in pool for checks */
- struct ks_pool_block_st *mp_first_p; /* first memory block we are using */
- struct ks_pool_block_st *mp_last_p; /* last memory block we are using */
- struct ks_pool_block_st *mp_free[MAX_BITS + 1]; /* free lists based on size */
- unsigned int mp_magic2; /* upper magic for overwrite sanity */
- ks_pool_cleanup_node_t *clfn_list;
+ ks_size_t magic1; /* magic number for struct */
+ ks_size_t flags; /* flags for the struct */
+ ks_size_t alloc_c; /* number of allocations */
+ ks_size_t user_alloc; /* user bytes allocated */
+ ks_size_t max_alloc; /* maximum user bytes allocated */
+ ks_pool_log_func_t log_func; /* log callback function */
+ ks_pool_prefix_t *first; /* first memory allocation we are using */
+ ks_pool_prefix_t *last; /* last memory allocation we are using */
+ ks_size_t magic2; /* upper magic for overwrite sanity */
ks_mutex_t *mutex;
- ks_mutex_t *cleanup_mutex;
- uint8_t cleaning_up;
+ ks_bool_t cleaning_up;
};
-/* for debuggers to be able to interrogate the generic type in the .h file */
-typedef ks_pool_t ks_pool_ext_t;
-
-/*
- * Block header structure. This structure *MUST* be long-word
- * aligned.
- */
-typedef struct ks_pool_block_st {
- unsigned int mb_magic; /* magic number for block header */
- void *mb_bounds_p; /* block boundary location */
- struct ks_pool_block_st *mb_next_p; /* linked list next pointer */
- unsigned int mb_magic2; /* upper magic for overwrite sanity */
-} ks_pool_block_t;
+static ks_status_t check_pool(const ks_pool_t *pool);
+static ks_status_t check_fence(const void *addr);
+static void write_fence(void *addr);
+static ks_status_t check_prefix(const ks_pool_prefix_t *prefix);
-/*
- * Free list structure.
- */
-typedef struct {
- void *mf_next_p; /* pointer to the next free address */
- unsigned long mf_size; /* size of the free block */
-} ks_pool_free_t;
-
-#ifndef MAP_ANON
-#define MAP_ANON MAP_ANONYMOUS
-#endif
-
-/* local variables */
-static int enabled_b = 0; /* lib initialized? */
-static unsigned int min_bit_free_next = 0; /* min size of next pnt */
-static unsigned int min_bit_free_size = 0; /* min size of next + size */
-static unsigned long bit_array[MAX_BITS + 1]; /* size -> bit */
-
-#ifdef _MSC_VER
-#include <Windows.h>
-long getpagesize(void)
+static void perform_pool_cleanup_on_free(ks_pool_t *pool, ks_pool_prefix_t *prefix)
{
- static long g_pagesize = 0;
- if (!g_pagesize) {
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- g_pagesize = system_info.dwPageSize;
- }
- return g_pagesize;
-}
-#endif
-
-
-/* We need mutex here probably or this notion of cleanup stuff cannot be threadsafe */
-
-#if 0
-static ks_pool_cleanup_node_t *find_cleanup_node(ks_pool_t *mp_p, void *ptr)
-{
- ks_pool_cleanup_node_t *np, *cnode = NULL;
-
- ks_assert(mp_p);
- ks_assert(ptr);
-
- for (np = mp_p->clfn_list; np; np = np->next) {
- if (np->ptr == ptr) {
- cnode = np;
- goto end;
- }
- }
-
- end:
+ void *addr;
- /* done, the nodes are all from the pool so they will be destroyed */
- return cnode;
-}
-#endif
+ ks_assert(pool);
+ ks_assert(prefix);
-static void perform_pool_cleanup_on_free(ks_pool_t *mp_p, void *ptr)
-{
- ks_pool_cleanup_node_t *np, *cnode, *last = NULL;
+ if (pool->cleaning_up) return;
- np = mp_p->clfn_list;
+ addr = (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE);
- ks_mutex_lock(mp_p->mutex);
- if (mp_p->cleaning_up) {
- ks_mutex_unlock(mp_p->mutex);
- return;
- }
- ks_mutex_unlock(mp_p->mutex);
-
- ks_mutex_lock(mp_p->cleanup_mutex);
- while(np) {
- if (np->ptr == ptr) {
- if (last) {
- last->next = np->next;
- } else {
- mp_p->clfn_list = np->next;
- }
-
- cnode = np;
- np = np->next;
- cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_ANNOUNCE, KS_MPCL_FREE);
- cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_TEARDOWN, KS_MPCL_FREE);
- cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_DESTROY, KS_MPCL_FREE);
-
- continue;
- }
- last = np;
- np = np->next;
+ if (prefix->cleanup_callback) {
+ prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_ANNOUNCE, KS_MPCL_FREE);
+ prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_TEARDOWN, KS_MPCL_FREE);
+ prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_DESTROY, KS_MPCL_FREE);
}
- ks_mutex_unlock(mp_p->cleanup_mutex);
}
-static void perform_pool_cleanup(ks_pool_t *mp_p)
+static void perform_pool_cleanup(ks_pool_t *pool)
{
- ks_pool_cleanup_node_t *np;
+ ks_pool_prefix_t *prefix;
- ks_mutex_lock(mp_p->mutex);
- if (mp_p->cleaning_up) {
- ks_mutex_unlock(mp_p->mutex);
+ if (pool->cleaning_up) {
return;
}
- mp_p->cleaning_up = 1;
- ks_mutex_unlock(mp_p->mutex);
+ pool->cleaning_up = KS_TRUE;
- ks_mutex_lock(mp_p->cleanup_mutex);
- for (np = mp_p->clfn_list; np; np = np->next) {
- np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_ANNOUNCE, KS_MPCL_GLOBAL_FREE);
+ for (prefix = pool->first; prefix; prefix = prefix->next) {
+ if (!prefix->cleanup_callback) continue;
+ prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_ANNOUNCE, KS_MPCL_GLOBAL_FREE);
}
- for (np = mp_p->clfn_list; np; np = np->next) {
- np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_TEARDOWN, KS_MPCL_GLOBAL_FREE);
+ for (prefix = pool->first; prefix; prefix = prefix->next) {
+ if (!prefix->cleanup_callback) continue;
+ prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_TEARDOWN, KS_MPCL_GLOBAL_FREE);
}
- for (np = mp_p->clfn_list; np; np = np->next) {
- np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_DESTROY, KS_MPCL_GLOBAL_FREE);
+ for (prefix = pool->first; prefix; prefix = prefix->next) {
+ if (!prefix->cleanup_callback) continue;
+ prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_DESTROY, KS_MPCL_GLOBAL_FREE);
}
- ks_mutex_unlock(mp_p->cleanup_mutex);
-
- mp_p->clfn_list = NULL;
}
-KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *mp_p, void *ptr, void *arg, int type, ks_pool_cleanup_fn_t fn)
+KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_callback_t callback)
{
- ks_pool_cleanup_node_t *cnode;
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ ks_pool_prefix_t *prefix = NULL;
- ks_assert(mp_p);
+ ks_assert(pool);
ks_assert(ptr);
- ks_assert(fn);
-
- /* don't set cleanup on this cnode obj or it will be an endless loop */
- cnode = (ks_pool_cleanup_node_t *) ks_pool_alloc(mp_p, sizeof(*cnode));
-
- if (!cnode) {
- return KS_STATUS_FAIL;
- }
-
- cnode->ptr = ptr;
- cnode->arg = arg;
- cnode->fn = fn;
- cnode->type = type;
-
- ks_mutex_lock(mp_p->cleanup_mutex);
- cnode->next = mp_p->clfn_list;
- mp_p->clfn_list = cnode;
- ks_mutex_unlock(mp_p->cleanup_mutex);
-
- return KS_STATUS_SUCCESS;
-}
-
-
-
-/****************************** local utilities ******************************/
-
-/*
- * static void startup
- *
- * DESCRIPTION:
- *
- * Perform any library level initialization.
- *
- * RETURNS:
- *
- * None.
- *
- * ARGUMENTS:
- *
- * None.
- */
-static void startup(void)
-{
- int bit_c;
- unsigned long size = 1;
+ ks_assert(callback);
- if (enabled_b) {
- return;
- }
+ prefix = (ks_pool_prefix_t *)((uintptr_t)ptr - KS_POOL_PREFIX_SIZE);
- /* allocate our free bit array list */
- for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) {
- bit_array[bit_c] = size;
-
- /*
- * Note our minimum number of bits that can store a pointer. This
- * is smallest address that we can have a linked list for.
- */
- if (min_bit_free_next == 0 && size >= sizeof(void *)) {
- min_bit_free_next = bit_c;
- }
- /*
- * Note our minimum number of bits that can store a pointer and
- * the size of the block.
- */
- if (min_bit_free_size == 0 && size >= sizeof(ks_pool_free_t)) {
- min_bit_free_size = bit_c;
- }
+ ret = check_prefix(prefix);
- size *= 2;
+ if (ret == KS_STATUS_SUCCESS) {
+ prefix->cleanup_arg = arg;
+ prefix->cleanup_callback = callback;
}
- enabled_b = 1;
+ return ret;
}
-/*
- * static int size_to_bits
- *
- * DESCRIPTION:
- *
- * Calculate the number of bits in a size.
- *
- * RETURNS:
- *
- * Number of bits.
- *
- * ARGUMENTS:
- *
- * size -> Size of memory of which to calculate the number of bits.
- */
-static int size_to_bits(const unsigned long size)
-{
- int bit_c = 0;
-
- for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) {
- if (size <= bit_array[bit_c]) {
- break;
- }
- }
-
- return bit_c;
-}
-
-/*
- * static int size_to_free_bits
- *
- * DESCRIPTION:
- *
- * Calculate the number of bits in a size going on the free list.
- *
- * RETURNS:
- *
- * Number of bits.
- *
- * ARGUMENTS:
- *
- * size -> Size of memory of which to calculate the number of bits.
- */
-static int size_to_free_bits(const unsigned long size)
-{
- int bit_c = 0;
-
- if (size == 0) {
- return 0;
- }
-
- for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) {
- if (size < bit_array[bit_c]) {
- break;
- }
- }
- return bit_c - 1;
-}
-/*
- * static int bits_to_size
- *
- * DESCRIPTION:
- *
- * Calculate the size represented by a number of bits.
- *
- * RETURNS:
- *
- * Number of bits.
- *
- * ARGUMENTS:
- *
- * bit_n -> Number of bits
- */
-static unsigned long bits_to_size(const int bit_n)
-{
- if (bit_n > MAX_BITS) {
- return bit_array[MAX_BITS];
- } else {
- return bit_array[bit_n];
- }
-}
+/****************************** local utilities ******************************/
/*
- * static void *alloc_pages
- *
- * DESCRIPTION:
- *
- * Allocate space for a number of memory pages in the memory pool.
- *
- * RETURNS:
- *
- * Success - New pages of memory
- *
- * Failure - NULL
- *
- * ARGUMENTS:
- *
- * mp_p <-> Pointer to our memory pool.
- *
- * page_n -> Number of pages to alloc.
- *
- * error_p <- Pointer to ks_status_t which, if not NULL, will be set with
- * a ks_pool error code.
- */
-static void *alloc_pages(ks_pool_t *mp_p, const unsigned int page_n, ks_status_t *error_p)
+* static ks_status_t check_pool
+*
+* DESCRIPTION:
+*
+* Check the validity of pool checksums.
+*
+* RETURNS:
+*
+* Success - KS_STATUS_SUCCESS
+*
+* Failure - Ks_Pool error code
+*
+* ARGUMENTS:
+*
+* pool -> A pointer to a pool.
+*/
+static ks_status_t check_pool(const ks_pool_t *pool)
{
- void *mem;
- unsigned long size;
-
- /* are we over our max-pages? */
- if (mp_p->mp_max_pages > 0 && mp_p->mp_page_c >= mp_p->mp_max_pages) {
- SET_POINTER(error_p, KS_STATUS_NO_PAGES);
- return NULL;
- }
-
- size = SIZE_OF_PAGES(mp_p, page_n);
-
-#ifdef DEBUG
- (void) printf("allocating %u pages or %lu bytes\n", page_n, size);
-#endif
+ ks_assert(pool);
+ if (pool->magic1 != KS_POOL_MAGIC) return KS_STATUS_PNT;
+ if (pool->magic2 != KS_POOL_MAGIC) return KS_STATUS_POOL_OVER;
- mem = malloc(size);
- ks_assert(mem);
- memset(mem, 0, size);
-
- mp_p->mp_top += size;
- mp_p->mp_page_c += page_n;
-
- SET_POINTER(error_p, KS_STATUS_SUCCESS);
- return mem;
-}
-
-/*
- * static int free_pages
- *
- * DESCRIPTION:
- *
- * Free previously allocated pages of memory.
- *
- * RETURNS:
- *
- * Success - KS_STATUS_SUCCESS
- *
- * Failure - Ks_Pool error code
- *
- * ARGUMENTS:
- *
- * pages <-> Pointer to memory pages that we are freeing.
- *
- * size -> Size of the block that we are freeing.
- *
- */
-static int free_pages(void *pages, const unsigned long size)
-{
- free(pages);
return KS_STATUS_SUCCESS;
}
/*
- * static int check_magic
+ * static ks_status_t check_fence
*
* DESCRIPTION:
*
- * Check for the existance of the magic ID in a memory pointer.
+ * Check the validity of the fence checksums.
*
* RETURNS:
*
*
* ARGUMENTS:
*
- * addr -> Address inside of the block that we are tryign to locate.
- *
- * size -> Size of the block.
+ * addr -> A pointer directly to the fence.
*/
-static int check_magic(const void *addr, const unsigned long size)
+static ks_status_t check_fence(const void *addr)
{
- const unsigned char *mem_p;
+ const ks_byte_t *mem_p;
- /* set our starting point */
- mem_p = (unsigned char *) addr + size;
+ mem_p = (ks_byte_t *)addr;
- if (*mem_p == FENCE_MAGIC0 && *(mem_p + 1) == FENCE_MAGIC1) {
- return KS_STATUS_SUCCESS;
- } else {
- return KS_STATUS_PNT_OVER;
- }
+ if (*mem_p == KS_POOL_FENCE_MAGIC0 && *(mem_p + 1) == KS_POOL_FENCE_MAGIC1) return KS_STATUS_SUCCESS;
+ return KS_STATUS_PNT_OVER;
}
/*
- * static void write_magic
+ * static void write_fence
*
* DESCRIPTION:
*
*
* addr -> Address where to write the magic.
*/
-static void write_magic(const void *addr)
+static void write_fence(void *addr)
{
- *(unsigned char *) addr = FENCE_MAGIC0;
- *((unsigned char *) addr + 1) = FENCE_MAGIC1;
-}
-
-/*
- * static void free_pointer
- *
- * DESCRIPTION:
- *
- * Moved a pointer into our free lists.
- *
- * RETURNS:
- *
- * Success - KS_STATUS_SUCCESS
- *
- * Failure - Ks_Pool error code
- *
- * ARGUMENTS:
- *
- * mp_p <-> Pointer to the memory pool.
- *
- * addr <-> Address where to write the magic. We may write a next
- * pointer to it.
- *
- * size -> Size of the address space.
- */
-static int free_pointer(ks_pool_t *mp_p, void *addr, const unsigned long size)
-{
- unsigned int bit_n;
- unsigned long real_size;
- ks_pool_free_t free_pnt;
-
-#ifdef DEBUG
- (void) printf("freeing a block at %lx of %lu bytes\n", (long) addr, size);
-#endif
-
- if (size == 0) {
- return KS_STATUS_SUCCESS;
- }
-
- /*
- * if the user size is larger then can fit in an entire block then
- * we change the size
- */
- if (size > MAX_BLOCK_USER_MEMORY(mp_p)) {
- real_size = SIZE_OF_PAGES(mp_p, PAGES_IN_SIZE(mp_p, size)) - sizeof(ks_pool_block_t);
- } else {
- real_size = size;
- }
-
- /*
- * We use a specific free bits calculation here because if we are
- * freeing 10 bytes then we will be putting it into the 8-byte free
- * list and not the 16 byte list. size_to_bits(10) will return 4
- * instead of 3.
- */
- bit_n = size_to_free_bits(real_size);
-
- /*
- * Minimal error checking. We could go all the way through the
- * list however this might be prohibitive.
- */
- if (mp_p->mp_free[bit_n] == addr) {
- return KS_STATUS_IS_FREE;
- }
-
- /* add the freed pointer to the free list */
- if (bit_n < min_bit_free_next) {
- /*
- * Yes we know this will lose 99% of the allocations but what else
- * can we do? No space for a next pointer.
- */
- if (mp_p->mp_free[bit_n] == NULL) {
- mp_p->mp_free[bit_n] = addr;
- }
- } else if (bit_n < min_bit_free_size) {
- /* we copy, not assign, to maintain the free list */
- memcpy(addr, mp_p->mp_free + bit_n, sizeof(void *));
- mp_p->mp_free[bit_n] = addr;
- } else {
-
- /* setup our free list structure */
- free_pnt.mf_next_p = mp_p->mp_free[bit_n];
- free_pnt.mf_size = real_size;
-
- /* we copy the structure in since we don't know about alignment */
- memcpy(addr, &free_pnt, sizeof(free_pnt));
- mp_p->mp_free[bit_n] = addr;
- }
-
- return KS_STATUS_SUCCESS;
+ *((ks_byte_t *)addr) = KS_POOL_FENCE_MAGIC0;
+ *((ks_byte_t *)addr + 1) = KS_POOL_FENCE_MAGIC1;
}
/*
- * static int split_block
- *
- * DESCRIPTION:
- *
- * When freeing space in a multi-block chunk we have to create new
- * blocks out of the upper areas being freed.
- *
- * RETURNS:
- *
- * Success - KS_STATUS_SUCCESS
- *
- * Failure - Ks_Pool error code
- *
- * ARGUMENTS:
- *
- * mp_p <-> Pointer to the memory pool.
- *
- * free_addr -> Address that we are freeing.
- *
- * size -> Size of the space that we are taking from address.
- */
-static int split_block(ks_pool_t *mp_p, void *free_addr, const unsigned long size)
+* static ks_status_t check_prefix
+*
+* DESCRIPTION:
+*
+* Check the validity of prefix checksums.
+*
+* RETURNS:
+*
+* Success - KS_STATUS_SUCCESS
+*
+* Failure - Ks_Pool error code
+*
+* ARGUMENTS:
+*
+* prefix -> A pointer to a prefix.
+*/
+static ks_status_t check_prefix(const ks_pool_prefix_t *prefix)
{
- ks_pool_block_t *block_p, *new_block_p;
- int ret, page_n;
- void *end_p;
-
- /*
- * 1st we find the block pointer from our free addr. At this point
- * the pointer must be the 1st one in the block if it is spans
- * multiple blocks.
- */
- block_p = (ks_pool_block_t *) ((char *) free_addr - sizeof(ks_pool_block_t));
- if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
-
- return KS_STATUS_POOL_OVER;
- }
-
- page_n = PAGES_IN_SIZE(mp_p, size);
-
- /* we are creating a new block structure for the 2nd ... */
- new_block_p = (ks_pool_block_t *) ((char *) block_p + SIZE_OF_PAGES(mp_p, page_n));
- new_block_p->mb_magic = BLOCK_MAGIC;
- /* New bounds is 1st block bounds. The 1st block's is reset below. */
- new_block_p->mb_bounds_p = block_p->mb_bounds_p;
- /* Continue the linked list. The 1st block will point to us below. */
- new_block_p->mb_next_p = block_p->mb_next_p;
- new_block_p->mb_magic2 = BLOCK_MAGIC;
-
- /* bounds for the 1st block are reset to the 1st page only */
- block_p->mb_bounds_p = (char *) new_block_p;
- /* the next block pointer for the 1st block is now the new one */
- block_p->mb_next_p = new_block_p;
-
- /* only free the space in the 1st block if it is only 1 block in size */
- if (page_n == 1) {
- /* now free the rest of the 1st block block */
- end_p = (char *) free_addr + size;
- ret = free_pointer(mp_p, end_p, (unsigned long)((char *) block_p->mb_bounds_p - (char *) end_p));
- if (ret != KS_STATUS_SUCCESS) {
- return ret;
- }
- }
-
- /* now free the rest of the block */
- ret = free_pointer(mp_p, FIRST_ADDR_IN_BLOCK(new_block_p), (unsigned long)MEMORY_IN_BLOCK(new_block_p));
- if (ret != KS_STATUS_SUCCESS) {
- return ret;
- }
-
+ if (!(prefix->magic1 == KS_POOL_PREFIX_MAGIC && prefix->magic2 == KS_POOL_PREFIX_MAGIC && prefix->magic3 == KS_POOL_PREFIX_MAGIC && prefix->magic4 == KS_POOL_PREFIX_MAGIC)) return KS_STATUS_INVALID_POINTER;
return KS_STATUS_SUCCESS;
}
-/*
- * static void *get_space
- *
- * DESCRIPTION:
- *
- * Moved a pointer into our free lists.
- *
- * RETURNS:
- *
- * Success - New address that we can use.
- *
- * Failure - NULL
- *
- * ARGUMENTS:
- *
- * mp_p <-> Pointer to the memory pool.
- *
- * byte_size -> Size of the address space that we need.
- *
- * error_p <- Pointer to ks_status_t which, if not NULL, will be set with
- * a ks_pool error code.
- */
-static void *get_space(ks_pool_t *mp_p, const unsigned long byte_size, unsigned long *padding, ks_status_t *error_p)
-{
- ks_pool_block_t *block_p;
- ks_pool_free_t free_pnt;
- int ret;
- unsigned long size;
- unsigned int bit_c, page_n, left;
- void *free_addr = NULL, *free_end;
-
- size = byte_size;
- while ((size & (sizeof(void *) - 1)) > 0) {
- size++;
- }
-
- if (size > byte_size) {
- *padding = size - byte_size;
- }
-
- /*
- * First we check the free lists looking for something with enough
- * pages. Maybe we should only look X bits higher in the list.
- *
- * XXX: this is where we'd do the best fit. We'd look for the
- * closest match. We then could put the rest of the allocation that
- * we did not use in a lower free list. Have a define which states
- * how deep in the free list to go to find the closest match.
- */
- for (bit_c = size_to_bits(size); bit_c <= MAX_BITS; bit_c++) {
- if (mp_p->mp_free[bit_c] != NULL) {
- free_addr = mp_p->mp_free[bit_c];
- break;
- }
- }
-
- /*
- * If we haven't allocated any blocks or if the last block doesn't
- * have enough memory then we need a new block.
- */
- if (bit_c > MAX_BITS) {
-
- /* we need to allocate more space */
-
- page_n = PAGES_IN_SIZE(mp_p, size);
-
- /* now we try and get the pages we need/want */
- block_p = alloc_pages(mp_p, page_n, error_p);
- if (block_p == NULL) {
- /* error_p set in alloc_pages */
- return NULL;
- }
-
- /* init the block header */
- block_p->mb_magic = BLOCK_MAGIC;
- block_p->mb_bounds_p = (char *) block_p + SIZE_OF_PAGES(mp_p, page_n);
- block_p->mb_next_p = mp_p->mp_first_p;
- block_p->mb_magic2 = BLOCK_MAGIC;
-
- /*
- * We insert it into the front of the queue. We could add it to
- * the end but there is not much use.
- */
- mp_p->mp_first_p = block_p;
- if (mp_p->mp_last_p == NULL) {
- mp_p->mp_last_p = block_p;
- }
-
- free_addr = FIRST_ADDR_IN_BLOCK(block_p);
-
-#ifdef DEBUG
- (void) printf("had to allocate space for %lx of %lu bytes\n", (long) free_addr, size);
-#endif
-
- free_end = (char *) free_addr + size;
- left = (unsigned) ((char *) block_p->mb_bounds_p - (char *) free_end);
- } else {
-
- if (bit_c < min_bit_free_next) {
- mp_p->mp_free[bit_c] = NULL;
- /* calculate the number of left over bytes */
- left = bits_to_size(bit_c) - size;
- } else if (bit_c < min_bit_free_next) {
- /* grab the next pointer from the freed address into our list */
- memcpy(mp_p->mp_free + bit_c, free_addr, sizeof(void *));
- /* calculate the number of left over bytes */
- left = bits_to_size(bit_c) - size;
- } else {
- /* grab the free structure from the address */
- memcpy(&free_pnt, free_addr, sizeof(free_pnt));
- mp_p->mp_free[bit_c] = free_pnt.mf_next_p;
-
- /* are we are splitting up a multiblock chunk into fewer blocks? */
- if (PAGES_IN_SIZE(mp_p, free_pnt.mf_size) > PAGES_IN_SIZE(mp_p, size)) {
- ret = split_block(mp_p, free_addr, size);
- if (ret != KS_STATUS_SUCCESS) {
- SET_POINTER(error_p, ret);
- return NULL;
- }
- /* left over memory was taken care of in split_block */
- left = 0;
- } else {
- /* calculate the number of left over bytes */
- left = free_pnt.mf_size - size;
- }
- }
-
-#ifdef DEBUG
- (void) printf("found a free block at %lx of %lu bytes\n", (long) free_addr, left + size);
-#endif
-
- free_end = (char *) free_addr + size;
- }
-
- /*
- * If we have memory left over then we free it so someone else can
- * use it. We do not free the space if we just allocated a
- * multi-block chunk because we need to have every allocation easily
- * find the start of the block. Every user address % page-size
- * should take us to the start of the block.
- */
- if (left > 0 && size <= MAX_BLOCK_USER_MEMORY(mp_p)) {
- /* free the rest of the block */
- ret = free_pointer(mp_p, free_end, left);
- if (ret != KS_STATUS_SUCCESS) {
- SET_POINTER(error_p, ret);
- return NULL;
- }
- }
-
- /* update our bounds */
- if (free_addr > mp_p->mp_bounds_p) {
- mp_p->mp_bounds_p = free_addr;
- } else if (free_addr < mp_p->mp_min_p) {
- mp_p->mp_min_p = free_addr;
- }
-
- return free_addr;
-}
-
/*
* static void *alloc_mem
*
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool. If NULL then it will do a
- * normal malloc.
+ * pool -> Pointer to the memory pool.
*
* byte_size -> Number of bytes to allocate in the pool. Must be >0.
*
* error_p <- Pointer to ks_status_t which, if not NULL, will be set with
* a ks_pool error code.
*/
-static void *alloc_mem(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p)
+static void *alloc_mem(ks_pool_t *pool, const ks_size_t size, ks_status_t *error_p)
{
- unsigned long size, fence, padding = 0;
- void *addr;
- alloc_prefix_t *prefix;
-
- /* make sure we have enough bytes */
- if (byte_size < MIN_ALLOCATION) {
- size = MIN_ALLOCATION;
- } else {
- size = byte_size;
- }
-
- fence = FENCE_SIZE;
-
- /* get our free space + the space for the fence post */
- addr = get_space(mp_p, size + fence + PREFIX_SIZE, &padding, error_p);
-
- if (addr == NULL) {
- /* error_p set in get_space */
- return NULL;
- }
-
-
- prefix = (alloc_prefix_t *) addr;
- prefix->m1 = PRE_MAGIC1;
- prefix->m2 = PRE_MAGIC2;
- prefix->size = size + fence + PREFIX_SIZE + padding;
+ ks_size_t required;
+ void *start = NULL;
+ void *addr = NULL;
+ void *fence = NULL;
+ ks_pool_prefix_t *prefix = NULL;
+
+ ks_assert(pool);
+ ks_assert(size);
+
+ required = KS_POOL_PREFIX_SIZE + size + KS_POOL_FENCE_SIZE;
+ start = malloc(required);
+ ks_assert(start);
+ memset(start, 0, required);
+
+ prefix = (ks_pool_prefix_t *)start;
+ addr = (void *)((ks_byte_t *)start + KS_POOL_PREFIX_SIZE);
+ fence = (void *)((ks_byte_t *)addr + size);
+
+ prefix->magic1 = KS_POOL_PREFIX_MAGIC;
+ prefix->size = size;
+ prefix->magic2 = KS_POOL_PREFIX_MAGIC;
prefix->refs = 1;
- prefix->padding = padding;
-
- write_magic((char *) prefix + prefix->size - padding - fence);
+ prefix->next = pool->first;
+ if (pool->first) pool->first->prev = prefix;
+ pool->first = prefix;
+ if (!pool->last) pool->last = prefix;
+ prefix->magic3 = KS_POOL_PREFIX_MAGIC;
+ prefix->magic4 = KS_POOL_PREFIX_MAGIC;
+
+ write_fence(fence);
- if (mp_p->mp_log_func != NULL) {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_INCREF, prefix->size, prefix->refs, NULL, addr, 0);
+ if (pool->log_func != NULL) {
+ pool->log_func(pool, KS_POOL_FUNC_INCREF, prefix->size, prefix->refs, NULL, addr, 0);
}
- /* maintain our stats */
- mp_p->mp_alloc_c++;
- mp_p->mp_user_alloc += prefix->size;
- if (mp_p->mp_user_alloc > mp_p->mp_max_alloc) {
- mp_p->mp_max_alloc = mp_p->mp_user_alloc;
+ pool->alloc_c++;
+ pool->user_alloc += prefix->size;
+ if (pool->user_alloc > pool->max_alloc) {
+ pool->max_alloc = pool->user_alloc;
}
SET_POINTER(error_p, KS_STATUS_SUCCESS);
- return (uint8_t *)addr + PREFIX_SIZE;
+ return addr;
}
/*
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool. If NULL then it will do a
- * normal free.
+ * pool -> Pointer to the memory pool.
*
- * addr <-> Address to free.
+ * addr -> Address to free.
*
*/
-static int free_mem(ks_pool_t *mp_p, void *addr)
+static ks_status_t free_mem(ks_pool_t *pool, void *addr)
{
- unsigned long size;
- int ret;
- ks_pool_block_t *block_p;
- alloc_prefix_t *prefix;
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ void *start = NULL;
+ void *fence = NULL;
+ ks_pool_prefix_t *prefix = NULL;
+ ks_assert(pool);
+ ks_assert(addr);
- prefix = (alloc_prefix_t *) ((char *) addr - PREFIX_SIZE);
- if (!(prefix->m1 == PRE_MAGIC1 && prefix->m2 == PRE_MAGIC2)) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_INVALID_POINTER;
- }
+ start = (void *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
+ prefix = (ks_pool_prefix_t *)start;
+
+ if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) return ret;
if (prefix->refs > 0) {
prefix->refs--;
+
+ if (pool->log_func != NULL) {
+ pool->log_func(pool, KS_POOL_FUNC_DECREF, prefix->size, prefix->refs, addr, NULL, 0);
+ }
}
if (prefix->refs > 0) {
return KS_STATUS_REFS_EXIST;
}
- size = prefix->size;
-
- /*
- * If the size is larger than a block then the allocation must be at
- * the front of the block.
- */
- if (size > MAX_BLOCK_USER_MEMORY(mp_p)) {
- block_p = (ks_pool_block_t *) ((char *) addr - PREFIX_SIZE - sizeof(ks_pool_block_t));
- if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_INVALID_POINTER;
- }
- }
-
- /* find the user's magic numbers */
- ret = check_magic(prefix, prefix->size - FENCE_SIZE - prefix->padding);
-
- perform_pool_cleanup_on_free(mp_p, addr);
+ fence = (void *)((uintptr_t)addr + prefix->size);
+ ret = check_fence(fence);
- /* move pointer to actual beginning */
- addr = prefix;
+ perform_pool_cleanup_on_free(pool, prefix);
- if (ret != KS_STATUS_SUCCESS) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return ret;
+ if (!prefix->prev && !prefix->next) pool->first = pool->last = NULL;
+ else if (!prefix->prev) {
+ pool->first = prefix->next;
+ pool->first->prev = NULL;
}
-
- ret = free_pointer(mp_p, addr, size);
-
- if (ret != KS_STATUS_SUCCESS) {
- return ret;
+ else if (!prefix->next) {
+ pool->last = prefix->prev;
+ pool->last->next = NULL;
+ } else {
+ prefix->prev->next = prefix->next;
+ prefix->next->prev = prefix->prev;
}
- mp_p->mp_user_alloc -= size;
+ pool->alloc_c--;
+ pool->user_alloc -= prefix->size;
- /* adjust our stats */
- mp_p->mp_alloc_c--;
+ free(start);
- return KS_STATUS_SUCCESS;
+ return ret;
}
/***************************** exported routines *****************************/
* flags -> Flags to set attributes of the memory pool. See the top
* of ks_pool.h.
*
- * page_size -> Set the internal memory page-size. This must be a
- * multiple of the getpagesize() value. Set to 0 for the default.
- *
- * start_addr -> Starting address to try and allocate memory pools.
- *
* error_p <- Pointer to ks_status_t which, if not NULL, will be set with
* a ks_pool error code.
*/
-static ks_pool_t *ks_pool_raw_open(const unsigned int flags, const unsigned int page_size, void *start_addr, ks_status_t *error_p)
+static ks_pool_t *ks_pool_raw_open(const ks_size_t flags, ks_status_t *error_p)
{
- ks_pool_block_t *block_p;
- int page_n, ret;
- ks_pool_t mp, *mp_p;
- void *free_addr;
-
- if (!enabled_b) {
- startup();
- }
-
- /* zero our temp struct */
- memset(&mp, 0, sizeof(mp));
-
- mp.mp_magic = KS_POOL_MAGIC;
- mp.mp_flags = flags;
- mp.mp_alloc_c = 0;
- mp.mp_user_alloc = 0;
- mp.mp_max_alloc = 0;
- mp.mp_page_c = 0;
- /* mp.mp_page_size set below */
- /* mp.mp_blocks_bit_n set below */
- /* mp.mp_top set below */
- mp.mp_log_func = NULL;
- mp.mp_min_p = NULL;
- mp.mp_bounds_p = NULL;
- mp.mp_first_p = NULL;
- mp.mp_last_p = NULL;
- mp.mp_magic2 = KS_POOL_MAGIC;
-
- /* get and sanity check our page size */
- if (page_size > 0) {
- mp.mp_page_size = page_size;
- if (mp.mp_page_size % getpagesize() != 0) {
- SET_POINTER(error_p, KS_STATUS_ARG_INVALID);
- return NULL;
- }
- } else {
- mp.mp_page_size = getpagesize() * DEFAULT_PAGE_MULT;
- if (mp.mp_page_size % 1024 != 0) {
- SET_POINTER(error_p, KS_STATUS_PAGE_SIZE);
- return NULL;
- }
- }
-
- /* we start at the front of the file */
- mp.mp_top = 0;
+ ks_pool_t *pool;
+ pool = malloc(sizeof(ks_pool_t));
+ ks_assert(pool);
+ memset(pool, 0, sizeof(ks_pool_t));
- /*
- * Find out how many pages we need for our ks_pool structure.
- *
- * NOTE: this adds possibly unneeded space for ks_pool_block_t which
- * may not be in this block.
- */
- page_n = PAGES_IN_SIZE(&mp, sizeof(ks_pool_t));
-
- /* now allocate us space for the actual struct */
- mp_p = alloc_pages(&mp, page_n, error_p);
- if (mp_p == NULL) {
- return NULL;
- }
-
- /*
- * NOTE: we do not normally free the rest of the block here because
- * we want to lesson the chance of an allocation overwriting the
- * main structure.
- */
- if (BIT_IS_SET(flags, KS_POOL_FLAG_HEAVY_PACKING)) {
-
- /* we add a block header to the front of the block */
- block_p = (ks_pool_block_t *) mp_p;
-
- /* init the block header */
- block_p->mb_magic = BLOCK_MAGIC;
- block_p->mb_bounds_p = (char *) block_p + SIZE_OF_PAGES(&mp, page_n);
- block_p->mb_next_p = NULL;
- block_p->mb_magic2 = BLOCK_MAGIC;
-
- /* the ks_pool pointer is then the 2nd thing in the block */
- mp_p = FIRST_ADDR_IN_BLOCK(block_p);
- free_addr = (char *) mp_p + sizeof(ks_pool_t);
-
- /* free the rest of the block */
- ret = free_pointer(&mp, free_addr, (unsigned long)((char *) block_p->mb_bounds_p - (char *) free_addr));
- if (ret != KS_STATUS_SUCCESS) {
- /* NOTE: after this line mp_p will be invalid */
- (void) free_pages(block_p, SIZE_OF_PAGES(&mp, page_n));
-
- SET_POINTER(error_p, ret);
- return NULL;
- }
-
- /*
- * NOTE: if we are HEAVY_PACKING then the 1st block with the ks_pool
- * header is not on the block linked list.
- */
-
- /* now copy our tmp structure into our new memory area */
- memcpy(mp_p, &mp, sizeof(ks_pool_t));
-
- /* we setup min/max to our current address which is as good as any */
- mp_p->mp_min_p = block_p;
- mp_p->mp_bounds_p = block_p->mb_bounds_p;
- } else {
- /* now copy our tmp structure into our new memory area */
- memcpy(mp_p, &mp, sizeof(ks_pool_t));
-
- /* we setup min/max to our current address which is as good as any */
- mp_p->mp_min_p = mp_p;
- mp_p->mp_bounds_p = (char *) mp_p + SIZE_OF_PAGES(mp_p, page_n);
- }
+ pool->magic1 = KS_POOL_MAGIC;
+ pool->flags = flags;
+ pool->magic2 = KS_POOL_MAGIC;
SET_POINTER(error_p, KS_STATUS_SUCCESS);
- return mp_p;
+ return pool;
}
/*
KS_DECLARE(ks_status_t) ks_pool_open(ks_pool_t **poolP)
{
- ks_status_t err;
- ks_pool_t *pool = ks_pool_raw_open(KS_POOL_FLAG_DEFAULT, 0, NULL, &err);
-
- if (pool && (err == KS_STATUS_SUCCESS)) {
- ks_mutex_create(&pool->mutex, KS_MUTEX_FLAG_DEFAULT, NULL);
- ks_mutex_create(&pool->cleanup_mutex, KS_MUTEX_FLAG_DEFAULT, NULL);
- *poolP = pool;
- return KS_STATUS_SUCCESS;
- } else {
- *poolP = NULL;
- return err;
- }
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ ks_pool_t *pool = NULL;
+
+ ks_assert(poolP);
+
+ pool = ks_pool_raw_open(KS_POOL_FLAG_DEFAULT, &ret);
+
+ *poolP = pool;
+
+ if (pool) ks_mutex_create(&pool->mutex, KS_MUTEX_FLAG_DEFAULT, NULL);
+
+ return ret;
}
/*
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to our memory pool.
+ * pool -> Pointer to our memory pool.
*/
-static ks_status_t ks_pool_raw_close(ks_pool_t *mp_p)
+static ks_status_t ks_pool_raw_close(ks_pool_t *pool)
{
- ks_pool_block_t *block_p, *next_p;
- void *addr;
- //unsigned long size;
- int ret, final = KS_STATUS_SUCCESS;
-
- /* special case, just return no-error */
- if (mp_p == NULL) {
- return KS_STATUS_ARG_NULL;
- }
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_PNT;
- }
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_POOL_OVER;
- }
-
- if (mp_p->mp_log_func != NULL) {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_CLOSE, 0, 0, NULL, NULL, 0);
- }
-
- perform_pool_cleanup(mp_p);
-
- ks_mutex_t *mutex = mp_p->mutex, *cleanup_mutex = mp_p->cleanup_mutex;
- ks_mutex_lock(mutex);
- /*
- * NOTE: if we are HEAVY_PACKING then the 1st block with the ks_pool
- * header is not on the linked list.
- */
-
- /* free/invalidate the blocks */
- for (block_p = mp_p->mp_first_p; block_p != NULL; block_p = next_p) {
- if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
- final = KS_STATUS_POOL_OVER;
- break;
- }
- block_p->mb_magic = 0;
- block_p->mb_magic2 = 0;
- /* record the next pointer because it might be invalidated below */
- next_p = block_p->mb_next_p;
-
- if (next_p && (next_p->mb_magic != BLOCK_MAGIC || next_p->mb_magic2 != BLOCK_MAGIC)) {
- final = KS_STATUS_POOL_OVER;
- break;
- }
- ret = free_pages(block_p, (unsigned long)((char *) block_p->mb_bounds_p - (char *) block_p));
- if (next_p && (next_p->mb_magic != BLOCK_MAGIC || next_p->mb_magic2 != BLOCK_MAGIC)) {
- final = KS_STATUS_POOL_OVER;
- break;
- }
-
- if (ret != KS_STATUS_SUCCESS) {
- final = ret;
- }
- }
+ ks_status_t ret = KS_STATUS_SUCCESS;
- /* invalidate the ks_pool before we ditch it */
- mp_p->mp_magic = 0;
- mp_p->mp_magic2 = 0;
+ if ((ret = ks_pool_clear(pool)) != KS_STATUS_SUCCESS) goto done;
- /* if we are heavy packing then we need to free the 1st block later */
- if (BIT_IS_SET(mp_p->mp_flags, KS_POOL_FLAG_HEAVY_PACKING)) {
- addr = (char *) mp_p - sizeof(ks_pool_block_t);
- } else {
- addr = mp_p;
+ if (pool->log_func != NULL) {
+ pool->log_func(pool, KS_POOL_FUNC_CLOSE, 0, 0, NULL, NULL, 0);
}
- //size = SIZE_OF_PAGES(mp_p, PAGES_IN_SIZE(mp_p, sizeof(ks_pool_t)));
-
- free(addr);
+ ks_mutex_destroy(&pool->mutex);
- ks_mutex_unlock(mutex);
- ks_mutex_destroy(&mutex);
- ks_mutex_destroy(&cleanup_mutex);
+ free(pool);
- return final;
+done:
+ ks_assert(ret == KS_STATUS_SUCCESS);
+ return ret;
}
*
* ARGUMENTS:
*
- * mp_pp <-> Pointer to pointer of our memory pool.
- * error_p <- Pointer to error
+ * poolP <-> Pointer to pointer of our memory pool.
*/
-KS_DECLARE(ks_status_t) ks_pool_close(ks_pool_t **mp_pP)
+KS_DECLARE(ks_status_t) ks_pool_close(ks_pool_t **poolP)
{
- ks_status_t err;
-
- ks_assert(mp_pP);
+ ks_status_t ret = KS_STATUS_SUCCESS;
- err = ks_pool_raw_close(*mp_pP);
+ ks_assert(poolP);
+ ks_assert(*poolP);
- if (err == KS_STATUS_SUCCESS) {
- *mp_pP = NULL;
- }
+ if ((ret = ks_pool_raw_close(*poolP)) == KS_STATUS_SUCCESS) *poolP = NULL;
- return err;
+ return ret;
}
/*
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to our memory pool.
+ * pool -> Pointer to our memory pool.
*/
-KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *mp_p)
+KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *pool)
{
- ks_pool_block_t *block_p;
- int final = KS_STATUS_SUCCESS, bit_n, ret;
- void *first_p;
-
- /* special case, just return no-error */
- if (mp_p == NULL) {
- return KS_STATUS_ARG_NULL;
- }
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_PNT;
- }
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_POOL_OVER;
- }
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ ks_pool_prefix_t *prefix, *nprefix;
- ks_mutex_lock(mp_p->mutex);
- if (mp_p->mp_log_func != NULL) {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_CLEAR, 0, 0, NULL, NULL, 0);
- }
+ ks_assert(pool);
- perform_pool_cleanup(mp_p);
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
- /* reset all of our free lists */
- for (bit_n = 0; bit_n <= MAX_BITS; bit_n++) {
- mp_p->mp_free[bit_n] = NULL;
+ if (pool->log_func != NULL) {
+ pool->log_func(pool, KS_POOL_FUNC_CLEAR, 0, 0, NULL, NULL, 0);
}
- /* free the blocks */
- for (block_p = mp_p->mp_first_p; block_p != NULL; block_p = block_p->mb_next_p) {
- if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- final = KS_STATUS_POOL_OVER;
- break;
- }
+ ks_mutex_lock(pool->mutex);
- first_p = FIRST_ADDR_IN_BLOCK(block_p);
+ perform_pool_cleanup(pool);
- /* free the memory */
- ret = free_pointer(mp_p, first_p, (unsigned long)MEMORY_IN_BLOCK(block_p));
- if (ret != KS_STATUS_SUCCESS) {
- final = ret;
- }
+ for (prefix = pool->first; prefix; prefix = nprefix) {
+ nprefix = prefix->next;
+ // @todo check_prefix()? We still want to free every entry even if some are corrupted, so that corruption does not cause a memory leak.
+ free(prefix);
}
- ks_mutex_unlock(mp_p->mutex);
- return final;
+ ks_mutex_unlock(pool->mutex);
+
+done:
+ ks_assert(ret == KS_STATUS_SUCCESS);
+ return ret;
}
/*
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool.
- *
+ * pool -> Pointer to the memory pool.
*
- * byte_size -> Number of bytes to allocate in the pool. Must be >0.
+ * size -> Number of bytes to allocate in the pool. Must be >0.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
-KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p)
+KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *pool, const ks_size_t size, ks_status_t *error_p)
{
- void *addr;
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ void *addr = NULL;
- ks_assert(mp_p);
+ ks_assert(pool);
+ ks_assert(size);
- //if (1) return calloc(1, byte_size);
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_PNT);
- return NULL;
- }
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_POOL_OVER);
- return NULL;
- }
+ ks_mutex_lock(pool->mutex);
+ addr = alloc_mem(pool, size, &ret);
+ ks_mutex_unlock(pool->mutex);
- if (byte_size == 0) {
- SET_POINTER(error_p, KS_STATUS_ARG_INVALID);
- return NULL;
+ if (pool->log_func != NULL) {
+ pool->log_func(pool, KS_POOL_FUNC_ALLOC, size, 0, addr, NULL, 0);
}
- ks_mutex_lock(mp_p->mutex);
- addr = alloc_mem(mp_p, byte_size, error_p);
- ks_mutex_unlock(mp_p->mutex);
-
- if (mp_p->mp_log_func != NULL) {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_ALLOC, byte_size, 0, addr, NULL, 0);
- }
-
- ks_assert(addr || (mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT));
-
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ZERO)) {
- memset(addr, 0, byte_size);
- }
+ ks_assert(addr);
+done:
+ ks_assert(ret == KS_STATUS_SUCCESS);
return addr;
}
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool.
+ * pool -> Pointer to the memory pool.
*
*
- * byte_size -> Number of bytes to allocate in the pool. Must be >0.
+ * size -> Number of bytes to allocate in the pool. Must be >0.
*
*/
-KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *mp_p, const unsigned long byte_size)
+KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *pool, const ks_size_t size)
{
- return ks_pool_alloc_ex(mp_p, byte_size, NULL);
+ return ks_pool_alloc_ex(pool, size, NULL);
}
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool. If NULL then it will do a
+ * pool -> Pointer to the memory pool. If NULL then it will do a
* normal calloc.
*
* ele_n -> Number of elements to allocate.
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
-KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size, ks_status_t *error_p)
+KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *pool, const ks_size_t ele_n, const ks_size_t ele_size, ks_status_t *error_p)
{
- void *addr;
- unsigned long byte_size;
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ void *addr = NULL;
+ ks_size_t size;
- ks_assert(mp_p);
+ ks_assert(pool);
+ ks_assert(ele_n);
+ ks_assert(ele_size);
- //if (1) return calloc(ele_n, ele_size);
-
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_PNT);
- return NULL;
- }
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_POOL_OVER);
- return NULL;
- }
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
- if (ele_n == 0 || ele_size == 0) {
- SET_POINTER(error_p, KS_STATUS_ARG_INVALID);
- return NULL;
- }
+ size = ele_n * ele_size;
- ks_mutex_lock(mp_p->mutex);
- byte_size = ele_n * ele_size;
- addr = alloc_mem(mp_p, byte_size, error_p);
- if (addr != NULL) {
- memset(addr, 0, byte_size);
- }
- ks_mutex_unlock(mp_p->mutex);
+ ks_mutex_lock(pool->mutex);
+ addr = alloc_mem(pool, size, &ret);
+ ks_mutex_unlock(pool->mutex);
- if (mp_p->mp_log_func != NULL) {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_CALLOC, ele_size, ele_n, addr, NULL, 0);
+ if (pool->log_func != NULL) {
+ pool->log_func(pool, KS_POOL_FUNC_CALLOC, ele_size, ele_n, addr, NULL, 0);
}
- ks_assert(addr || (mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT));
+ ks_assert(addr);
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ZERO)) {
- memset(addr, 0, ele_n * ele_size);
- }
+done:
+ ks_assert(ret == KS_STATUS_SUCCESS);
return addr;
}
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool. If NULL then it will do a
+ * pool -> Pointer to the memory pool. If NULL then it will do a
* normal calloc.
*
* ele_n -> Number of elements to allocate.
* ele_size -> Number of bytes per element being allocated.
*
*/
-KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size)
+KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *pool, const ks_size_t ele_n, const ks_size_t ele_size)
{
- return ks_pool_calloc_ex(mp_p, ele_n, ele_size, NULL);
+ return ks_pool_calloc_ex(pool, ele_n, ele_size, NULL);
}
/*
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool.
+ * pool -> Pointer to the memory pool.
*
- *
- * addr <-> pointer to pointer of Address to free.
+ * addr <-> Pointer to the pointer of the address to free.
*
*/
-KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *mp_p, void **addrP)
+KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *pool, void **addrP)
{
- ks_status_t r;
+ ks_status_t ret = KS_STATUS_SUCCESS;
void *addr;
+ ks_assert(pool);
ks_assert(addrP);
+ ks_assert(*addrP);
addr = *addrP;
- ks_assert(mp_p);
- ks_assert(addr);
-
- //if (1) {
- // *addrP = NULL;
- // free(addr);
- // return KS_STATUS_SUCCESS;
- //}
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
- ks_mutex_lock(mp_p->mutex);
+ ks_mutex_lock(pool->mutex);
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- r = KS_STATUS_PNT;
- goto end;
+ if (pool->log_func != NULL) {
+ ks_pool_prefix_t *prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
+ // @todo check_prefix()?
+ pool->log_func(pool, prefix->refs == 1 ? KS_POOL_FUNC_FREE : KS_POOL_FUNC_DECREF, prefix->size, prefix->refs - 1, addr, NULL, 0);
}
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- r = KS_STATUS_POOL_OVER;
- goto end;
- }
+ ret = free_mem(pool, addr);
+ ks_mutex_unlock(pool->mutex);
- if (mp_p->mp_log_func != NULL) {
- alloc_prefix_t *prefix = (alloc_prefix_t *)((char *)addr - PREFIX_SIZE);
- if (prefix->refs == 1) {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_FREE, prefix->size, prefix->refs - 1, NULL, addr, 0);
- } else {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_DECREF, prefix->size, prefix->refs - 1, NULL, addr, 0);
- }
- }
-
- r = free_mem(mp_p, addr);
-
- end:
-
- ks_mutex_unlock(mp_p->mutex);
-
- if (r == KS_STATUS_SUCCESS) {
+done:
+ if (ret != KS_STATUS_REFS_EXIST) {
+ ks_assert(ret == KS_STATUS_SUCCESS);
*addrP = NULL;
}
- return r;
-
+ return ret;
}
/*
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool.
+ * pool -> Pointer to the memory pool.
*
* addr -> The addr to ref
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
-KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *mp_p, void *addr, ks_status_t *error_p)
+KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *pool, void *addr, ks_status_t *error_p)
{
- alloc_prefix_t *prefix;
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ ks_pool_prefix_t *prefix;
+ ks_size_t refs;
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_PNT);
- return NULL;
- }
-
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_POOL_OVER);
- return NULL;
- }
+ ks_assert(pool);
+ ks_assert(addr);
- ks_mutex_lock(mp_p->mutex);
- prefix = (alloc_prefix_t *) ((char *) addr - PREFIX_SIZE);
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
- if (!(prefix->m1 == PRE_MAGIC1 && prefix->m2 == PRE_MAGIC2)) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_INVALID_POINTER);
- return NULL;
- }
+ prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
+ if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) goto done;
- prefix->refs++;
+ ks_mutex_lock(pool->mutex);
+ refs = ++prefix->refs;
+ ks_mutex_unlock(pool->mutex);
- if (mp_p->mp_log_func != NULL) {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_INCREF, prefix->size, prefix->refs, NULL, addr, 0);
+ if (pool->log_func != NULL) {
+ pool->log_func(pool, KS_POOL_FUNC_INCREF, prefix->size, refs, addr, NULL, 0);
}
- ks_mutex_unlock(mp_p->mutex);
+done:
+ ks_assert(ret == KS_STATUS_SUCCESS);
return addr;
}
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool.
+ * pool -> Pointer to the memory pool.
*
*
* old_addr -> Previously allocated address.
*
- * new_byte_size -> New size of the allocation.
+ * new_size -> New size of the allocation.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
-KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size, ks_status_t *error_p)
+KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *pool, void *old_addr, const ks_size_t new_size, ks_status_t *error_p)
{
- unsigned long copy_size, new_size, old_byte_size;
- void *new_addr;
- ks_pool_block_t *block_p;
- int ret;
- alloc_prefix_t *prefix;
- void *orig_old_addr = NULL;
-
- ks_assert(mp_p);
- //ks_assert(old_addr);
-
- //if (1) return realloc(old_addr, new_byte_size);
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ ks_size_t old_size;
+ ks_pool_prefix_t *prefix;
+ void *new_addr = NULL;
+ ks_size_t required;
- if (!old_addr) {
- return ks_pool_alloc_ex(mp_p, new_byte_size, error_p);
- }
+ ks_assert(pool);
+ ks_assert(new_size);
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_PNT);
- return NULL;
- }
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_POOL_OVER);
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) {
+ SET_POINTER(error_p, ret);
return NULL;
}
- prefix = (alloc_prefix_t *) ((char *) old_addr - PREFIX_SIZE);
+ if (!old_addr) {
+ return ks_pool_alloc_ex(pool, new_size, error_p);
+ }
- if (!(prefix->m1 == PRE_MAGIC1 && prefix->m2 == PRE_MAGIC2)) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_INVALID_POINTER);
+ prefix = (ks_pool_prefix_t *)((uintptr_t)old_addr - KS_POOL_PREFIX_SIZE);
+ if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) {
+ SET_POINTER(error_p, ret);
return NULL;
}
-
- ks_mutex_lock(mp_p->mutex);
+ ks_mutex_lock(pool->mutex);
if (prefix->refs > 1) {
- SET_POINTER(error_p,KS_STATUS_NOT_ALLOWED);
- return NULL;
+ ret = KS_STATUS_NOT_ALLOWED;
+ goto done;
}
-
- old_byte_size = prefix->size - PREFIX_SIZE - FENCE_SIZE - prefix->padding;
-
- if (old_byte_size == new_byte_size) {
- SET_POINTER(error_p, KS_STATUS_SUCCESS);
+ if (new_size == prefix->size) {
new_addr = old_addr;
- goto end;
+ goto done;
}
- if (old_byte_size >= new_byte_size) {
- unsigned long diff = old_byte_size - new_byte_size;
- //prefix->size -= diff;
- prefix->padding += diff;
- write_magic((char *)prefix + prefix->size - prefix->padding - FENCE_SIZE);
- SET_POINTER(error_p, KS_STATUS_SUCCESS);
- new_addr = old_addr;
- goto end;
- }
+ old_size = prefix->size;
- {
- unsigned long diff = new_byte_size - old_byte_size;
- if (prefix->padding >= diff) {
- prefix->padding -= diff;
- write_magic((char *)prefix + prefix->size - prefix->padding - FENCE_SIZE);
- SET_POINTER(error_p, KS_STATUS_SUCCESS);
- new_addr = old_addr;
- goto end;
- }
- }
+ required = KS_POOL_PREFIX_SIZE + new_size + KS_POOL_FENCE_SIZE;
+ new_addr = realloc((void *)prefix, required);
+ ks_assert(new_addr);
- /*
- * If the size is larger than a block then the allocation must be at
- * the front of the block.
- */
- if (old_byte_size > MAX_BLOCK_USER_MEMORY(mp_p)) {
- block_p = (ks_pool_block_t *) ((char *) old_addr - PREFIX_SIZE - sizeof(ks_pool_block_t));
- if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, KS_STATUS_POOL_OVER);
- new_addr = NULL;
- goto end;
- }
- }
-
- if (old_byte_size > 0) {
- ret = check_magic(prefix, prefix->size - FENCE_SIZE - prefix->padding);
-
- if (ret != KS_STATUS_SUCCESS) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- SET_POINTER(error_p, ret);
- new_addr = NULL;
- goto end;
- }
- }
-
- orig_old_addr = old_addr;
- /* move pointer to actual beginning */
- old_addr = prefix;
-
- /* make sure we have enough bytes */
- if (new_byte_size < MIN_ALLOCATION) {
- new_size = MIN_ALLOCATION;
- } else {
- new_size = new_byte_size;
- }
-
- /*
- * NOTE: we could here see if the size is the same or less and then
- * use the current memory and free the space above. This is harder
- * than it sounds if we are changing the block size of the
- * allocation.
- */
-
- /* we need to get another address */
- new_addr = alloc_mem(mp_p, new_size, error_p);
- if (new_addr == NULL) {
- /* error_p set in ks_pool_alloc */
- new_addr = NULL;
- goto end;
- }
+ prefix = (ks_pool_prefix_t *)new_addr;
- copy_size = old_byte_size;
- memcpy(new_addr, orig_old_addr, copy_size);
+ prefix->size = new_size;
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ZERO)) {
- memset(((unsigned char *)new_addr) + copy_size, 0, new_byte_size - old_byte_size);
- }
+ new_addr = (void *)((uintptr_t)new_addr + KS_POOL_PREFIX_SIZE);
+ write_fence((void *)((uintptr_t)new_addr + new_size));
- /* free the old address */
- ret = free_mem(mp_p, (uint8_t *)old_addr + PREFIX_SIZE);
- if (ret != KS_STATUS_SUCCESS) {
- /* if the old free failed, try and free the new address */
- (void) free_mem(mp_p, new_addr);
- SET_POINTER(error_p, ret);
- new_addr = NULL;
- goto end;
- }
+ if (prefix->prev) prefix->prev->next = prefix;
+ else pool->first = prefix;
+ if (prefix->next) prefix->next->prev = prefix;
+ else pool->last = prefix;
- if (mp_p->mp_log_func != NULL) {
- mp_p->mp_log_func(mp_p, KS_POOL_FUNC_RESIZE, new_byte_size, 0, new_addr, old_addr, old_byte_size);
+ if (pool->log_func != NULL) {
+ pool->log_func(pool, KS_POOL_FUNC_RESIZE, new_size, 0, old_addr, new_addr, old_size);
}
- SET_POINTER(error_p, KS_STATUS_SUCCESS);
+done:
+ ks_mutex_unlock(pool->mutex);
- end:
-
- ks_mutex_unlock(mp_p->mutex);
-
- ks_assert(new_addr || (mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT));
+ ks_assert(ret == KS_STATUS_SUCCESS);
return new_addr;
}
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool.
+ * pool -> Pointer to the memory pool.
*
*
* old_addr -> Previously allocated address.
*
- * new_byte_size -> New size of the allocation.
+ * new_size -> New size of the allocation.
*
*/
-KS_DECLARE(void *) ks_pool_resize(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size)
+KS_DECLARE(void *) ks_pool_resize(ks_pool_t *pool, void *old_addr, const ks_size_t new_size)
{
- return ks_pool_resize_ex(mp_p, old_addr, new_byte_size, NULL);
+ return ks_pool_resize_ex(pool, old_addr, new_size, NULL);
}
/*
*
* ARGUMENTS:
*
- * mp_p -> Pointer to the memory pool.
- *
- * page_size_p <- Pointer to an unsigned integer which, if not NULL,
- * will be set to the page-size of the pool.
+ * pool -> Pointer to the memory pool.
*
* num_alloced_p <- Pointer to an unsigned long which, if not NULL,
* will be set to the number of pointers currently allocated in pool.
* will be set to the total amount of space (including administrative
* overhead) used by the pool.
*/
-KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *mp_p, unsigned int *page_size_p,
- unsigned long *num_alloced_p, unsigned long *user_alloced_p, unsigned long *max_alloced_p, unsigned long *tot_alloced_p)
+KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *pool, ks_size_t *num_alloced_p, ks_size_t *user_alloced_p, ks_size_t *max_alloced_p, ks_size_t *tot_alloced_p)
{
- if (mp_p == NULL) {
- return KS_STATUS_ARG_NULL;
- }
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_PNT;
- }
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_POOL_OVER;
- }
+ ks_status_t ret = KS_STATUS_SUCCESS;
- SET_POINTER(page_size_p, mp_p->mp_page_size);
- SET_POINTER(num_alloced_p, mp_p->mp_alloc_c);
- SET_POINTER(user_alloced_p, mp_p->mp_user_alloc);
- SET_POINTER(max_alloced_p, mp_p->mp_max_alloc);
- SET_POINTER(tot_alloced_p, SIZE_OF_PAGES(mp_p, mp_p->mp_page_c));
+ ks_assert(pool);
- return KS_STATUS_SUCCESS;
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
+
+ SET_POINTER(num_alloced_p, pool->alloc_c);
+ SET_POINTER(user_alloced_p, pool->user_alloc);
+ SET_POINTER(max_alloced_p, pool->max_alloc);
+ SET_POINTER(tot_alloced_p, pool->user_alloc + (pool->alloc_c * (KS_POOL_PREFIX_SIZE + KS_POOL_FENCE_SIZE)));
+
+done:
+ ks_assert(ret == KS_STATUS_SUCCESS);
+ return ret;
}
/*
*
* ARGUMENTS:
*
- * mp_p <-> Pointer to the memory pool.
+ * pool -> Pointer to the memory pool.
*
* log_func -> Log function (defined in ks_pool.h) which will be called
* with each ks_pool transaction.
*/
-KS_DECLARE(ks_status_t) ks_pool_set_log_func(ks_pool_t *mp_p, ks_pool_log_func_t log_func)
+KS_DECLARE(ks_status_t) ks_pool_set_log_func(ks_pool_t *pool, ks_pool_log_func_t log_func)
{
- if (mp_p == NULL) {
- return KS_STATUS_ARG_NULL;
- }
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_PNT;
- }
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_POOL_OVER;
- }
-
- mp_p->mp_log_func = log_func;
+ ks_status_t ret = KS_STATUS_SUCCESS;
- return KS_STATUS_SUCCESS;
-}
+ ks_assert(pool);
+ ks_assert(log_func);
-/*
- * int ks_pool_set_max_pages
- *
- * DESCRIPTION:
- *
- * Set the maximum number of pages that the library will use. Once it
- * hits the limit it will return KS_STATUS_NO_PAGES.
- *
- * NOTE: if the KS_POOL_FLAG_HEAVY_PACKING is set then this max-pages
- * value will include the page with the ks_pool header structure in it.
- * If the flag is _not_ set then the max-pages will not include this
- * first page.
- *
- * RETURNS:
- *
- * Success - KS_STATUS_SUCCESS
- *
- * Failure - ks_status_t error code
- *
- * ARGUMENTS:
- *
- * mp_p <-> Pointer to the memory pool.
- *
- * max_pages -> Maximum number of pages used by the library.
- */
-KS_DECLARE(ks_status_t) ks_pool_set_max_pages(ks_pool_t *mp_p, const unsigned int max_pages)
-{
- if (mp_p == NULL) {
- return KS_STATUS_ARG_NULL;
- }
- if (mp_p->mp_magic != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_PNT;
- }
- if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
- if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
- abort();
- }
- return KS_STATUS_POOL_OVER;
- }
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
- if (BIT_IS_SET(mp_p->mp_flags, KS_POOL_FLAG_HEAVY_PACKING)) {
- mp_p->mp_max_pages = max_pages;
- } else {
- /*
- * If we are not heavy-packing the pool then we don't count the
- * 1st page allocated which holds the ks_pool header structure.
- */
- mp_p->mp_max_pages = max_pages + 1;
- }
+ pool->log_func = log_func;
- return KS_STATUS_SUCCESS;
+done:
+ ks_assert(ret == KS_STATUS_SUCCESS);
+ return ret;
}
/*
KS_DECLARE(char *) ks_pstrdup(ks_pool_t *pool, const char *str)
{
char *result;
- unsigned long len;
+ ks_size_t len;
if (!str) {
return NULL;
}
- len = (unsigned long)strlen(str) + 1;
+ len = (ks_size_t)strlen(str) + 1;
result = ks_pool_alloc(pool, len);
memcpy(result, str, len);
return result;
}
-KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, size_t len)
+KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, ks_size_t len)
{
char *result;
const char *end;
len = end - str;
}
- result = ks_pool_alloc(pool, (unsigned long)(len + 1));
+ result = ks_pool_alloc(pool, len + 1);
memcpy(result, str, len);
result[len] = '\0';
return result;
}
-KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, size_t len)
+KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, ks_size_t len)
{
char *result;
return NULL;
}
- result = ks_pool_alloc(pool, (unsigned long)(len + 1));
+ result = ks_pool_alloc(pool, len + 1);
memcpy(result, str, len);
result[len] = '\0';
return result;
}
-KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, size_t len)
+KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, ks_size_t len)
{
void *result;
return NULL;
}
- result = ks_pool_alloc(pool, (unsigned long)len);
+ result = ks_pool_alloc(pool, len);
memcpy(result, buf, len);
return result;
{
char *endp, *argp;
char *result;
- size_t lengths[10];
+ ks_size_t lengths[10];
int i = 0;
- size_t len = 0;
+ ks_size_t len = 0;
va_list ap;
va_start(ap, pool);
/* get lengths so we know what to allocate, cache some so we don't have to double strlen those */
while ((argp = va_arg(ap, char *))) {
- size_t arglen = strlen(argp);
+ ks_size_t arglen = strlen(argp);
if (i < 10) lengths[i++] = arglen;
len += arglen;
}
va_end(ap);
- result = (char *) ks_pool_alloc(pool, (unsigned long)(len + 1));
+ result = (char *) ks_pool_alloc(pool, len + 1);
endp = result;
va_start(ap, pool);