struct blade_connection_s {
blade_handle_t *handle;
- ks_pool_t *pool;
void *transport_data;
blade_transport_callbacks_t *transport_callbacks;
ks_status_t blade_connection_onstate_run(blade_connection_t *bc);
-static void blade_connection_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_connection_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_connection_t *bc = (blade_connection_t *)ptr;
blade_connection_shutdown(bc);
break;
case KS_MPCL_DESTROY:
- // @todo remove this, it's just for posterity in debugging
- bc->sending = NULL;
- bc->lock = NULL;
-
- //ks_pool_free(bc->pool, &bc->id);
- bc->id = NULL;
break;
}
}
bc = ks_pool_alloc(pool, sizeof(blade_connection_t));
bc->handle = bh;
- bc->pool = pool;
ks_cond_create(&bc->cond, pool);
ks_assert(bc->cond);
ks_q_create(&bc->sending, pool, 0);
ks_assert(bc->sending);
- ks_pool_set_cleanup(pool, bc, NULL, blade_connection_cleanup);
+ ks_pool_set_cleanup(bc, NULL, blade_connection_cleanup);
ks_log(KS_LOG_DEBUG, "Created\n");
ks_assert(*bcP);
bc = *bcP;
+ *bcP = NULL;
- pool = bc->pool;
- //ks_pool_free(bc->pool, bcP);
+ pool = ks_pool_get(bc);
ks_pool_close(&pool);
- *bcP = NULL;
-
return KS_STATUS_SUCCESS;
}
return bc->handle;
}
-KS_DECLARE(ks_pool_t *) blade_connection_pool_get(blade_connection_t *bc)
-{
- ks_assert(bc);
-
- return bc->pool;
-}
-
KS_DECLARE(const char *) blade_connection_id_get(blade_connection_t *bc)
{
ks_assert(bc);
{
ks_assert(bc);
- if (bc->session) ks_pool_free(bc->pool, &bc->session);
- bc->session = ks_pstrdup(bc->pool, id);
+ if (bc->session) ks_pool_free(&bc->session);
+ bc->session = ks_pstrdup(ks_pool_get(bc), id);
}
void *blade_connection_state_thread(ks_thread_t *thread, void *data)
struct blade_connectionmgr_s {
blade_handle_t *handle;
- ks_pool_t *pool;
ks_hash_t *connections; // id, blade_connection_t*
};
-static void blade_connectionmgr_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_connectionmgr_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_connectionmgr_t *bcmgr = (blade_connectionmgr_t *)ptr;
bcmgr = ks_pool_alloc(pool, sizeof(blade_connectionmgr_t));
bcmgr->handle = bh;
- bcmgr->pool = pool;
- ks_hash_create(&bcmgr->connections, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bcmgr->pool);
+ ks_hash_create(&bcmgr->connections, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bcmgr->connections);
- ks_pool_set_cleanup(pool, bcmgr, NULL, blade_connectionmgr_cleanup);
+ ks_pool_set_cleanup(bcmgr, NULL, blade_connectionmgr_cleanup);
*bcmgrP = bcmgr;
bcmgr = *bcmgrP;
*bcmgrP = NULL;
- ks_assert(bcmgr);
-
- pool = bcmgr->pool;
-
+ pool = ks_pool_get(bcmgr);
ks_pool_close(&pool);
return KS_STATUS_SUCCESS;
ks_assert(bcmgr);
ks_assert(bc);
- key = ks_pstrdup(bcmgr->pool, blade_connection_id_get(bc));
+ key = ks_pstrdup(ks_pool_get(bcmgr), blade_connection_id_get(bc));
ks_hash_insert(bcmgr->connections, (void *)key, bc);
ks_log(KS_LOG_DEBUG, "Connection Added: %s\n", key);
#include "blade.h"
struct blade_identity_s {
- ks_pool_t *pool;
-
const char *uri;
const char *components;
};
// @todo missed a structure to use cleanup callbacks
-static void blade_identity_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_identity_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_identity_t *bi = (blade_identity_t *)ptr;
case KS_MPCL_ANNOUNCE:
break;
case KS_MPCL_TEARDOWN:
- if (bi->uri) ks_pool_free(bi->pool, &bi->uri);
- if (bi->components) ks_pool_free(bi->pool, &bi->components);
+ if (bi->uri) ks_pool_free(&bi->uri);
+ if (bi->components) ks_pool_free(&bi->components);
if (bi->parameters) ks_hash_destroy(&bi->parameters);
break;
case KS_MPCL_DESTROY:
ks_assert(pool);
bi = ks_pool_alloc(pool, sizeof(blade_identity_t));
- bi->pool = pool;
- ks_pool_set_cleanup(pool, bi, NULL, blade_identity_cleanup);
+ ks_pool_set_cleanup(bi, NULL, blade_identity_cleanup);
*biP = bi;
bi = *biP;
- ks_pool_free(bi->pool, biP);
+ ks_pool_free(biP);
return KS_STATUS_SUCCESS;
}
{
char *tmp = NULL;
char *tmp2 = NULL;
+ ks_pool_t *pool = NULL;
ks_assert(bi);
ks_assert(uri);
ks_log(KS_LOG_DEBUG, "Parsing URI: %s\n", uri);
+ pool = ks_pool_get(bi);
+
if (bi->uri) {
- ks_pool_free(bi->pool, &bi->uri);
- ks_pool_free(bi->pool, &bi->components);
+ ks_pool_free(&bi->uri);
+ ks_pool_free(&bi->components);
}
- bi->uri = ks_pstrdup(bi->pool, uri);
- bi->components = tmp = ks_pstrdup(bi->pool, uri);
+ bi->uri = ks_pstrdup(pool, uri);
+ bi->components = tmp = ks_pstrdup(pool, uri);
bi->name = tmp;
if (!(tmp = strchr(tmp, '@'))) return KS_STATUS_FAIL;
}
if (!bi->parameters) {
- ks_hash_create(&bi->parameters, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bi->pool);
+ ks_hash_create(&bi->parameters, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, pool);
ks_assert(bi->parameters);
}
ks_hash_insert(bi->parameters, key, val);
struct blade_mastermgr_s {
blade_handle_t *handle;
- ks_pool_t *pool;
// @todo how does "exclusive" play into the controllers, does "exclusive" mean only one provider can exist for a given protocol and realm? what does non exclusive mean?
ks_hash_t *protocols; // protocols that have been published with blade.publish, and the details to locate a protocol controller with blade.locate
};
-static void blade_mastermgr_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_mastermgr_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_mastermgr_t *bmmgr = (blade_mastermgr_t *)ptr;
bmmgr = ks_pool_alloc(pool, sizeof(blade_mastermgr_t));
bmmgr->handle = bh;
- bmmgr->pool = pool;
- ks_hash_create(&bmmgr->protocols, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, bmmgr->pool);
+ ks_hash_create(&bmmgr->protocols, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, pool);
ks_assert(bmmgr->protocols);
- ks_pool_set_cleanup(pool, bmmgr, NULL, blade_mastermgr_cleanup);
+ ks_pool_set_cleanup(bmmgr, NULL, blade_mastermgr_cleanup);
*bmmgrP = bmmgr;
bmmgr = *bmmgrP;
*bmmgrP = NULL;
- ks_assert(bmmgr);
-
- pool = bmmgr->pool;
+ pool = ks_pool_get(bmmgr);
ks_pool_close(&pool);
KS_DECLARE(ks_status_t) blade_mastermgr_purge(blade_mastermgr_t *bmmgr, const char *nodeid)
{
+ ks_pool_t *pool = NULL;
ks_hash_t *cleanup = NULL;
+ ks_assert(bmmgr);
+
+ pool = ks_pool_get(bmmgr);
+
ks_hash_write_lock(bmmgr->protocols);
for (ks_hash_iterator_t *it = ks_hash_first(bmmgr->protocols, KS_UNLOCKED); it; it = ks_hash_next(&it)) {
const char *key = NULL;
ks_hash_this(it, (const void **)&key, NULL, (void **)&bp);
if (blade_protocol_purge(bp, nodeid)) {
- if (!cleanup) ks_hash_create(&cleanup, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bmmgr->pool);
+ if (!cleanup) ks_hash_create(&cleanup, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, pool);
ks_hash_insert(cleanup, (void *)key, bp);
}
}
ks_assert(protocol);
ks_assert(realm);
- key = ks_psprintf(bmmgr->pool, "%s@%s", protocol, realm);
+ key = ks_psprintf(ks_pool_get(bmmgr), "%s@%s", protocol, realm);
bp = (blade_protocol_t *)ks_hash_search(bmmgr->protocols, (void *)key, KS_READLOCKED);
// @todo if (bp) blade_protocol_read_lock(bp);
KS_DECLARE(ks_status_t) blade_mastermgr_controller_add(blade_mastermgr_t *bmmgr, const char *protocol, const char *realm, const char *controller)
{
+ ks_pool_t *pool = NULL;
blade_protocol_t *bp = NULL;
char *key = NULL;
ks_assert(realm);
ks_assert(controller);
- key = ks_psprintf(bmmgr->pool, "%s@%s", protocol, realm);
+ pool = ks_pool_get(bmmgr);
+
+ key = ks_psprintf(pool, "%s@%s", protocol, realm);
ks_hash_write_lock(bmmgr->protocols);
}
if (!bp) {
- blade_protocol_create(&bp, bmmgr->pool, protocol, realm);
+ blade_protocol_create(&bp, pool, protocol, realm);
ks_assert(bp);
ks_log(KS_LOG_DEBUG, "Protocol Added: %s\n", key);
- ks_hash_insert(bmmgr->protocols, (void *)ks_pstrdup(bmmgr->pool, key), bp);
+ ks_hash_insert(bmmgr->protocols, (void *)ks_pstrdup(pool, key), bp);
}
blade_protocol_controllers_add(bp, controller);
- ks_pool_free(bmmgr->pool, &key);
+ ks_pool_free(&key);
ks_hash_write_unlock(bmmgr->protocols);
ks_assert(realm);
ks_assert(channel);
- key = ks_psprintf(bmmgr->pool, "%s@%s", protocol, realm);
+ key = ks_psprintf(ks_pool_get(bmmgr), "%s@%s", protocol, realm);
bp = (blade_protocol_t *)ks_hash_search(bmmgr->protocols, (void *)key, KS_READLOCKED);
if (!bp) {
blade_protocol_channel_add(bp, channel);
done:
- ks_pool_free(bmmgr->pool, &key);
+ ks_pool_free(&key);
ks_hash_read_unlock(bmmgr->protocols);
ks_assert(realm);
ks_assert(channel);
- key = ks_psprintf(bmmgr->pool, "%s@%s", protocol, realm);
+ key = ks_psprintf(ks_pool_get(bmmgr), "%s@%s", protocol, realm);
bp = (blade_protocol_t *)ks_hash_search(bmmgr->protocols, (void *)key, KS_READLOCKED);
if (!bp) {
blade_protocol_channel_remove(bp, channel);
done:
- ks_pool_free(bmmgr->pool, &key);
+ ks_pool_free(&key);
ks_hash_read_unlock(bmmgr->protocols);
ks_assert(controller);
ks_assert(target);
- key = ks_psprintf(bmmgr->pool, "%s@%s", protocol, realm);
+ key = ks_psprintf(ks_pool_get(bmmgr), "%s@%s", protocol, realm);
bp = (blade_protocol_t *)ks_hash_search(bmmgr->protocols, (void *)key, KS_READLOCKED);
if (!bp) {
ret = blade_protocol_channel_authorize(bp, remove, channel, controller, target);
done:
- ks_pool_free(bmmgr->pool, &key);
+ ks_pool_free(&key);
ks_hash_read_unlock(bmmgr->protocols);
ks_assert(channel);
ks_assert(target);
- key = ks_psprintf(bmmgr->pool, "%s@%s", protocol, realm);
+ key = ks_psprintf(ks_pool_get(bmmgr), "%s@%s", protocol, realm);
bp = (blade_protocol_t *)ks_hash_search(bmmgr->protocols, (void *)key, KS_READLOCKED);
if (!bp) goto done;
ret = blade_protocol_channel_verify(bp, channel, target);
done:
- ks_pool_free(bmmgr->pool, &key);
+ ks_pool_free(&key);
ks_hash_read_unlock(bmmgr->protocols);
#include "blade.h"
struct blade_protocol_s {
- ks_pool_t *pool;
-
const char *name;
const char *realm;
ks_hash_t *controllers;
};
-static void blade_protocol_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_protocol_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_protocol_t *bp = (blade_protocol_t *)ptr;
case KS_MPCL_ANNOUNCE:
break;
case KS_MPCL_TEARDOWN:
- if (bp->name) ks_pool_free(bp->pool, &bp->name);
- if (bp->realm) ks_pool_free(bp->pool, &bp->realm);
+ if (bp->name) ks_pool_free(&bp->name);
+ if (bp->realm) ks_pool_free(&bp->realm);
if (bp->controllers) ks_hash_destroy(&bp->controllers);
if (bp->channels) ks_hash_destroy(&bp->channels);
break;
ks_assert(realm);
bp = ks_pool_alloc(pool, sizeof(blade_protocol_t));
- bp->pool = pool;
bp->name = ks_pstrdup(pool, name);
bp->realm = ks_pstrdup(pool, realm);
- ks_hash_create(&bp->controllers, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bp->pool);
+ ks_hash_create(&bp->controllers, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bp->controllers);
- ks_hash_create(&bp->channels, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bp->pool);
+ ks_hash_create(&bp->channels, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bp->channels);
- ks_pool_set_cleanup(pool, bp, NULL, blade_protocol_cleanup);
+ ks_pool_set_cleanup(bp, NULL, blade_protocol_cleanup);
*bpP = bp;
bp = *bpP;
- ks_pool_free(bp->pool, bpP);
+ ks_pool_free(bpP);
return KS_STATUS_SUCCESS;
}
ks_assert(bp);
ks_assert(nodeid);
- key = ks_pstrdup(bp->pool, nodeid);
+ key = ks_pstrdup(ks_pool_get(bp), nodeid);
ks_hash_insert(bp->controllers, (void *)key, (void *)KS_TRUE);
ks_log(KS_LOG_DEBUG, "Protocol Controller Added: %s to %s@%s\n", nodeid, bp->name, bp->realm);
KS_DECLARE(ks_status_t) blade_protocol_channel_add(blade_protocol_t *bp, const char *name)
{
ks_status_t ret = KS_STATUS_SUCCESS;
+ ks_pool_t *pool = NULL;
ks_hash_t *authorized = NULL;
char *key = NULL;
ks_assert(bp);
ks_assert(name);
+ pool = ks_pool_get(bp);
+
ks_hash_write_lock(bp->channels);
if (ks_hash_search(bp->channels, (void *)name, KS_UNLOCKED)) {
goto done;
}
- ks_hash_create(&authorized, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bp->pool);
+ ks_hash_create(&authorized, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
- key = ks_pstrdup(bp->pool, name);
+ key = ks_pstrdup(pool, name);
ks_hash_insert(bp->channels, (void *)key, (void *)authorized);
ks_log(KS_LOG_DEBUG, "Protocol Channel Added: %s to %s@%s\n", key, bp->name, bp->realm);
} else ret = KS_STATUS_NOT_FOUND;
}
else {
- ks_hash_insert(authorizations, (void *)ks_pstrdup(bp->pool, target), (void *)KS_TRUE);
+ ks_hash_insert(authorizations, (void *)ks_pstrdup(ks_pool_get(bp), target), (void *)KS_TRUE);
ks_log(KS_LOG_DEBUG, "Protocol Channel Authorization Added: %s to %s@%s/%s\n", target, bp->name, bp->realm, channel);
}
}
struct blade_routemgr_s {
blade_handle_t *handle;
- ks_pool_t *pool;
ks_hash_t *routes; // id, id
};
-static void blade_routemgr_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_routemgr_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_routemgr_t *brmgr = (blade_routemgr_t *)ptr;
brmgr = ks_pool_alloc(pool, sizeof(blade_routemgr_t));
brmgr->handle = bh;
- brmgr->pool = pool;
// @note can let removes free keys and values for routes, both are strings and allocated from the same pool as the hash itself
- ks_hash_create(&brmgr->routes, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, brmgr->pool);
+ ks_hash_create(&brmgr->routes, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, pool);
ks_assert(brmgr->routes);
- ks_pool_set_cleanup(pool, brmgr, NULL, blade_routemgr_cleanup);
+ ks_pool_set_cleanup(brmgr, NULL, blade_routemgr_cleanup);
*brmgrP = brmgr;
brmgr = *brmgrP;
*brmgrP = NULL;
- ks_assert(brmgr);
-
- pool = brmgr->pool;
+ pool = ks_pool_get(brmgr);
ks_pool_close(&pool);
KS_DECLARE(ks_status_t) blade_routemgr_route_add(blade_routemgr_t *brmgr, const char *target, const char *router)
{
+ ks_pool_t *pool = NULL;
char *key = NULL;
char *value = NULL;
ks_assert(target);
ks_assert(router);
- key = ks_pstrdup(brmgr->pool, target);
- value = ks_pstrdup(brmgr->pool, router);
+ pool = ks_pool_get(brmgr);
+
+ key = ks_pstrdup(pool, target);
+ value = ks_pstrdup(pool, router);
ks_hash_insert(brmgr->routes, (void *)key, (void *)value);
struct blade_rpc_s {
blade_handle_t *handle;
- ks_pool_t *pool;
const char *method;
const char *protocol;
struct blade_rpc_request_s {
blade_handle_t *handle;
- ks_pool_t *pool;
const char *session_id;
struct blade_rpc_response_s {
blade_handle_t *handle;
- ks_pool_t *pool;
const char *session_id;
};
-static void blade_rpc_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_rpc_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_rpc_t *brpc = (blade_rpc_t *)ptr;
brpc = ks_pool_alloc(pool, sizeof(blade_rpc_t));
brpc->handle = bh;
- brpc->pool = pool;
brpc->method = ks_pstrdup(pool, method);
if (protocol) brpc->protocol = ks_pstrdup(pool, protocol);
if (realm) brpc->realm = ks_pstrdup(pool, realm);
brpc->callback = callback;
brpc->data = data;
- ks_pool_set_cleanup(pool, brpc, NULL, blade_rpc_cleanup);
+ ks_pool_set_cleanup(brpc, NULL, blade_rpc_cleanup);
*brpcP = brpc;
ks_assert(*brpcP);
brpc = *brpcP;
+ *brpcP = NULL;
- pool = brpc->pool;
+ pool = ks_pool_get(brpc);
ks_pool_close(&pool);
- *brpcP = NULL;
-
return KS_STATUS_SUCCESS;
}
}
-static void blade_rpc_request_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_rpc_request_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_rpc_request_t *brpcreq = (blade_rpc_request_t *)ptr;
case KS_MPCL_ANNOUNCE:
break;
case KS_MPCL_TEARDOWN:
- ks_pool_free(brpcreq->pool, (void **)&brpcreq->session_id);
+ ks_pool_free((void **)&brpcreq->session_id);
cJSON_Delete(brpcreq->message);
// @todo delete data if present, requires update to ks_pool for self tracking the pool in allocation header
break;
brpcreq = ks_pool_alloc(pool, sizeof(blade_rpc_request_t));
brpcreq->handle = bh;
- brpcreq->pool = pool;
brpcreq->session_id = ks_pstrdup(pool, session_id);
brpcreq->message = cJSON_Duplicate(json, 1);
brpcreq->message_id = cJSON_GetObjectCstr(brpcreq->message, "id");
brpcreq->callback = callback;
brpcreq->data = data;
- ks_pool_set_cleanup(pool, brpcreq, NULL, blade_rpc_request_cleanup);
+ ks_pool_set_cleanup(brpcreq, NULL, blade_rpc_request_cleanup);
*brpcreqP = brpcreq;
brpcreq = *brpcreqP;
- ks_pool_free(brpcreq->pool, brpcreqP);
+ ks_pool_free(brpcreqP);
return KS_STATUS_SUCCESS;
}
// Duplicate an existing request: allocates a new blade_rpc_request_t from the
// same pool as the original (via ks_pool_get, now that the cached ->pool member
// is gone) and copies its session id, message, callback, and callback data.
// NOTE(review): this hunk carried unresolved diff markers; resolved by keeping
// the '+' line (ks_pool_get(brpcreq)) and dropping the '-' line (brpcreq->pool),
// matching the ks_pool API migration applied throughout the rest of this patch.
KS_DECLARE(ks_status_t) blade_rpc_request_duplicate(blade_rpc_request_t **brpcreqP, blade_rpc_request_t *brpcreq)
{
	return blade_rpc_request_create(brpcreqP, brpcreq->handle, ks_pool_get(brpcreq), brpcreq->session_id, brpcreq->message, brpcreq->callback, brpcreq->data);
}
KS_DECLARE(blade_handle_t *) blade_rpc_request_handle_get(blade_rpc_request_t *brpcreq)
ks_uuid(&msgid);
mid = ks_uuid_str(pool, &msgid);
cJSON_AddStringToObject(root, "id", mid);
- ks_pool_free(pool, &mid);
+ ks_pool_free(&mid);
cJSON_AddStringToObject(root, "method", method);
}
-static void blade_rpc_response_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_rpc_response_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_rpc_response_t *brpcres = (blade_rpc_response_t *)ptr;
case KS_MPCL_ANNOUNCE:
break;
case KS_MPCL_TEARDOWN:
- ks_pool_free(brpcres->pool, (void **)&brpcres->session_id);
+ ks_pool_free((void **)&brpcres->session_id);
blade_rpc_request_destroy(&brpcres->request);
cJSON_Delete(brpcres->message);
break;
brpcres = ks_pool_alloc(pool, sizeof(blade_rpc_response_t));
brpcres->handle = bh;
- brpcres->pool = pool;
brpcres->session_id = ks_pstrdup(pool, session_id);
brpcres->request = brpcreq;
brpcres->message = cJSON_Duplicate(json, 1);
- ks_pool_set_cleanup(pool, brpcres, NULL, blade_rpc_response_cleanup);
+ ks_pool_set_cleanup(brpcres, NULL, blade_rpc_response_cleanup);
*brpcresP = brpcres;
brpcres = *brpcresP;
- ks_pool_free(brpcres->pool, brpcresP);
+ ks_pool_free(brpcresP);
return KS_STATUS_SUCCESS;
}
struct blade_rpcmgr_s {
blade_handle_t *handle;
- ks_pool_t *pool;
ks_hash_t *corerpcs; // method, blade_rpc_t*
ks_hash_t *protocolrpcs; // method, blade_rpc_t*
};
-static void blade_rpcmgr_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_rpcmgr_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_rpcmgr_t *brpcmgr = (blade_rpcmgr_t *)ptr;
ks_hash_iterator_t *it = NULL;
brpcmgr = ks_pool_alloc(pool, sizeof(blade_rpcmgr_t));
brpcmgr->handle = bh;
- brpcmgr->pool = pool;
- ks_hash_create(&brpcmgr->corerpcs, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, brpcmgr->pool);
+ ks_hash_create(&brpcmgr->corerpcs, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(brpcmgr->corerpcs);
- ks_hash_create(&brpcmgr->protocolrpcs, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, brpcmgr->pool);
+ ks_hash_create(&brpcmgr->protocolrpcs, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(brpcmgr->protocolrpcs);
- ks_hash_create(&brpcmgr->requests, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, brpcmgr->pool);
+ ks_hash_create(&brpcmgr->requests, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(brpcmgr->requests);
- ks_pool_set_cleanup(pool, brpcmgr, NULL, blade_rpcmgr_cleanup);
+ ks_pool_set_cleanup(brpcmgr, NULL, blade_rpcmgr_cleanup);
*brpcmgrP = brpcmgr;
brpcmgr = *brpcmgrP;
*brpcmgrP = NULL;
- ks_assert(brpcmgr);
-
- pool = brpcmgr->pool;
+ pool = ks_pool_get(brpcmgr);
ks_pool_close(&pool);
ks_assert(brpcmgr);
ks_assert(brpc);
- key = ks_pstrdup(brpcmgr->pool, blade_rpc_method_get(brpc));
+ key = ks_pstrdup(ks_pool_get(brpcmgr), blade_rpc_method_get(brpc));
ks_hash_insert(brpcmgr->corerpcs, (void *)key, (void *)brpc);
ks_log(KS_LOG_DEBUG, "CoreRPC Added: %s\n", key);
ks_assert(protocol);
ks_assert(realm);
- key = ks_psprintf(brpcmgr->pool, "%s@%s/%s", protocol, realm, method);
+ key = ks_psprintf(ks_pool_get(brpcmgr), "%s@%s/%s", protocol, realm, method);
brpc = ks_hash_search(brpcmgr->protocolrpcs, (void *)key, KS_READLOCKED);
// @todo if (brpc) blade_rpc_read_lock(brpc);
ks_hash_read_unlock(brpcmgr->protocolrpcs);
- ks_pool_free(brpcmgr->pool, &key);
+ ks_pool_free(&key);
return brpc;
}
realm = blade_rpc_realm_get(brpc);
ks_assert(realm);
- key = ks_psprintf(brpcmgr->pool, "%s@%s/%s", protocol, realm, method);
+ key = ks_psprintf(ks_pool_get(brpcmgr), "%s@%s/%s", protocol, realm, method);
ks_assert(key);
ks_hash_insert(brpcmgr->protocolrpcs, (void *)key, (void *)brpc);
realm = blade_rpc_realm_get(brpc);
ks_assert(realm);
- key = ks_psprintf(brpcmgr->pool, "%s@%s/%s", protocol, realm, method);
+ key = ks_psprintf(ks_pool_get(brpcmgr), "%s@%s/%s", protocol, realm, method);
ks_assert(key);
ks_hash_remove(brpcmgr->protocolrpcs, (void *)key);
ks_log(KS_LOG_DEBUG, "ProtocolRPC Removed: %s\n", key);
- ks_pool_free(brpcmgr->pool, &key);
+ ks_pool_free(&key);
return KS_STATUS_SUCCESS;
}
ks_assert(brpcmgr);
ks_assert(brpcreq);
- key = ks_pstrdup(brpcmgr->pool, blade_rpc_request_messageid_get(brpcreq));
+ key = ks_pstrdup(ks_pool_get(brpcmgr), blade_rpc_request_messageid_get(brpcreq));
ks_hash_insert(brpcmgr->requests, (void *)key, (void *)brpcreq);
ks_log(KS_LOG_DEBUG, "Request Added: %s\n", key);
struct blade_session_s {
blade_handle_t *handle;
- ks_pool_t *pool;
volatile blade_session_state_t state;
ks_status_t blade_session_onstate_run(blade_session_t *bs);
ks_status_t blade_session_process(blade_session_t *bs, cJSON *json);
-static void blade_session_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_session_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_session_t *bs = (blade_session_t *)ptr;
bs = ks_pool_alloc(pool, sizeof(blade_session_t));
bs->handle = bh;
- bs->pool = pool;
if (id) bs->id = ks_pstrdup(pool, id);
else {
ks_q_create(&bs->receiving, pool, 0);
ks_assert(bs->receiving);
- ks_hash_create(&bs->realms, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bs->pool);
+ ks_hash_create(&bs->realms, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bs->realms);
- ks_hash_create(&bs->routes, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bs->pool);
+ ks_hash_create(&bs->routes, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bs->routes);
bs->properties = cJSON_CreateObject();
ks_rwl_create(&bs->properties_lock, pool);
ks_assert(bs->properties_lock);
- ks_pool_set_cleanup(pool, bs, NULL, blade_session_cleanup);
+ ks_pool_set_cleanup(bs, NULL, blade_session_cleanup);
ks_log(KS_LOG_DEBUG, "Created\n");
ks_assert(*bsP);
bs = *bsP;
+ *bsP = NULL;
- pool = bs->pool;
- //ks_pool_free(bs->pool, bsP);
- ks_pool_close(&pool);
+ pool = ks_pool_get(bs);
- *bsP = NULL;
+ ks_pool_close(&pool);
return KS_STATUS_SUCCESS;
}
ks_assert(bs);
ks_assert(realm);
- key = ks_pstrdup(bs->pool, realm);
+ key = ks_pstrdup(ks_pool_get(bs), realm);
ks_hash_insert(bs->realms, (void *)key, (void *)KS_TRUE);
return KS_STATUS_SUCCESS;
ks_assert(bs);
ks_assert(nodeid);
- key = ks_pstrdup(bs->pool, nodeid);
+ key = ks_pstrdup(ks_pool_get(bs), nodeid);
ks_hash_insert(bs->routes, (void *)key, (void *)KS_TRUE);
return KS_STATUS_SUCCESS;
if (id) {
if (bs->connection) {
// @todo best that can be done in this situation is see if the connection is still available, and if so then disconnect it... this really shouldn't happen
- ks_pool_free(bs->pool, &bs->connection);
+ ks_pool_free(&bs->connection);
}
- bs->connection = ks_pstrdup(bs->pool, id);
+ bs->connection = ks_pstrdup(ks_pool_get(bs), id);
ks_assert(bs->connection);
bs->ttl = 0;
} else if (bs->connection) {
ks_log(KS_LOG_DEBUG, "Session (%s) cleared connection (%s)\n", bs->id, bs->connection);
- ks_pool_free(bs->pool, &bs->connection);
+ ks_pool_free(&bs->connection);
bs->ttl = ks_time_now() + (5 * KS_USEC_PER_SEC);
}
// 1) Sending a request (client: method caller or consumer)
ks_log(KS_LOG_DEBUG, "Session (%s) sending request (%s) for %s\n", bs->id, id, method);
- blade_rpc_request_create(&brpcreq, bs->handle, blade_handle_pool_get(bs->handle), bs->id, json, callback, data);
+ blade_rpc_request_create(&brpcreq, bs->handle, ks_pool_get(bs->handle), bs->id, json, callback, data);
ks_assert(brpcreq);
// @todo set request TTL and figure out when requests are checked for expiration (separate thread in the handle?)
callback = blade_rpc_callback_get(brpc);
ks_assert(callback);
- blade_rpc_request_create(&brpcreq, bs->handle, blade_handle_pool_get(bs->handle), bs->id, json, NULL, NULL);
+ blade_rpc_request_create(&brpcreq, bs->handle, ks_pool_get(bs->handle), bs->id, json, NULL, NULL);
ks_assert(brpcreq);
disconnect = callback(brpcreq, blade_rpc_data_get(brpc));
callback = blade_rpc_request_callback_get(brpcreq);
- blade_rpc_response_create(&brpcres, bs->handle, bs->pool, bs->id, brpcreq, json);
+ blade_rpc_response_create(&brpcres, bs->handle, ks_pool_get(bs), bs->id, brpcreq, json);
ks_assert(brpcres);
if (callback) disconnect = callback(brpcres, blade_rpc_request_data_get(brpcreq));
struct blade_sessionmgr_s {
blade_handle_t *handle;
- ks_pool_t *pool;
ks_hash_t *sessions; // id, blade_session_t*
ks_hash_t *callbacks; // id, blade_session_callback_data_t*
};
struct blade_session_callback_data_s {
- ks_pool_t *pool;
-
const char *id;
void *data;
blade_session_callback_t callback;
};
-static void blade_sessionmgr_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_sessionmgr_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_sessionmgr_t *bsmgr = (blade_sessionmgr_t *)ptr;
}
}
-static void blade_session_callback_data_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_session_callback_data_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_session_callback_data_t *bscd = (blade_session_callback_data_t *)ptr;
case KS_MPCL_ANNOUNCE:
break;
case KS_MPCL_TEARDOWN:
- ks_pool_free(bscd->pool, &bscd->id);
+ ks_pool_free(&bscd->id);
break;
case KS_MPCL_DESTROY:
break;
bsmgr = ks_pool_alloc(pool, sizeof(blade_sessionmgr_t));
bsmgr->handle = bh;
- bsmgr->pool = pool;
- ks_hash_create(&bsmgr->sessions, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bsmgr->pool);
+ ks_hash_create(&bsmgr->sessions, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bsmgr->sessions);
- ks_hash_create(&bsmgr->callbacks, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, bsmgr->pool);
+ ks_hash_create(&bsmgr->callbacks, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, pool);
ks_assert(bsmgr->callbacks);
- ks_pool_set_cleanup(pool, bsmgr, NULL, blade_sessionmgr_cleanup);
+ ks_pool_set_cleanup(bsmgr, NULL, blade_sessionmgr_cleanup);
*bsmgrP = bsmgr;
bsmgr = *bsmgrP;
*bsmgrP = NULL;
- ks_assert(bsmgr);
-
- pool = bsmgr->pool;
+ pool = ks_pool_get(bsmgr);
ks_pool_close(&pool);
ks_assert(bsmgr);
ks_assert(bs);
- key = ks_pstrdup(bsmgr->pool, blade_session_id_get(bs));
+ key = ks_pstrdup(ks_pool_get(bsmgr), blade_session_id_get(bs));
ks_hash_insert(bsmgr->sessions, (void *)key, bs);
ks_log(KS_LOG_DEBUG, "Session Added: %s\n", key);
KS_DECLARE(ks_status_t) blade_sessionmgr_callback_add(blade_sessionmgr_t *bsmgr, void *data, blade_session_callback_t callback, const char **id)
{
+ ks_pool_t *pool = NULL;
blade_session_callback_data_t *bscd = NULL;
uuid_t uuid;
ks_assert(callback);
ks_assert(id);
+ pool = ks_pool_get(bsmgr);
+
ks_uuid(&uuid);
- bscd = ks_pool_alloc(bsmgr->pool, sizeof(blade_session_callback_data_t));
- bscd->pool = bsmgr->pool;
- bscd->id = ks_uuid_str(bsmgr->pool, &uuid);
+ bscd = ks_pool_alloc(pool, sizeof(blade_session_callback_data_t));
+ bscd->id = ks_uuid_str(pool, &uuid);
bscd->data = data;
bscd->callback = callback;
- ks_pool_set_cleanup(bsmgr->pool, bscd, NULL, blade_session_callback_data_cleanup);
+ ks_pool_set_cleanup(bscd, NULL, blade_session_callback_data_cleanup);
- ks_hash_insert(bsmgr->callbacks, (void *)ks_pstrdup(bscd->pool, bscd->id), bscd);
+ ks_hash_insert(bsmgr->callbacks, (void *)ks_pstrdup(pool, bscd->id), bscd);
*id = bscd->id;
#include "blade.h"
struct blade_handle_s {
- ks_pool_t *pool;
ks_thread_pool_t *tpool;
blade_transportmgr_t *transportmgr;
ks_bool_t blade_rpcbroadcast_request_handler(blade_rpc_request_t *brpcreq, void *data);
-static void blade_handle_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_handle_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_handle_t *bh = (blade_handle_t *)ptr;
ks_assert(tpool);
bh = ks_pool_alloc(pool, sizeof(blade_handle_t));
- bh->pool = pool;
bh->tpool = tpool;
blade_transportmgr_create(&bh->transportmgr, bh);
ks_assert(bh->sessionmgr);
- ks_pool_set_cleanup(pool, bh, NULL, blade_handle_cleanup);
+ ks_pool_set_cleanup(bh, NULL, blade_handle_cleanup);
*bhP = bh;
ks_pool_t *pool;
ks_assert(bhP);
+ ks_assert(*bhP);
bh = *bhP;
*bhP = NULL;
- ks_assert(bh);
-
- pool = bh->pool;
-
// shutdown cannot happen inside of the cleanup callback because it'll lock a mutex for the pool during cleanup callbacks which connections and sessions need to finish their cleanup
// and more importantly, memory needs to remain intact until shutdown is completed to avoid various things hitting teardown before shutdown runs
blade_handle_shutdown(bh);
+ pool = ks_pool_get(bh);
+
ks_pool_close(&pool);
return KS_STATUS_SUCCESS;
return KS_STATUS_SUCCESS;
}
-KS_DECLARE(ks_pool_t *) blade_handle_pool_get(blade_handle_t *bh)
-{
- ks_assert(bh);
- return bh->pool;
-}
-
KS_DECLARE(ks_thread_pool_t *) blade_handle_tpool_get(blade_handle_t *bh)
{
ks_assert(bh);
typedef struct blade_rpcsubscribe_data_s blade_rpcsubscribe_data_t;
struct blade_rpcsubscribe_data_s {
- ks_pool_t *pool;
blade_rpc_response_callback_t original_callback;
void *original_data;
blade_rpc_request_callback_t channel_callback;
goto done;
}
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
blade_rpc_request_raw_create(pool, &req, &req_params, NULL, "blade.register");
goto done;
}
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
blade_rpc_request_raw_create(pool, &req, &req_params, NULL, "blade.publish");
ks_assert(id);
cJSON_AddStringToObject(req_params, "requester-nodeid", id);
- ks_pool_free(pool, &id);
+ ks_pool_free(&id);
blade_upstreammgr_masterid_copy(bh->upstreammgr, pool, &id);
ks_assert(id);
cJSON_AddStringToObject(req_params, "responder-nodeid", id);
- ks_pool_free(pool, &id);
+ ks_pool_free(&id);
// @todo may want to switch this system to use a blade_rpcpublish_args_t with validation on the contents on this list internally
// and to produce the entire json block internally in case the channel args change to include additional information like an encryption key,
goto done;
}
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
blade_rpc_request_raw_create(pool, &req, &req_params, NULL, "blade.authorize");
ks_assert(id);
cJSON_AddStringToObject(req_params, "requester-nodeid", id);
- ks_pool_free(pool, &id);
+ ks_pool_free(&id);
blade_upstreammgr_masterid_copy(bh->upstreammgr, pool, &id);
ks_assert(id);
cJSON_AddStringToObject(req_params, "responder-nodeid", id);
- ks_pool_free(pool, &id);
+ ks_pool_free(&id);
cJSON_AddItemToObject(req_params, "channels", cJSON_Duplicate(channels, 1));
goto done;
}
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
blade_rpc_request_raw_create(pool, &req, &req_params, NULL, "blade.locate");
ks_assert(id);
cJSON_AddStringToObject(req_params, "requester-nodeid", id);
- ks_pool_free(pool, &id);
+ ks_pool_free(&id);
blade_upstreammgr_masterid_copy(bh->upstreammgr, pool, &id);
ks_assert(id);
cJSON_AddStringToObject(req_params, "responder-nodeid", id);
- ks_pool_free(pool, &id);
+ ks_pool_free(&id);
ks_log(KS_LOG_DEBUG, "Session (%s) locate request started\n", blade_session_id_get(bs));
}
}
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
blade_rpc_request_raw_create(pool, &req, &req_params, NULL, "blade.execute");
ks_assert(localid);
cJSON_AddStringToObject(req_params, "requester-nodeid", localid);
- ks_pool_free(pool, &localid);
+ ks_pool_free(&localid);
cJSON_AddStringToObject(req_params, "responder-nodeid", nodeid);
}
-static void blade_rpcsubscribe_data_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_rpcsubscribe_data_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_rpcsubscribe_data_t *brpcsd = (blade_rpcsubscribe_data_t *)ptr;
case KS_MPCL_ANNOUNCE:
break;
case KS_MPCL_TEARDOWN:
- if (brpcsd->relayed_messageid) ks_pool_free(brpcsd->pool, &brpcsd->relayed_messageid);
+ if (brpcsd->relayed_messageid) ks_pool_free(&brpcsd->relayed_messageid);
break;
case KS_MPCL_DESTROY:
break;
ks_assert(realm);
ks_assert(subscribe_channels || unsubscribe_channels);
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
// @note this is always produced by a subscriber, and sent upstream, master will only use the internal raw call
goto done;
}
- blade_upstreammgr_localid_copy(bh->upstreammgr, bh->pool, &localid);
+ blade_upstreammgr_localid_copy(bh->upstreammgr, ks_pool_get(bh), &localid);
ks_assert(localid);
// @note since this is allocated in the handle's pool, if the handle is shutdown during a pending request, then the data
// memory will be cleaned up with the handle, otherwise should be cleaned up in the response callback
temp_data = (blade_rpcsubscribe_data_t *)ks_pool_alloc(pool, sizeof(blade_rpcsubscribe_data_t));
- temp_data->pool = pool;
temp_data->original_callback = callback;
temp_data->original_data = data;
temp_data->channel_callback = channel_callback;
temp_data->channel_data = channel_data;
- ks_pool_set_cleanup(pool, temp_data, NULL, blade_rpcsubscribe_data_cleanup);
+ ks_pool_set_cleanup(temp_data, NULL, blade_rpcsubscribe_data_cleanup);
ret = blade_handle_rpcsubscribe_raw(bh, protocol, realm, subscribe_channels, unsubscribe_channels, localid, KS_FALSE, blade_rpcsubscribe_response_handler, temp_data);
- ks_pool_free(bh->pool, &localid);
+ ks_pool_free(&localid);
done:
if (bs) blade_session_read_unlock(bs);
goto done;
}
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
if (unsubscribe_channels) {
bh = blade_rpc_request_handle_get(brpcreq);
ks_assert(bh);
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
bs = blade_sessionmgr_session_lookup(blade_handle_sessionmgr_get(bh), blade_rpc_request_sessionid_get(brpcreq));
blade_session_send(bs, res, NULL, NULL);
} else {
blade_rpcsubscribe_data_t *temp_data = (blade_rpcsubscribe_data_t *)ks_pool_alloc(pool, sizeof(blade_rpcsubscribe_data_t));
- temp_data->pool = pool;
temp_data->relayed_messageid = ks_pstrdup(pool, blade_rpc_request_messageid_get(brpcreq));
- ks_pool_set_cleanup(pool, temp_data, NULL, blade_rpcsubscribe_data_cleanup);
+ ks_pool_set_cleanup(temp_data, NULL, blade_rpcsubscribe_data_cleanup);
blade_handle_rpcsubscribe_raw(bh, req_params_protocol, req_params_realm, req_params_subscribe_channels, req_params_unsubscribe_channels, req_params_subscriber_nodeid, downstream, blade_rpcsubscribe_response_handler, temp_data);
}
cJSON *res_result_failed_channels = NULL;
ks_assert(brpcres);
- ks_assert(data);
bh = blade_rpc_response_handle_get(brpcres);
ks_assert(bh);
}
done:
- if (temp_data) ks_pool_free(temp_data->pool, &temp_data);
+ if (temp_data) ks_pool_free(&temp_data);
blade_session_read_unlock(bs);
return ret;
}
const char *localid = NULL;
ks_pool_t *pool = NULL;
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
blade_upstreammgr_localid_copy(bh->upstreammgr, pool, &localid);
ks_assert(localid);
callback = blade_subscription_callback_get(bsub);
if (callback) ret = callback(brpcreq, blade_subscription_callback_data_get(bsub));
}
- ks_pool_free(pool, &localid);
+ ks_pool_free(&localid);
}
// build the actual response finally
#include "blade.h"
struct blade_subscription_s {
- ks_pool_t *pool;
-
const char *protocol;
const char *realm;
const char *channel;
};
-static void blade_subscription_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_subscription_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_subscription_t *bsub = (blade_subscription_t *)ptr;
case KS_MPCL_ANNOUNCE:
break;
case KS_MPCL_TEARDOWN:
- if (bsub->protocol) ks_pool_free(bsub->pool, &bsub->protocol);
- if (bsub->realm) ks_pool_free(bsub->pool, &bsub->subscribers);
- if (bsub->channel) ks_pool_free(bsub->pool, &bsub->channel);
+ if (bsub->protocol) ks_pool_free(&bsub->protocol);
+ if (bsub->realm) ks_pool_free(&bsub->realm);
+ if (bsub->channel) ks_pool_free(&bsub->channel);
if (bsub->subscribers) ks_hash_destroy(&bsub->subscribers);
break;
case KS_MPCL_DESTROY:
ks_assert(channel);
bsub = ks_pool_alloc(pool, sizeof(blade_subscription_t));
- bsub->pool = pool;
bsub->protocol = ks_pstrdup(pool, protocol);
bsub->realm = ks_pstrdup(pool, realm);
bsub->channel = ks_pstrdup(pool, channel);
- ks_hash_create(&bsub->subscribers, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bsub->pool);
+ ks_hash_create(&bsub->subscribers, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bsub->subscribers);
- ks_pool_set_cleanup(pool, bsub, NULL, blade_subscription_cleanup);
+ ks_pool_set_cleanup(bsub, NULL, blade_subscription_cleanup);
*bsubP = bsub;
bsub = *bsubP;
- ks_pool_free(bsub->pool, bsubP);
+ ks_pool_free(bsubP);
return KS_STATUS_SUCCESS;
}
ks_assert(bsub);
ks_assert(nodeid);
- key = ks_pstrdup(bsub->pool, nodeid);
+ key = ks_pstrdup(ks_pool_get(bsub), nodeid);
ks_hash_insert(bsub->subscribers, (void *)key, (void *)KS_TRUE);
return KS_STATUS_SUCCESS;
struct blade_subscriptionmgr_s {
blade_handle_t *handle;
- ks_pool_t *pool;
ks_hash_t *subscriptions; // key, blade_subscription_t*
ks_hash_t *subscriptions_cleanup; // target, ks_hash_t*
};
-static void blade_subscriptionmgr_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_subscriptionmgr_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_routemgr_t *brmgr = (blade_routemgr_t *)ptr;
bsmgr = ks_pool_alloc(pool, sizeof(blade_subscriptionmgr_t));
bsmgr->handle = bh;
- bsmgr->pool = pool;
// @note can let removes free keys and values for subscriptions, both are allocated from the same pool as the hash itself
- ks_hash_create(&bsmgr->subscriptions, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, bsmgr->pool);
+ ks_hash_create(&bsmgr->subscriptions, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, pool);
ks_assert(bsmgr->subscriptions);
- ks_hash_create(&bsmgr->subscriptions_cleanup, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, bsmgr->pool);
+ ks_hash_create(&bsmgr->subscriptions_cleanup, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, pool);
ks_assert(bsmgr->subscriptions_cleanup);
- ks_pool_set_cleanup(pool, bsmgr, NULL, blade_subscriptionmgr_cleanup);
+ ks_pool_set_cleanup(bsmgr, NULL, blade_subscriptionmgr_cleanup);
*bsmgrP = bsmgr;
bsmgr = *bsmgrP;
*bsmgrP = NULL;
- ks_assert(bsmgr);
-
- pool = bsmgr->pool;
+ pool = ks_pool_get(bsmgr);
ks_pool_close(&pool);
ks_assert(realm);
ks_assert(channel);
- key = ks_psprintf(bsmgr->pool, "%s@%s/%s", protocol, realm, channel);
+ key = ks_psprintf(ks_pool_get(bsmgr), "%s@%s/%s", protocol, realm, channel);
bsub = (blade_subscription_t *)ks_hash_search(bsmgr->subscriptions, (void *)key, KS_READLOCKED);
// @todo if (bsub) blade_subscription_read_lock(bsub);
ks_hash_read_unlock(bsmgr->subscriptions);
- ks_pool_free(bsmgr->pool, &key);
+ ks_pool_free(&key);
return bsub;
}
KS_DECLARE(ks_bool_t) blade_subscriptionmgr_subscriber_add(blade_subscriptionmgr_t *bsmgr, blade_subscription_t **bsubP, const char *protocol, const char *realm, const char *channel, const char *subscriber)
{
+ ks_pool_t *pool = NULL;
char *key = NULL;
blade_subscription_t *bsub = NULL;
ks_hash_t *bsub_cleanup = NULL;
ks_assert(channel);
ks_assert(subscriber);
- key = ks_psprintf(bsmgr->pool, "%s@%s/%s", protocol, realm, channel);
+ pool = ks_pool_get(bsmgr);
+
+ key = ks_psprintf(pool, "%s@%s/%s", protocol, realm, channel);
ks_hash_write_lock(bsmgr->subscriptions);
bsub = (blade_subscription_t *)ks_hash_search(bsmgr->subscriptions, (void *)key, KS_UNLOCKED);
if (!bsub) {
- blade_subscription_create(&bsub, bsmgr->pool, protocol, realm, channel);
+ blade_subscription_create(&bsub, pool, protocol, realm, channel);
ks_assert(bsub);
- ks_hash_insert(bsmgr->subscriptions, (void *)ks_pstrdup(bsmgr->pool, key), bsub);
+ ks_hash_insert(bsmgr->subscriptions, (void *)ks_pstrdup(pool, key), bsub);
propagate = KS_TRUE;
}
bsub_cleanup = (ks_hash_t *)ks_hash_search(bsmgr->subscriptions_cleanup, (void *)subscriber, KS_UNLOCKED);
if (!bsub_cleanup) {
- ks_hash_create(&bsub_cleanup, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bsmgr->pool);
+ ks_hash_create(&bsub_cleanup, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bsub_cleanup);
ks_log(KS_LOG_DEBUG, "Subscription Added: %s\n", key);
- ks_hash_insert(bsmgr->subscriptions_cleanup, (void *)ks_pstrdup(bsmgr->pool, subscriber), (void *)bsub_cleanup);
+ ks_hash_insert(bsmgr->subscriptions_cleanup, (void *)ks_pstrdup(pool, subscriber), (void *)bsub_cleanup);
}
- ks_hash_insert(bsub_cleanup, (void *)ks_pstrdup(bsmgr->pool, key), (void *)KS_TRUE);
+ ks_hash_insert(bsub_cleanup, (void *)ks_pstrdup(pool, key), (void *)KS_TRUE);
blade_subscription_subscribers_add(bsub, subscriber);
ks_log(KS_LOG_DEBUG, "Subscriber Added: %s to %s\n", subscriber, key);
- ks_pool_free(bsmgr->pool, &key);
+ ks_pool_free(&key);
if (bsubP) *bsubP = bsub;
ks_assert(channel);
ks_assert(subscriber);
- key = ks_psprintf(bsmgr->pool, "%s@%s/%s", protocol, realm, channel);
+ key = ks_psprintf(ks_pool_get(bsmgr), "%s@%s/%s", protocol, realm, channel);
ks_hash_write_lock(bsmgr->subscriptions);
if (bsub) {
bsub_cleanup = (ks_hash_t *)ks_hash_search(bsmgr->subscriptions_cleanup, (void *)subscriber, KS_UNLOCKED);
- ks_assert(bsub_cleanup);
- ks_hash_remove(bsub_cleanup, key);
+ if (bsub_cleanup) {
+ ks_hash_remove(bsub_cleanup, key);
- if (ks_hash_count(bsub_cleanup) == 0) {
- ks_hash_remove(bsmgr->subscriptions_cleanup, (void *)subscriber);
- }
+ if (ks_hash_count(bsub_cleanup) == 0) {
+ ks_hash_remove(bsmgr->subscriptions_cleanup, (void *)subscriber);
+ }
- ks_log(KS_LOG_DEBUG, "Subscriber Removed: %s from %s\n", subscriber, key);
- blade_subscription_subscribers_remove(bsub, subscriber);
+ ks_log(KS_LOG_DEBUG, "Subscriber Removed: %s from %s\n", subscriber, key);
+ blade_subscription_subscribers_remove(bsub, subscriber);
- if (ks_hash_count(blade_subscription_subscribers_get(bsub)) == 0) {
- ks_log(KS_LOG_DEBUG, "Subscription Removed: %s\n", key);
- ks_hash_remove(bsmgr->subscriptions, (void *)key);
- propagate = KS_TRUE;
+ if (ks_hash_count(blade_subscription_subscribers_get(bsub)) == 0) {
+ ks_log(KS_LOG_DEBUG, "Subscription Removed: %s\n", key);
+ ks_hash_remove(bsmgr->subscriptions, (void *)key);
+ propagate = KS_TRUE;
+ }
}
}
ks_hash_write_unlock(bsmgr->subscriptions);
- ks_pool_free(bsmgr->pool, &key);
+ ks_pool_free(&key);
if (bsubP) *bsubP = bsub;
KS_DECLARE(void) blade_subscriptionmgr_purge(blade_subscriptionmgr_t *bsmgr, const char *target)
{
+ ks_pool_t *pool = NULL;
ks_bool_t unsubbed = KS_FALSE;
ks_assert(bsmgr);
ks_assert(target);
+ pool = ks_pool_get(bsmgr);
+
while (!unsubbed) {
ks_hash_t *subscriptions = NULL;
const char *protocol = NULL;
ks_assert(bsub);
// @note allocate these to avoid lifecycle issues when the last subscriber is removed causing the subscription to be removed
- protocol = ks_pstrdup(bsmgr->pool, blade_subscription_protocol_get(bsub));
- realm = ks_pstrdup(bsmgr->pool, blade_subscription_realm_get(bsub));
- channel = ks_pstrdup(bsmgr->pool, blade_subscription_channel_get(bsub));
+ protocol = ks_pstrdup(pool, blade_subscription_protocol_get(bsub));
+ realm = ks_pstrdup(pool, blade_subscription_realm_get(bsub));
+ channel = ks_pstrdup(pool, blade_subscription_channel_get(bsub));
}
ks_hash_read_unlock(bsmgr->subscriptions);
if (!unsubbed) {
blade_subscriptionmgr_subscriber_remove(bsmgr, NULL, protocol, realm, channel, target);
- ks_pool_free(bsmgr->pool, &protocol);
- ks_pool_free(bsmgr->pool, &realm);
- ks_pool_free(bsmgr->pool, &channel);
+ ks_pool_free(&protocol);
+ ks_pool_free(&realm);
+ ks_pool_free(&channel);
}
}
}
KS_DECLARE(ks_status_t) blade_subscriptionmgr_broadcast(blade_subscriptionmgr_t *bsmgr, const char *excluded_nodeid, const char *protocol, const char *realm, const char *channel, const char *event, cJSON *params, blade_rpc_response_callback_t callback, void *data)
{
+ ks_pool_t *pool = NULL;
const char *bsub_key = NULL;
blade_subscription_t *bsub = NULL;
blade_session_t *bs = NULL;
ks_assert(realm);
ks_assert(channel);
- bsub_key = ks_psprintf(bsmgr->pool, "%s@%s/%s", protocol, realm, channel);
+ pool = ks_pool_get(bsmgr);
+
+ bsub_key = ks_psprintf(pool, "%s@%s/%s", protocol, realm, channel);
- blade_rpc_request_raw_create(bsmgr->pool, &req, &req_params, NULL, "blade.broadcast");
+ blade_rpc_request_raw_create(pool, &req, &req_params, NULL, "blade.broadcast");
cJSON_AddStringToObject(req_params, "protocol", protocol);
cJSON_AddStringToObject(req_params, "realm", realm);
cJSON_AddStringToObject(req_params, "channel", channel);
cJSON_Delete(req);
- ks_pool_free(bsmgr->pool, &bsub_key);
+ ks_pool_free(&bsub_key);
return KS_STATUS_SUCCESS;
}
struct blade_transport_s {
blade_handle_t *handle;
- ks_pool_t *pool;
const char *name;
void *data;
};
-static void blade_transport_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_transport_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_transport_t *bt = (blade_transport_t *)ptr;
bt = ks_pool_alloc(pool, sizeof(blade_transport_t));
bt->handle = bh;
- bt->pool = pool;
bt->name = ks_pstrdup(pool, name);
bt->data = data;
bt->callbacks = callbacks;
- ks_pool_set_cleanup(pool, bt, NULL, blade_transport_cleanup);
+ ks_pool_set_cleanup(bt, NULL, blade_transport_cleanup);
ks_log(KS_LOG_DEBUG, "Created transport %s\n", name);
ks_assert(*btP);
bt = *btP;
+ *btP = NULL;
- pool = bt->pool;
+ pool = ks_pool_get(bt);
ks_pool_close(&pool);
- *btP = NULL;
-
return KS_STATUS_SUCCESS;
}
struct blade_transport_wss_s {
blade_handle_t *handle;
- ks_pool_t *pool;
blade_transport_t *transport;
blade_transport_callbacks_t *callbacks;
struct blade_transport_wss_link_s {
blade_transport_wss_t *transport;
- ks_pool_t *pool;
const char *session_id;
ks_socket_t sock;
};
-static void blade_transport_wss_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_transport_wss_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_transport_wss_t *btwss = (blade_transport_wss_t *)ptr;
btwss = ks_pool_alloc(pool, sizeof(blade_transport_wss_t));
btwss->handle = bh;
- btwss->pool = pool;
blade_transport_create(&btwss->transport, bh, pool, BLADE_MODULE_WSS_TRANSPORT_NAME, btwss, &g_transport_wss_callbacks);
btwss->callbacks = &g_transport_wss_callbacks;
- ks_pool_set_cleanup(pool, btwss, NULL, blade_transport_wss_cleanup);
+ ks_pool_set_cleanup(btwss, NULL, blade_transport_wss_cleanup);
ks_log(KS_LOG_DEBUG, "Created\n");
return KS_STATUS_SUCCESS;
}
-static void blade_transport_wss_link_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_transport_wss_link_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_transport_wss_link_t *btwssl = (blade_transport_wss_link_t *)ptr;
case KS_MPCL_ANNOUNCE:
break;
case KS_MPCL_TEARDOWN:
- if (btwssl->session_id) ks_pool_free(btwssl->pool, &btwssl->session_id);
+ if (btwssl->session_id) ks_pool_free(&btwssl->session_id);
if (btwssl->kws) kws_destroy(&btwssl->kws);
else ks_socket_close(&btwssl->sock);
break;
btwssl = ks_pool_alloc(pool, sizeof(blade_transport_wss_link_t));
btwssl->transport = btwss;
- btwssl->pool = pool;
btwssl->sock = sock;
if (session_id) btwssl->session_id = ks_pstrdup(pool, session_id);
- ks_pool_set_cleanup(pool, btwssl, NULL, blade_transport_wss_link_cleanup);
+ ks_pool_set_cleanup(btwssl, NULL, blade_transport_wss_link_cleanup);
ks_log(KS_LOG_DEBUG, "Created\n");
}
listener_index = btwss->listeners_count++;
- btwss->listeners_poll = (struct pollfd *)ks_pool_resize(btwss->pool,
- btwss->listeners_poll,
- sizeof(struct pollfd) * btwss->listeners_count);
+ if (!btwss->listeners_poll) btwss->listeners_poll = (struct pollfd *)ks_pool_alloc(ks_pool_get(btwss), sizeof(struct pollfd) * btwss->listeners_count);
+ else btwss->listeners_poll = (struct pollfd *)ks_pool_resize(btwss->listeners_poll, sizeof(struct pollfd) * btwss->listeners_count);
ks_assert(btwss->listeners_poll);
btwss->listeners_poll[listener_index].fd = listener;
btwss->listeners_poll[listener_index].events = POLLIN; // | POLLERR;
blade_connection_create(&bc, btwss->handle);
ks_assert(bc);
- blade_transport_wss_link_create(&btwssl, blade_connection_pool_get(bc), btwss, sock, NULL);
+ blade_transport_wss_link_create(&btwssl, ks_pool_get(bc), btwss, sock, NULL);
ks_assert(btwssl);
blade_connection_transport_set(bc, btwssl, btwss->callbacks);
blade_connection_create(&bc, btwss->handle);
ks_assert(bc);
- blade_transport_wss_link_create(&btwssl, blade_connection_pool_get(bc), btwss, sock, session_id);
+ blade_transport_wss_link_create(&btwssl, ks_pool_get(bc), btwss, sock, session_id);
ks_assert(btwssl);
blade_connection_transport_set(bc, btwssl, btwss->callbacks);
btwssl = (blade_transport_wss_link_t *)blade_connection_transport_get(bc);
// @todo: SSL init stuffs based on data from config to pass into kws_init
- if (kws_init(&btwssl->kws, btwssl->sock, NULL, NULL, KWS_BLOCK, btwssl->pool) != KS_STATUS_SUCCESS) {
+ if (kws_init(&btwssl->kws, btwssl->sock, NULL, NULL, KWS_BLOCK, ks_pool_get(btwssl)) != KS_STATUS_SUCCESS) {
ks_log(KS_LOG_DEBUG, "Failed websocket init\n");
ret = BLADE_CONNECTION_STATE_HOOK_DISCONNECT;
goto done;
cJSON_AddStringToObject(json_result, "nodeid", nodeid);
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
blade_upstreammgr_masterid_copy(blade_handle_upstreammgr_get(bh), pool, &master_nodeid);
if (!master_nodeid) {
ks_log(KS_LOG_DEBUG, "Master nodeid unavailable\n");
goto done;
}
cJSON_AddStringToObject(json_result, "master-nodeid", master_nodeid);
- ks_pool_free(pool, &master_nodeid);
+ ks_pool_free(&master_nodeid);
// add the list of actual realms the local node will permit the remote node to register or route, this is the same list that the remote side would be adding to the handle with blade_handle_realm_add()
bh = blade_connection_handle_get(bc);
btwssl = (blade_transport_wss_link_t *)blade_connection_transport_get(bc);
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
// @todo: SSL init stuffs based on data from config to pass into kws_init
- if (kws_init(&btwssl->kws, btwssl->sock, NULL, "/blade:blade.invalid:blade", KWS_BLOCK, btwssl->pool) != KS_STATUS_SUCCESS) {
+ if (kws_init(&btwssl->kws, btwssl->sock, NULL, "/blade:blade.invalid:blade", KWS_BLOCK, ks_pool_get(btwssl)) != KS_STATUS_SUCCESS) {
ks_log(KS_LOG_DEBUG, "Failed websocket init\n");
ret = BLADE_CONNECTION_STATE_HOOK_DISCONNECT;
goto done;
struct blade_transportmgr_s {
blade_handle_t *handle;
- ks_pool_t *pool;
ks_hash_t *transports; // name, blade_transport_t*
blade_transport_t *default_transport; // default wss transport
};
-static void blade_transportmgr_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_transportmgr_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
blade_transportmgr_t *btmgr = (blade_transportmgr_t *)ptr;
ks_hash_iterator_t *it = NULL;
btmgr = ks_pool_alloc(pool, sizeof(blade_transportmgr_t));
btmgr->handle = bh;
- btmgr->pool = pool;
- ks_hash_create(&btmgr->transports, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, btmgr->pool);
+ ks_hash_create(&btmgr->transports, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(btmgr->transports);
- ks_pool_set_cleanup(pool, btmgr, NULL, blade_transportmgr_cleanup);
+ ks_pool_set_cleanup(btmgr, NULL, blade_transportmgr_cleanup);
*btmgrP = btmgr;
btmgr = *btmgrP;
*btmgrP = NULL;
- ks_assert(btmgr);
-
- pool = btmgr->pool;
+ pool = ks_pool_get(btmgr);
ks_pool_close(&pool);
ks_assert(btmgr);
ks_assert(bt);
- key = ks_pstrdup(btmgr->pool, blade_transport_name_get(bt));
+ key = ks_pstrdup(ks_pool_get(btmgr), blade_transport_name_get(bt));
ks_hash_insert(btmgr->transports, (void *)key, (void *)bt);
ks_log(KS_LOG_DEBUG, "Transport Added: %s\n", key);
#include "blade.h"
struct blade_tuple_s {
- ks_pool_t *pool;
-
void *value1;
void *value2;
};
-static void blade_tuple_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_tuple_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_tuple_t *bt = (blade_tuple_t *)ptr;
ks_assert(pool);
bt = ks_pool_alloc(pool, sizeof(blade_tuple_t));
- bt->pool = pool;
bt->value1 = value1;
bt->value2 = value2;
- ks_pool_set_cleanup(pool, bt, NULL, blade_tuple_cleanup);
+ ks_pool_set_cleanup(bt, NULL, blade_tuple_cleanup);
*btP = bt;
ks_assert(btP);
ks_assert(*btP);
- ks_pool_free((*btP)->pool, btP);
+ ks_pool_free(btP);
return KS_STATUS_SUCCESS;
}
struct blade_upstreammgr_s {
blade_handle_t *handle;
- ks_pool_t *pool;
// local node id, can be used to get the upstream session, provided by upstream "blade.connect" response
const char *localid;
};
-static void blade_upstreammgr_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void blade_upstreammgr_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//blade_upstreammgr_t *bumgr = (blade_upstreammgr_t *)ptr;
bumgr = ks_pool_alloc(pool, sizeof(blade_upstreammgr_t));
bumgr->handle = bh;
- bumgr->pool = pool;
- //ks_hash_create(&bumgr->routes, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, bumgr->pool);
+ //ks_hash_create(&bumgr->routes, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY | KS_HASH_FLAG_FREE_VALUE, pool);
//ks_assert(bumgr->routes);
- ks_rwl_create(&bumgr->localid_rwl, bumgr->pool);
+ ks_rwl_create(&bumgr->localid_rwl, pool);
ks_assert(bumgr->localid_rwl);
- ks_rwl_create(&bumgr->masterid_rwl, bumgr->pool);
+ ks_rwl_create(&bumgr->masterid_rwl, pool);
ks_assert(bumgr->masterid_rwl);
- ks_hash_create(&bumgr->realms, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, bumgr->pool);
+ ks_hash_create(&bumgr->realms, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
ks_assert(bumgr->realms);
- ks_pool_set_cleanup(pool, bumgr, NULL, blade_upstreammgr_cleanup);
+ ks_pool_set_cleanup(bumgr, NULL, blade_upstreammgr_cleanup);
*bumgrP = bumgr;
bumgr = *bumgrP;
*bumgrP = NULL;
- ks_assert(bumgr);
-
- pool = bumgr->pool;
+ pool = ks_pool_get(bumgr);
ks_pool_close(&pool);
goto done;
}
- if (bumgr->localid) ks_pool_free(bumgr->pool, &bumgr->localid);
- if (id) bumgr->localid = ks_pstrdup(bumgr->pool, id);
+ if (bumgr->localid) ks_pool_free(&bumgr->localid);
+ if (id) bumgr->localid = ks_pstrdup(ks_pool_get(bumgr), id);
ks_log(KS_LOG_DEBUG, "LocalID: %s\n", id);
ks_assert(bumgr);
ks_rwl_write_lock(bumgr->masterid_rwl);
- if (bumgr->masterid) ks_pool_free(bumgr->pool, &bumgr->masterid);
- if (id) bumgr->masterid = ks_pstrdup(bumgr->pool, id);
+ if (bumgr->masterid) ks_pool_free(&bumgr->masterid);
+ if (id) bumgr->masterid = ks_pstrdup(ks_pool_get(bumgr), id);
ks_log(KS_LOG_DEBUG, "MasterID: %s\n", id);
ks_assert(bumgr);
ks_assert(realm);
- key = ks_pstrdup(bumgr->pool, realm);
+ key = ks_pstrdup(ks_pool_get(bumgr), realm);
ks_hash_insert(bumgr->realms, (void *)key, (void *)KS_TRUE);
ks_log(KS_LOG_DEBUG, "Realm Added: %s\n", key);
KS_DECLARE(ks_status_t) blade_connection_startup(blade_connection_t *bc, blade_connection_direction_t direction);
KS_DECLARE(ks_status_t) blade_connection_shutdown(blade_connection_t *bc);
KS_DECLARE(blade_handle_t *) blade_connection_handle_get(blade_connection_t *bc);
-KS_DECLARE(ks_pool_t *) blade_connection_pool_get(blade_connection_t *bc);
KS_DECLARE(const char *) blade_connection_id_get(blade_connection_t *bc);
KS_DECLARE(ks_status_t) blade_connection_read_lock(blade_connection_t *bc, ks_bool_t block);
KS_DECLARE(ks_status_t) blade_connection_read_unlock(blade_connection_t *bc);
KS_DECLARE(ks_status_t) blade_handle_create(blade_handle_t **bhP);
KS_DECLARE(ks_status_t) blade_handle_startup(blade_handle_t *bh, config_setting_t *config);
KS_DECLARE(ks_status_t) blade_handle_shutdown(blade_handle_t *bh);
-KS_DECLARE(ks_pool_t *) blade_handle_pool_get(blade_handle_t *bh);
KS_DECLARE(ks_thread_pool_t *) blade_handle_tpool_get(blade_handle_t *bh);
KS_DECLARE(blade_transportmgr_t *) blade_handle_transportmgr_get(blade_handle_t *bh);
blade_connection_t *bc = NULL;
blade_identity_t *target = NULL;
- blade_identity_create(&target, blade_handle_pool_get(bh));
+ blade_identity_create(&target, ks_pool_get(bh));
if (blade_identity_parse(target, autoconnect) == KS_STATUS_SUCCESS) blade_handle_connect(bh, &bc, target, NULL);
blade_connection_t *bc = NULL;
blade_identity_t *target = NULL;
- blade_identity_create(&target, blade_handle_pool_get(bh));
+ blade_identity_create(&target, ks_pool_get(bh));
if (blade_identity_parse(target, autoconnect) == KS_STATUS_SUCCESS) blade_handle_connect(bh, &bc, target, NULL);
blade_session_read_unlock(bs);
if (nodeid) {
- g_testcon_nodeid = ks_pstrdup(blade_handle_pool_get(bh), nodeid);
+ g_testcon_nodeid = ks_pstrdup(ks_pool_get(bh), nodeid);
}
ks_log(KS_LOG_DEBUG, "Session (%s) locate (%s@%s) provider (%s)\n", blade_session_id_get(bs), res_result_protocol, res_result_realm, g_testcon_nodeid);
blade_identity_t *target = NULL;
ks_bool_t connected = KS_FALSE;
- blade_identity_create(&target, blade_handle_pool_get(bh));
+ blade_identity_create(&target, ks_pool_get(bh));
if (blade_identity_parse(target, autoconnect) == KS_STATUS_SUCCESS) connected = blade_handle_connect(bh, &bc, target, NULL) == KS_STATUS_SUCCESS;
};
typedef struct testproto_s testproto_t;
-static void testproto_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void testproto_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//testproto_t *test = (testproto_t *)ptr;
ks_hash_create(&test->participants, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK | KS_HASH_FLAG_FREE_KEY, pool);
- ks_pool_set_cleanup(pool, test, NULL, testproto_cleanup);
+ ks_pool_set_cleanup(test, NULL, testproto_cleanup);
*testP = test;
blade_handle_create(&bh);
ks_assert(bh);
- pool = blade_handle_pool_get(bh);
+ pool = ks_pool_get(bh);
ks_assert(pool);
if (argc > 1) autoconnect = argv[1];
ks_bool_t connected = KS_FALSE;
blade_rpc_t *brpc = NULL;
- blade_identity_create(&target, blade_handle_pool_get(bh));
+ blade_identity_create(&target, ks_pool_get(bh));
if (blade_identity_parse(target, autoconnect) == KS_STATUS_SUCCESS) connected = blade_handle_connect(bh, &bc, target, NULL) == KS_STATUS_SUCCESS;
KS_DECLARE(ks_status_t) ks_init(void);
KS_DECLARE(ks_status_t) ks_shutdown(void);
KS_DECLARE(ks_pool_t *) ks_global_pool(void);
-KS_DECLARE(ks_status_t) ks_global_set_cleanup(ks_pool_cleanup_callback_t callback, void *arg);
KS_DECLARE(int) ks_vasprintf(char **ret, const char *fmt, va_list ap);
//KS_DECLARE_DATA extern ks_logger_t ks_logger;
KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *pool);
+// Returns KS_TRUE if addr is a live pool allocation (its hidden prefix passes the magic checks), KS_FALSE otherwise (including NULL).
+KS_DECLARE(ks_bool_t) ks_pool_verify(void *addr);
+
+// Returns the pool that owns the allocation at addr (read from the allocation's hidden prefix), or NULL if addr is NULL or its prefix is not recognized.
+KS_DECLARE(ks_pool_t *) ks_pool_get(void *addr);
+
/*
* void *ks_pool_alloc
*
*
* ARGUMENTS:
*
- * pool -> Pointer to the memory pool. If NULL then it will do a
- * normal free.
- *
* addr <-> Address to free.
*
*/
-KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *pool, void **addrP);
+KS_DECLARE(ks_status_t) ks_pool_free_ex(void **addrP);
/*
*
* ARGUMENTS:
*
- * pool -> Pointer to the memory pool.
- *
* addr -> The addr to ref
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
-KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *pool, void *addr, ks_status_t *error_p);
+KS_DECLARE(void *) ks_pool_ref_ex(void *addr, ks_status_t *error_p);
-#define ks_pool_ref(_p, _x) ks_pool_ref_ex(_p, _x, NULL)
+#define ks_pool_ref(_x) ks_pool_ref_ex(_x, NULL)
/*
* void *ks_pool_resize
*
* ARGUMENTS:
*
- * pool -> Pointer to the memory pool. If NULL then it will do a
- * normal realloc.
- *
* old_addr -> Previously allocated address.
*
* new_size -> New size of the allocation.
*
*/
-KS_DECLARE(void *) ks_pool_resize(ks_pool_t *pool, void *old_addr, const ks_size_t new_size);
+KS_DECLARE(void *) ks_pool_resize(void *old_addr, const ks_size_t new_size);
/*
* void *ks_pool_resize_ex
*
* ARGUMENTS:
*
- * pool -> Pointer to the memory pool.
- *
* old_addr -> Previously allocated address.
*
* new_size -> New size of the allocation.
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
-KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *pool, void *old_addr, const ks_size_t new_size, ks_status_t *error_p);
+KS_DECLARE(void *) ks_pool_resize_ex(void *old_addr, const ks_size_t new_size, ks_status_t *error_p);
/*
* int ks_pool_stats
*/
KS_DECLARE(const char *) ks_pool_strerror(const ks_status_t error);
-KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_callback_t callback);
+KS_DECLARE(ks_status_t) ks_pool_set_cleanup(void *ptr, void *arg, ks_pool_cleanup_callback_t callback);
-#define ks_pool_free(_p, _x) ks_pool_free_ex(_p, (void **)_x)
+#define ks_pool_free(_x) ks_pool_free_ex((void **)_x)
/*<<<<<<<<<< This is end of the auto-generated output from fillproto. */
#define KS_THREAD_IS_RUNNING(_thread) _thread->state == KS_THREAD_RUNNING
struct ks_thread {
- ks_pool_t *pool;
#ifdef WIN32
void *handle;
#else
char host[48];
} ks_sockaddr_t;
-typedef void (*ks_pool_cleanup_callback_t)(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type);
+typedef void (*ks_pool_cleanup_callback_t)(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type);
typedef void (*ks_logger_t) (const char *file, const char *func, int line, int level, const char *fmt, ...);
typedef void (*ks_listen_callback_t) (ks_socket_t server_sock, ks_socket_t client_sock, ks_sockaddr_t *addr, void *user_data);
/** list object */
typedef struct {
- ks_pool_t *pool;
ks_rwl_t *lock;
struct ks_list_entry_s *head_sentinel;
}
-KS_DECLARE(ks_status_t) ks_global_set_cleanup(ks_pool_cleanup_callback_t callback, void *arg)
-{
- return ks_pool_set_cleanup(ks_global_pool(), NULL, arg, callback);
-}
-
KS_DECLARE(ks_status_t) ks_init(void)
{
unsigned int pid = 0;
struct ks_network_list {
struct ks_network_node *node_head;
ks_bool_t default_type;
- ks_pool_t *pool;
char *name;
};
}
new_list = ks_pool_alloc(pool, sizeof(**list));
- new_list->pool = pool;
new_list->default_type = default_type;
- new_list->name = ks_pstrdup(new_list->pool, name);
+ new_list->name = ks_pstrdup(pool, name);
*list = new_list;
KS_DECLARE(ks_status_t) ks_network_list_perform_add_cidr_token(ks_network_list_t *list, const char *cidr_str, ks_bool_t ok,
const char *token)
{
+ ks_pool_t *pool = NULL;
ks_ip_t ip, mask;
uint32_t bits;
ks_network_node_t *node;
return KS_STATUS_GENERR;
}
- node = ks_pool_alloc(list->pool, sizeof(*node));
+ pool = ks_pool_get(list);
+
+ node = ks_pool_alloc(pool, sizeof(*node));
node->ip = ip;
node->mask = mask;
node->ok = ok;
node->bits = bits;
- node->str = ks_pstrdup(list->pool, cidr_str);
+ node->str = ks_pstrdup(pool, cidr_str);
if (strchr(cidr_str,':')) {
node->family = AF_INET6;
}
if (!zstr(token)) {
- node->token = ks_pstrdup(list->pool, token);
+ node->token = ks_pstrdup(pool, token);
}
node->next = list->node_head;
KS_DECLARE(ks_status_t) ks_network_list_add_host_mask(ks_network_list_t *list, const char *host, const char *mask_str, ks_bool_t ok)
{
+ ks_pool_t *pool = NULL;
ks_ip_t ip, mask;
ks_network_node_t *node;
ks_inet_pton(AF_INET, host, &ip);
ks_inet_pton(AF_INET, mask_str, &mask);
- node = ks_pool_alloc(list->pool, sizeof(*node));
+ pool = ks_pool_get(list);
+
+ node = ks_pool_alloc(pool, sizeof(*node));
node->ip.v4 = ntohl(ip.v4);
node->mask.v4 = ntohl(mask.v4);
mask.v4 = (mask.v4 & 0x33333333) + ((mask.v4 >> 2) & 0x33333333);
node->bits = (((mask.v4 + (mask.v4 >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
- node->str = ks_psprintf(list->pool, "%s:%s", host, mask_str);
+ node->str = ks_psprintf(pool, "%s:%s", host, mask_str);
node->next = list->node_head;
list->node_head = node;
};
struct ks_hash {
- ks_pool_t *pool;
unsigned int tablelength;
struct entry **table;
unsigned int entrycount;
/*****************************************************************************/
-static void ks_hash_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void ks_hash_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//ks_hash_t *hash = (ks_hash_t *) ptr;
}
h = (ks_hash_t *) ks_pool_alloc(pool, sizeof(ks_hash_t));
- h->pool = pool;
h->flags = flags;
h->destructor = destructor;
h->keysize = keysize;
h->mode = mode;
if ((flags & KS_HASH_FLAG_RWLOCK)) {
- ks_rwl_create(&h->rwl, h->pool);
+ ks_rwl_create(&h->rwl, pool);
}
if (!(flags & KS_HASH_FLAG_NOLOCK)) {
- ks_mutex_create(&h->mutex, KS_MUTEX_FLAG_DEFAULT, h->pool);
+ ks_mutex_create(&h->mutex, KS_MUTEX_FLAG_DEFAULT, pool);
}
if (NULL == h) abort(); /*oom*/
- h->table = (struct entry **)ks_pool_alloc(h->pool, sizeof(struct entry*) * size);
+ h->table = (struct entry **)ks_pool_alloc(pool, sizeof(struct entry*) * size);
if (NULL == h->table) abort(); /*oom*/
*hp = h;
- ks_pool_set_cleanup(pool, h, NULL, ks_hash_cleanup);
+ ks_pool_set_cleanup(h, NULL, ks_hash_cleanup);
return KS_STATUS_SUCCESS;
}
if (h->primeindex == (prime_table_length - 1)) return 0;
newsize = primes[++(h->primeindex)];
- newtable = (struct entry **)ks_pool_alloc(h->pool, sizeof(struct entry*) * newsize);
+ newtable = (struct entry **)ks_pool_alloc(ks_pool_get(h), sizeof(struct entry*) * newsize);
if (NULL != newtable)
{
memset(newtable, 0, newsize * sizeof(struct entry *));
newtable[index] = e;
}
}
- ks_pool_free(h->pool, &h->table);
+ ks_pool_free(&h->table);
h->table = newtable;
}
/* Plan B: realloc instead */
else
{
newtable = (struct entry **)
- ks_pool_resize(h->pool, h->table, newsize * sizeof(struct entry *));
+ ks_pool_resize(h->table, newsize * sizeof(struct entry *));
if (NULL == newtable) { (h->primeindex)--; return 0; }
h->table = newtable;
memset(newtable[h->tablelength], 0, newsize - h->tablelength);
h->entrycount--;
v = e->v;
if (e->flags & KS_HASH_FLAG_FREE_KEY) {
- ks_pool_free(h->pool, &e->k);
+ ks_pool_free(&e->k);
}
if (e->flags & KS_HASH_FLAG_FREE_VALUE) {
- ks_pool_free(h->pool, &e->v);
+ ks_pool_free(&e->v);
v = NULL;
} else if (e->destructor) {
e->destructor(e->v);
h->destructor(e->v);
v = e->v = NULL;
}
- ks_pool_free(h->pool, &e);
+ ks_pool_free(&e);
return v;
}
pE = &(e->next);
ks_hash_expand(h);
index = indexFor(h->tablelength, hashvalue);
}
- e = (struct entry *)ks_pool_alloc(h->pool, sizeof(struct entry));
+ e = (struct entry *)ks_pool_alloc(ks_pool_get(h), sizeof(struct entry));
e->h = hashvalue;
e->k = k;
e->v = v;
unsigned int i;
struct entry *e, *f;
struct entry **table = (*h)->table;
- ks_pool_t *pool;
ks_hash_write_lock(*h);
f = e; e = e->next;
if (f->flags & KS_HASH_FLAG_FREE_KEY) {
- ks_pool_free((*h)->pool, &f->k);
+ ks_pool_free(&f->k);
}
if (f->flags & KS_HASH_FLAG_FREE_VALUE) {
- ks_pool_free((*h)->pool, &f->v);
+ ks_pool_free(&f->v);
} else if (f->destructor) {
f->destructor(f->v);
f->v = NULL;
(*h)->destructor(f->v);
f->v = NULL;
}
- ks_pool_free((*h)->pool, &f);
+ ks_pool_free(&f);
}
}
- pool = (*h)->pool;
- ks_pool_free(pool, &(*h)->table);
+ ks_pool_free(&(*h)->table);
ks_hash_write_unlock(*h);
- if ((*h)->rwl) ks_pool_free(pool, &(*h)->rwl);
+ if ((*h)->rwl) ks_pool_free(&(*h)->rwl);
if ((*h)->mutex) {
- ks_pool_free(pool, &(*h)->mutex);
+ ks_pool_free(&(*h)->mutex);
}
- ks_pool_free(pool, &(*h));
- pool = NULL;
+ ks_pool_free(&(*h));
*h = NULL;
-
-
}
KS_DECLARE(void) ks_hash_last(ks_hash_iterator_t **iP)
ks_rwl_read_unlock(i->h->rwl);
}
- ks_pool_free(i->h->pool, &i);
+ ks_pool_free(&i);
*iP = NULL;
}
ks_assert(locked != KS_READLOCKED || (h->flags & KS_HASH_FLAG_RWLOCK));
- iterator = ks_pool_alloc(h->pool, sizeof(*iterator));
+ iterator = ks_pool_alloc(ks_pool_get(h), sizeof(*iterator));
ks_assert(iterator);
iterator->pos = 0;
#else
pthread_mutex_t mutex;
#endif
- ks_pool_t * pool;
ks_mutex_type_t type;
uint8_t malloc;
};
-static void ks_mutex_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void ks_mutex_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_mutex_t *mutex = (ks_mutex_t *) ptr;
#endif
free(mutex);
} else {
- ks_pool_free(mutex->pool, &mutex);
+ ks_pool_free(&mutex);
}
return KS_STATUS_SUCCESS;
check->malloc = 1;
}
- check->pool = pool;
check->type = KS_MUTEX_TYPE_DEFAULT;
#ifdef WIN32
status = KS_STATUS_SUCCESS;
if (pool) {
- ks_pool_set_cleanup(pool, check, NULL, ks_mutex_cleanup);
+ ks_pool_set_cleanup(check, NULL, ks_mutex_cleanup);
}
done:
struct ks_cond {
- ks_pool_t * pool;
ks_mutex_t *mutex;
#ifdef WIN32
CONDITION_VARIABLE cond;
uint8_t static_mutex;
};
-static void ks_cond_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void ks_cond_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_cond_t *cond = (ks_cond_t *) ptr;
goto done;
}
- check->pool = pool;
if (mutex) {
check->mutex = mutex;
check->static_mutex = 1;
*cond = check;
status = KS_STATUS_SUCCESS;
- ks_pool_set_cleanup(pool, check, NULL, ks_cond_cleanup);
+ ks_pool_set_cleanup(check, NULL, ks_cond_cleanup);
done:
return status;
*cond = NULL;
- return ks_pool_free(condp->pool, &condp);
+ return ks_pool_free(&condp);
}
#else
pthread_rwlock_t rwlock;
#endif
- ks_pool_t *pool;
ks_pid_t write_locker;
uint32_t wlc;
};
-static void ks_rwl_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void ks_rwl_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
#ifndef WIN32
ks_rwl_t *rwlock = (ks_rwl_t *) ptr;
goto done;
}
- check->pool = pool;
-
#ifdef WIN32
if (ks_hash_create(&check->read_lock_list, KS_HASH_MODE_PTR, KS_HASH_FLAG_NONE, pool) != KS_STATUS_SUCCESS) {
*rwlock = check;
status = KS_STATUS_SUCCESS;
- ks_pool_set_cleanup(pool, check, NULL, ks_rwl_cleanup);
+ ks_pool_set_cleanup(check, NULL, ks_rwl_cleanup);
done:
return status;
}
*rwlock = NULL;
- return ks_pool_free(rwlockp->pool, &rwlockp);
+ return ks_pool_free(&rwlockp);
}
ks_pool_cleanup_callback_t cleanup_callback;
void *cleanup_arg;
ks_size_t magic4;
- ks_size_t reserved[2]; // @todo use one of these to store the original pool address to validate that free_mem is not attempted against the wrong pool, which can corrupt the allocation linked list
+ ks_pool_t *pool;
+ ks_size_t magic5;
};
#define KS_POOL_PREFIX_SIZE sizeof(ks_pool_prefix_t)
static void write_fence(void *addr);
static ks_status_t check_prefix(const ks_pool_prefix_t *prefix);
-static void perform_pool_cleanup_on_free(ks_pool_t *pool, ks_pool_prefix_t *prefix)
+static void perform_pool_cleanup_on_free(ks_pool_prefix_t *prefix)
{
void *addr;
- ks_assert(pool);
ks_assert(prefix);
+ // The owning pool is now recorded in the allocation prefix itself, so it no
+ // longer needs to be passed in (and cannot be passed wrongly).
+ ks_assert(prefix->pool);
- if (pool->cleaning_up) return;
+ // Skip per-allocation cleanup while the whole pool is being torn down; the
+ // global-free path drives the callbacks itself in that case.
+ if (prefix->pool->cleaning_up) return;
addr = (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE);
if (prefix->cleanup_callback) {
- prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_ANNOUNCE, KS_MPCL_FREE);
- prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_TEARDOWN, KS_MPCL_FREE);
- prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_DESTROY, KS_MPCL_FREE);
+ // Callbacks lost their pool parameter; run the full ANNOUNCE -> TEARDOWN ->
+ // DESTROY sequence for this single allocation being freed.
+ prefix->cleanup_callback(addr, prefix->cleanup_arg, KS_MPCL_ANNOUNCE, KS_MPCL_FREE);
+ prefix->cleanup_callback(addr, prefix->cleanup_arg, KS_MPCL_TEARDOWN, KS_MPCL_FREE);
+ prefix->cleanup_callback(addr, prefix->cleanup_arg, KS_MPCL_DESTROY, KS_MPCL_FREE);
}
}
for (prefix = pool->first; prefix; prefix = prefix->next) {
if (!prefix->cleanup_callback) continue;
- prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_ANNOUNCE, KS_MPCL_GLOBAL_FREE);
+ prefix->cleanup_callback((void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_ANNOUNCE, KS_MPCL_GLOBAL_FREE);
}
for (prefix = pool->first; prefix; prefix = prefix->next) {
if (!prefix->cleanup_callback) continue;
- prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_TEARDOWN, KS_MPCL_GLOBAL_FREE);
+ prefix->cleanup_callback((void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_TEARDOWN, KS_MPCL_GLOBAL_FREE);
}
for (prefix = pool->first; prefix; prefix = prefix->next) {
if (!prefix->cleanup_callback) continue;
- prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_DESTROY, KS_MPCL_GLOBAL_FREE);
+ prefix->cleanup_callback((void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_DESTROY, KS_MPCL_GLOBAL_FREE);
}
}
-KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_callback_t callback)
+KS_DECLARE(ks_status_t) ks_pool_set_cleanup(void *ptr, void *arg, ks_pool_cleanup_callback_t callback)
{
ks_status_t ret = KS_STATUS_SUCCESS;
ks_pool_prefix_t *prefix = NULL;
- ks_assert(pool);
ks_assert(ptr);
ks_assert(callback);
*/
static ks_status_t check_prefix(const ks_pool_prefix_t *prefix)
{
- if (!(prefix->magic1 == KS_POOL_PREFIX_MAGIC && prefix->magic2 == KS_POOL_PREFIX_MAGIC && prefix->magic3 == KS_POOL_PREFIX_MAGIC && prefix->magic4 == KS_POOL_PREFIX_MAGIC)) return KS_STATUS_INVALID_POINTER;
+ // magic5 sits after the new prefix->pool member, so all five magics must be
+ // intact before the stored pool pointer may be trusted (ks_pool_get,
+ // ks_pool_verify and the pool-less free/resize paths all rely on this).
+ if (!(prefix->magic1 == KS_POOL_PREFIX_MAGIC &&
+ prefix->magic2 == KS_POOL_PREFIX_MAGIC &&
+ prefix->magic3 == KS_POOL_PREFIX_MAGIC &&
+ prefix->magic4 == KS_POOL_PREFIX_MAGIC &&
+ prefix->magic5 == KS_POOL_PREFIX_MAGIC)) return KS_STATUS_INVALID_POINTER;
return KS_STATUS_SUCCESS;
}
if (!pool->last) pool->last = prefix;
prefix->magic3 = KS_POOL_PREFIX_MAGIC;
prefix->magic4 = KS_POOL_PREFIX_MAGIC;
+ prefix->pool = pool;
+ prefix->magic5 = KS_POOL_PREFIX_MAGIC;
write_fence(fence);
* addr -> Address to free.
*
*/
-static ks_status_t free_mem(ks_pool_t *pool, void *addr)
+static ks_status_t free_mem(void *addr)
{
ks_status_t ret = KS_STATUS_SUCCESS;
void *start = NULL;
void *fence = NULL;
ks_pool_prefix_t *prefix = NULL;
+ ks_pool_t *pool = NULL;
- ks_assert(pool);
ks_assert(addr);
start = (void *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) return ret;
+ pool = prefix->pool;
+
if (prefix->refs > 0) {
prefix->refs--;
fence = (void *)((uintptr_t)addr + prefix->size);
ret = check_fence(fence);
- perform_pool_cleanup_on_free(pool, prefix);
+ perform_pool_cleanup_on_free(prefix);
if (!prefix->prev && !prefix->next) pool->first = pool->last = NULL;
else if (!prefix->prev) {
*/
static ks_pool_t *ks_pool_raw_open(const ks_size_t flags, ks_status_t *error_p)
{
- ks_pool_t *pool;
+ ks_pool_t *pool = NULL;
pool = malloc(sizeof(ks_pool_t));
ks_assert(pool);
return ret;
}
+// Verify that addr points at a live ks_pool allocation: walk back
+// KS_POOL_PREFIX_SIZE bytes and check the hidden prefix's magic values.
+// Returns KS_FALSE for NULL or for any address whose prefix fails
+// check_prefix(); returns KS_TRUE otherwise.
+KS_DECLARE(ks_bool_t) ks_pool_verify(void *addr)
+{
+ ks_pool_prefix_t *prefix = NULL;
+ if (!addr) return KS_FALSE;
+ prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
+ if (check_prefix(prefix) != KS_STATUS_SUCCESS) return KS_FALSE;
+ return KS_TRUE;
+}
+
+// Resolve the ks_pool_t that owns the allocation at addr by reading the pool
+// pointer recorded in the allocation's hidden prefix.
+// Returns NULL when addr is NULL or its prefix fails check_prefix(); if the
+// prefix is valid but the recorded pool fails check_pool(), the ks_assert
+// below fires (ret is only changed on that path).
+KS_DECLARE(ks_pool_t *) ks_pool_get(void *addr)
+{
+ ks_pool_prefix_t *prefix = NULL;
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ ks_pool_t *pool = NULL;
+
+ if (!addr) goto done;
+
+ prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
+ if (check_prefix(prefix) != KS_STATUS_SUCCESS) goto done;
+
+ if ((ret = check_pool(prefix->pool)) == KS_STATUS_SUCCESS) pool = prefix->pool;
+
+done:
+ ks_assert(ret == KS_STATUS_SUCCESS);
+
+ return pool;
+}
+
/*
* void *ks_pool_alloc_ex
*
*
* ARGUMENTS:
*
- * pool -> Pointer to the memory pool.
- *
* addr <-> Pointer to pointer of Address to free.
*
*/
-KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *pool, void **addrP)
+KS_DECLARE(ks_status_t) ks_pool_free_ex(void **addrP)
{
ks_status_t ret = KS_STATUS_SUCCESS;
- void *addr;
+ void *addr = NULL;
+ ks_pool_prefix_t *prefix = NULL;
+ ks_pool_t *pool = NULL;
- ks_assert(pool);
ks_assert(addrP);
ks_assert(*addrP);
addr = *addrP;
+ prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
+ if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) goto done;
+
+ pool = prefix->pool;
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
ks_mutex_lock(pool->mutex);
if (pool->log_func != NULL) {
- ks_pool_prefix_t *prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
// @todo check_prefix()?
pool->log_func(pool, prefix->refs == 1 ? KS_POOL_FUNC_FREE : KS_POOL_FUNC_DECREF, prefix->size, prefix->refs - 1, addr, NULL, 0);
}
- ret = free_mem(pool, addr);
+ ret = free_mem(addr);
ks_mutex_unlock(pool->mutex);
done:
*
* ARGUMENTS:
*
- * pool -> Pointer to the memory pool.
- *
* addr -> The addr to ref
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
-KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *pool, void *addr, ks_status_t *error_p)
+KS_DECLARE(void *) ks_pool_ref_ex(void *addr, ks_status_t *error_p)
{
ks_status_t ret = KS_STATUS_SUCCESS;
- ks_pool_prefix_t *prefix;
+ ks_pool_prefix_t *prefix = NULL;
+ ks_pool_t *pool = NULL;
ks_size_t refs;
- ks_assert(pool);
ks_assert(addr);
- if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
-
prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) goto done;
+ pool = prefix->pool;
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
+
ks_mutex_lock(pool->mutex);
refs = ++prefix->refs;
ks_mutex_unlock(pool->mutex);
*
* ARGUMENTS:
*
- * pool -> Pointer to the memory pool.
- *
- *
* old_addr -> Previously allocated address.
*
* new_size -> New size of the allocation.
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
-KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *pool, void *old_addr, const ks_size_t new_size, ks_status_t *error_p)
+KS_DECLARE(void *) ks_pool_resize_ex(void *old_addr, const ks_size_t new_size, ks_status_t *error_p)
{
ks_status_t ret = KS_STATUS_SUCCESS;
ks_size_t old_size;
- ks_pool_prefix_t *prefix;
+ ks_pool_prefix_t *prefix = NULL;
+ ks_pool_t *pool = NULL;
void *new_addr = NULL;
ks_size_t required;
- ks_assert(pool);
+ ks_assert(old_addr);
ks_assert(new_size);
- if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) {
+ prefix = (ks_pool_prefix_t *)((uintptr_t)old_addr - KS_POOL_PREFIX_SIZE);
+ if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) {
SET_POINTER(error_p, ret);
return NULL;
}
- if (!old_addr) {
- return ks_pool_alloc_ex(pool, new_size, error_p);
- }
-
- prefix = (ks_pool_prefix_t *)((uintptr_t)old_addr - KS_POOL_PREFIX_SIZE);
- if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) {
+ pool = prefix->pool;
+ if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) {
SET_POINTER(error_p, ret);
return NULL;
}
*
* ARGUMENTS:
*
- * pool -> Pointer to the memory pool.
- *
- *
* old_addr -> Previously allocated address.
*
* new_size -> New size of the allocation.
*
*/
-KS_DECLARE(void *) ks_pool_resize(ks_pool_t *pool, void *old_addr, const ks_size_t new_size)
+KS_DECLARE(void *) ks_pool_resize(void *old_addr, const ks_size_t new_size)
{
- return ks_pool_resize_ex(pool, old_addr, new_size, NULL);
+ // Thin wrapper without error reporting. The owning pool is derived from
+ // old_addr's prefix, so old_addr must be a valid pool allocation
+ // (ks_pool_resize_ex now asserts a non-NULL old_addr; callers that may hold
+ // NULL must allocate instead — see pool_realloc).
+ return ks_pool_resize_ex(old_addr, new_size, NULL);
}
/*
*/
static void *pool_realloc(void *old, int size, void *arg)
{
- return ks_pool_resize(arg, old, size);
+ // realloc-style adapter (arg carries the pool). ks_pool_resize() no longer
+ // accepts NULL or non-pool pointers — it reads the pool from the allocation
+ // prefix — so fall back to a fresh ks_pool_alloc() from the supplied pool
+ // whenever old is NULL or fails ks_pool_verify().
+ void *addr = NULL;
+ ks_pool_t *pool = (ks_pool_t *)arg;
+ if (!old || !ks_pool_verify(old)) addr = ks_pool_alloc(pool, size);
+ else addr = ks_pool_resize(old, size);
+ return addr;
}
/*
} ks_qnode_t;
struct ks_q_s {
- ks_pool_t *pool;
ks_flush_fn_t flush_fn;
void *flush_data;
ks_size_t len;
uint8_t active;
};
-static void ks_q_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void ks_q_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_q_t *q = (ks_q_t *) ptr;
ks_qnode_t *np, *fp;
while(np) {
fp = np;
np = np->next;
- ks_pool_free(q->pool, &fp);
+ ks_pool_free(&fp);
}
np = q->empty;
while(np) {
fp = np;
np = np->next;
- ks_pool_free(q->pool, &fp);
+ ks_pool_free(&fp);
}
break;
case KS_MPCL_DESTROY:
KS_DECLARE(ks_status_t) ks_q_destroy(ks_q_t **qP)
{
ks_q_t *q;
- ks_pool_t *pool;
ks_assert(qP);
ks_q_flush(q);
ks_q_term(q);
- pool = q->pool;
- ks_pool_free(pool, &q);
- pool = NULL;
+ ks_pool_free(&q);
return KS_STATUS_SUCCESS;
}
q = ks_pool_alloc(pool, sizeof(*q));
ks_assert(q);
- q->pool = pool;
-
-
ks_mutex_create(&q->list_mutex, KS_MUTEX_FLAG_DEFAULT, pool);
ks_assert(q->list_mutex);
q->maxlen = maxlen;
q->active = 1;
- ks_pool_set_cleanup(pool, q, NULL, ks_q_cleanup);
+ ks_pool_set_cleanup(q, NULL, ks_q_cleanup);
*qP = q;
np = q->empty;
q->empty = q->empty->next;
} else {
- np = ks_pool_alloc(q->pool, sizeof(*np));
+ np = ks_pool_alloc(ks_pool_get(q), sizeof(*np));
}
np->prev = np->next = NULL;
thread_default_stacksize = size;
}
-static void ks_thread_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void ks_thread_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_thread_t *thread = (ks_thread_t *) ptr;
thread->stack_size = stack_size;
thread->flags = flags;
thread->priority = priority;
- thread->pool = pool;
#if defined(WIN32)
thread->handle = (void *) _beginthreadex(NULL, (unsigned) thread->stack_size, (unsigned int (__stdcall *) (void *)) thread_launch, thread, 0, NULL);
fail:
if (thread) {
thread->state = KS_THREAD_FAIL;
- if (pool) {
- ks_pool_free(pool, &thread);
- }
+ ks_pool_free(&thread);
}
done:
if (status == KS_STATUS_SUCCESS) {
}
*rthread = thread;
- ks_pool_set_cleanup(pool, thread, NULL, ks_thread_cleanup);
+ ks_pool_set_cleanup(thread, NULL, ks_thread_cleanup);
}
return status;
uint32_t dying_thread_count;
ks_thread_pool_state_t state;
ks_mutex_t *mutex;
- ks_pool_t *pool;
};
typedef struct ks_thread_job_s {
ks_mutex_unlock(tp->mutex);
while(need > 0) {
- if (ks_thread_create_ex(&thread, worker_thread, tp, KS_THREAD_FLAG_DETACHED, tp->stack_size, tp->priority, tp->pool) != KS_STATUS_SUCCESS) {
+ if (ks_thread_create_ex(&thread, worker_thread, tp, KS_THREAD_FLAG_DETACHED, tp->stack_size, tp->priority, ks_pool_get(tp)) != KS_STATUS_SUCCESS) {
ks_mutex_lock(tp->mutex);
tp->thread_count--;
ks_mutex_unlock(tp->mutex);
idle_sec = 0;
job->func(thread, job->data);
- ks_pool_free(tp->pool, &job);
+ ks_pool_free(&job);
ks_mutex_lock(tp->mutex);
tp->busy_thread_count--;
KS_DECLARE(ks_status_t) ks_thread_pool_create(ks_thread_pool_t **tp, uint32_t min, uint32_t max, size_t stack_size,
ks_thread_priority_t priority, uint32_t idle_sec)
{
- ks_pool_t *pool;
+ ks_pool_t *pool = NULL;
ks_pool_open(&pool);
(*tp)->min = min;
(*tp)->max = max;
- (*tp)->pool = pool;
(*tp)->stack_size = stack_size;
(*tp)->priority = priority;
(*tp)->state = TP_STATE_RUNNING;
(*tp)->idle_sec = idle_sec;
- ks_mutex_create(&(*tp)->mutex, KS_MUTEX_FLAG_DEFAULT, (*tp)->pool);
- ks_q_create(&(*tp)->q, (*tp)->pool, TP_MAX_QLEN);
+ ks_mutex_create(&(*tp)->mutex, KS_MUTEX_FLAG_DEFAULT, pool);
+ ks_q_create(&(*tp)->q, pool, TP_MAX_QLEN);
check_queue(*tp, KS_FALSE);
KS_DECLARE(ks_status_t) ks_thread_pool_destroy(ks_thread_pool_t **tp)
{
- ks_pool_t *pool;
+ ks_pool_t *pool = NULL;
ks_assert(tp);
ks_sleep(100000);
}
- pool = (*tp)->pool;
+ pool = ks_pool_get(*tp);
ks_pool_close(&pool);
return KS_STATUS_SUCCESS;
KS_DECLARE(ks_status_t) ks_thread_pool_add_job(ks_thread_pool_t *tp, ks_thread_function_t func, void *data)
{
- ks_thread_job_t *job = (ks_thread_job_t *) ks_pool_alloc(tp->pool, sizeof(*job));
+ ks_thread_job_t *job = (ks_thread_job_t *) ks_pool_alloc(ks_pool_get(tp), sizeof(*job));
job->func = func;
job->data = data;
struct kws_s {
- ks_pool_t *pool;
ks_socket_t sock;
kws_type_t type;
char *buffer;
goto err;
}
- kws->uri = ks_pool_alloc(kws->pool, (unsigned long)(e-p) + 1);
+ kws->uri = ks_pool_alloc(ks_pool_get(kws), (unsigned long)(e-p) + 1);
strncpy(kws->uri, p, e-p);
*(kws->uri + (e-p)) = '\0';
kws_t *kws;
kws = ks_pool_alloc(pool, sizeof(*kws));
- kws->pool = pool;
if ((flags & KWS_CLOSE_SOCK)) {
kws->close_sock = 1;
if (client_data) {
char *p = NULL;
- kws->req_uri = ks_pstrdup(kws->pool, client_data);
+ kws->req_uri = ks_pstrdup(pool, client_data);
if ((p = strchr(kws->req_uri, ':'))) {
*p++ = '\0';
kws->buflen = 1024 * 64;
kws->bbuflen = kws->buflen;
- kws->buffer = ks_pool_alloc(kws->pool, (unsigned long)kws->buflen);
- kws->bbuffer = ks_pool_alloc(kws->pool, (unsigned long)kws->bbuflen);
+ kws->buffer = ks_pool_alloc(pool, (unsigned long)kws->buflen);
+ kws->bbuffer = ks_pool_alloc(pool, (unsigned long)kws->bbuflen);
//printf("init %p %ld\n", (void *) kws->bbuffer, kws->bbuflen);
//memset(kws->buffer, 0, kws->buflen);
//memset(kws->bbuffer, 0, kws->bbuflen);
kws->down = 2;
if (kws->write_buffer) {
- ks_pool_free(kws->pool, &kws->write_buffer);
+ ks_pool_free(&kws->write_buffer);
kws->write_buffer = NULL;
kws->write_buffer_len = 0;
}
kws->ssl = NULL;
}
- if (kws->buffer) ks_pool_free(kws->pool, &kws->buffer);
- if (kws->bbuffer) ks_pool_free(kws->pool, &kws->bbuffer);
+ if (kws->buffer) ks_pool_free(&kws->buffer);
+ if (kws->bbuffer) ks_pool_free(&kws->bbuffer);
kws->buffer = kws->bbuffer = NULL;
- ks_pool_free(kws->pool, &kws);
+ ks_pool_free(&kws);
kws = NULL;
}
kws->down = 1;
if (kws->uri) {
- ks_pool_free(kws->pool, &kws->uri);
+ ks_pool_free(&kws->uri);
kws->uri = NULL;
}
void *tmp;
kws->bbuflen = need + blen + kws->rplen;
- if ((tmp = ks_pool_resize(kws->pool, kws->bbuffer, (unsigned long)kws->bbuflen))) {
+ if ((tmp = ks_pool_resize(kws->bbuffer, (unsigned long)kws->bbuflen))) {
kws->bbuffer = tmp;
} else {
abort();
void *tmp;
kws->write_buffer_len = hlen + bytes + 1;
- if ((tmp = ks_pool_resize(kws->pool, kws->write_buffer, (unsigned long)kws->write_buffer_len))) {
+ if (!kws->write_buffer) kws->write_buffer = ks_pool_alloc(ks_pool_get(kws), (unsigned long)kws->write_buffer_len);
+ else if ((tmp = ks_pool_resize(kws->write_buffer, (unsigned long)kws->write_buffer_len))) {
kws->write_buffer = tmp;
} else {
abort();
#endif
-static void ks_list_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+static void ks_list_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_list_t *l = (ks_list_t *)ptr;
case KS_MPCL_TEARDOWN:
ks_list_clear(l);
ks_rwl_write_lock(l->lock);
- for (unsigned int i = 0; i < l->spareelsnum; i++) ks_pool_free(l->pool, &l->spareels[i]);
+ for (unsigned int i = 0; i < l->spareelsnum; i++) ks_pool_free(&l->spareels[i]);
l->spareelsnum = 0;
- ks_pool_free(l->pool, &l->spareels);
- ks_pool_free(l->pool, &l->head_sentinel);
- ks_pool_free(l->pool, &l->tail_sentinel);
+ ks_pool_free(&l->spareels);
+ ks_pool_free(&l->head_sentinel);
+ ks_pool_free(&l->tail_sentinel);
ks_rwl_write_unlock(l->lock);
ks_rwl_destroy(&l->lock);
break;
l = ks_pool_alloc(pool, sizeof(ks_list_t));
ks_assert(l);
- l->pool = pool;
l->numels = 0;
ks_rwl_create(&l->lock, pool);
ks_assert(ks_list_repOk(l));
ks_assert(ks_list_attrOk(l));
- ks_pool_set_cleanup(pool, l, NULL, ks_list_cleanup);
+ ks_pool_set_cleanup(l, NULL, ks_list_cleanup);
*list = l;
return KS_STATUS_SUCCESS;
*list = NULL;
if (!l) return KS_STATUS_FAIL;
- ks_pool_free(l->pool, &l);
+ ks_pool_free(&l);
return KS_STATUS_SUCCESS;
}
}
KS_DECLARE(int) ks_list_insert_at(ks_list_t *restrict l, const void *data, unsigned int pos) {
+ ks_pool_t *pool = NULL;
struct ks_list_entry_s *lent, *succ, *prec;
if (l->iter_active || pos > l->numels) return -1;
+ pool = ks_pool_get(l);
+
ks_rwl_write_lock(l->lock);
/* this code optimizes malloc() with a free-list */
if (l->spareelsnum > 0) {
l->spareelsnum--;
}
else {
- lent = (struct ks_list_entry_s *)ks_pool_alloc(l->pool, sizeof(struct ks_list_entry_s));
+ lent = (struct ks_list_entry_s *)ks_pool_alloc(pool, sizeof(struct ks_list_entry_s));
ks_assert(lent);
}
if (l->attrs.copy_data) {
/* make room for user' data (has to be copied) */
ks_size_t datalen = l->attrs.meter(data);
- lent->data = (struct ks_list_entry_s *)ks_pool_alloc(l->pool, datalen);
+ lent->data = (struct ks_list_entry_s *)ks_pool_alloc(pool, datalen);
memcpy(lent->data, data, datalen);
}
else {
for (; i <= posend; i++) {
tmp2 = tmp;
tmp = tmp->next;
- if (tmp2->data != NULL) ks_pool_free(l->pool, &tmp2->data);
+ if (tmp2->data != NULL) ks_pool_free(&tmp2->data);
if (l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS) {
l->spareels[l->spareelsnum++] = tmp2;
}
else {
- ks_pool_free(l->pool, &tmp2);
+ ks_pool_free(&tmp2);
}
}
}
l->spareels[l->spareelsnum++] = tmp2;
}
else {
- ks_pool_free(l->pool, &tmp2);
+ ks_pool_free(&tmp2);
}
}
}
/* spare a loop conditional with two loops: spareing elems and freeing elems */
for (s = l->head_sentinel->next; l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS && s != l->tail_sentinel; s = s->next) {
/* move elements as spares as long as there is room */
- if (s->data != NULL) ks_pool_free(l->pool, &s->data);
+ if (s->data != NULL) ks_pool_free(&s->data);
l->spareels[l->spareelsnum++] = s;
}
while (s != l->tail_sentinel) {
/* free the remaining elems */
- if (s->data != NULL) ks_pool_free(l->pool, &s->data);
+ if (s->data != NULL) ks_pool_free(&s->data);
s = s->next;
- ks_pool_free(l->pool, &s->prev);
+ ks_pool_free(&s->prev);
}
l->head_sentinel->next = l->tail_sentinel;
l->tail_sentinel->prev = l->head_sentinel;
while (s != l->tail_sentinel) {
/* free the remaining elems */
s = s->next;
- ks_pool_free(l->pool, &s->prev);
+ ks_pool_free(&s->prev);
}
l->head_sentinel->next = l->tail_sentinel;
l->tail_sentinel->prev = l->head_sentinel;
}
KS_DECLARE(int) ks_list_concat(const ks_list_t *l1, const ks_list_t *l2, ks_list_t *restrict dest) {
+ ks_pool_t *pool = NULL;
struct ks_list_entry_s *el, *srcel;
unsigned int cnt;
int err;
dest->numels = l1->numels + l2->numels;
if (dest->numels == 0) goto done;
+ pool = ks_pool_get(dest);
+
/* copy list1 */
srcel = l1->head_sentinel->next;
el = dest->head_sentinel;
while (srcel != l1->tail_sentinel) {
- el->next = (struct ks_list_entry_s *)ks_pool_alloc(dest->pool, sizeof(struct ks_list_entry_s));
+ el->next = (struct ks_list_entry_s *)ks_pool_alloc(pool, sizeof(struct ks_list_entry_s));
el->next->prev = el;
el = el->next;
el->data = srcel->data;
/* copy list 2 */
srcel = l2->head_sentinel->next;
while (srcel != l2->tail_sentinel) {
- el->next = (struct ks_list_entry_s *)ks_pool_alloc(dest->pool, sizeof(struct ks_list_entry_s));
+ el->next = (struct ks_list_entry_s *)ks_pool_alloc(pool, sizeof(struct ks_list_entry_s));
el->next->prev = el;
el = el->next;
el->data = srcel->data;
static void *ks_list_sort_quicksort_threadwrapper(void *wrapped_params) {
struct ks_list_sort_wrappedparams *wp = (struct ks_list_sort_wrappedparams *)wrapped_params;
ks_list_sort_quicksort(wp->l, wp->versus, wp->first, wp->fel, wp->last, wp->lel);
- ks_pool_free(wp->l->pool, &wp);
+ /* ks_pool_free() now derives the owning pool from the allocation itself */
+ ks_pool_free(&wp);
pthread_exit(NULL);
return NULL;
}
if (pivotid > 0) {
/* prepare wrapped args, then start thread */
if (l->threadcount < SIMCLIST_MAXTHREADS - 1) {
- struct ks_list_sort_wrappedparams *wp = (struct ks_list_sort_wrappedparams *)ks_pool_alloc(l->pool, sizeof(struct ks_list_sort_wrappedparams));
+ struct ks_list_sort_wrappedparams *wp = (struct ks_list_sort_wrappedparams *)ks_pool_alloc(ks_pool_get(l), sizeof(struct ks_list_sort_wrappedparams));
l->threadcount++;
traised = 1;
wp->l = l;
wp->last = first + pivotid - 1;
wp->lel = pivot->prev;
if (pthread_create(&tid, NULL, ks_list_sort_quicksort_threadwrapper, wp) != 0) {
- ks_pool_free(l->pool, &wp);
+ ks_pool_free(&wp);
traised = 0;
ks_list_sort_quicksort(l, versus, first, fel, first + pivotid - 1, pivot->prev);
}
if (l->attrs.serializer != NULL) { /* user user-specified serializer */
/* get preliminary length of serialized element in header.elemlen */
ser_buf = l->attrs.serializer(l->head_sentinel->next->data, &header.elemlen);
- ks_pool_free(l->pool, &ser_buf);
+ ks_pool_free(&ser_buf);
/* request custom serialization of each element */
for (x = l->head_sentinel->next; x != l->tail_sentinel; x = x->next) {
ser_buf = l->attrs.serializer(x->data, &bufsize);
header.totlistlen += bufsize;
if (header.elemlen != 0) { /* continue on speculation */
if (header.elemlen != bufsize) {
- ks_pool_free(l->pool, &ser_buf);
+ ks_pool_free(&ser_buf);
/* constant element length speculation broken! */
header.elemlen = 0;
header.totlistlen = 0;
WRITE_ERRCHECK(fd, &bufsize, sizeof(ks_size_t));
WRITE_ERRCHECK(fd, ser_buf, bufsize);
}
- ks_pool_free(l->pool, &ser_buf);
+ ks_pool_free(&ser_buf);
}
}
else if (l->attrs.meter != NULL) {
}
int ks_list_restore_filedescriptor(ks_list_t *restrict l, int fd, ks_size_t *restrict len) {
+ ks_pool_t *pool = NULL;
struct ks_list_dump_header_s header;
unsigned long cnt;
void *buf;
return -1;
}
+ pool = ks_pool_get(l);
+
/* timestamp */
READ_ERRCHECK(fd, &header.timestamp_sec, sizeof(header.timestamp_sec));
header.timestamp_sec = ntohl(header.timestamp_sec);
/* elements have constant size = header.elemlen */
if (l->attrs.unserializer != NULL) {
/* use unserializer */
- buf = ks_pool_alloc(l->pool, header.elemlen);
+ buf = ks_pool_alloc(pool, header.elemlen);
for (cnt = 0; cnt < header.numels; cnt++) {
READ_ERRCHECK(fd, buf, header.elemlen);
ks_list_append(l, l->attrs.unserializer(buf, &elsize));
else {
/* copy verbatim into memory */
for (cnt = 0; cnt < header.numels; cnt++) {
- buf = ks_pool_alloc(l->pool, header.elemlen);
+ buf = ks_pool_alloc(pool, header.elemlen);
READ_ERRCHECK(fd, buf, header.elemlen);
ks_list_append(l, buf);
}
/* use unserializer */
for (cnt = 0; cnt < header.numels; cnt++) {
READ_ERRCHECK(fd, &elsize, sizeof(elsize));
- buf = ks_pool_alloc(l->pool, (ks_size_t)elsize);
+ buf = ks_pool_alloc(pool, (ks_size_t)elsize);
READ_ERRCHECK(fd, buf, elsize);
totreadlen += elsize;
ks_list_append(l, l->attrs.unserializer(buf, &elsize));
/* copy verbatim into memory */
for (cnt = 0; cnt < header.numels; cnt++) {
READ_ERRCHECK(fd, &elsize, sizeof(elsize));
- buf = ks_pool_alloc(l->pool, elsize);
+ buf = ks_pool_alloc(pool, elsize);
READ_ERRCHECK(fd, buf, elsize);
totreadlen += elsize;
ks_list_append(l, buf);
/* free what's to be freed */
if (l->attrs.copy_data && tmp->data != NULL)
- ks_pool_free(l->pool, &tmp->data);
+ ks_pool_free(&tmp->data);
if (l->spareelsnum < SIMCLIST_MAX_SPARE_ELEMS) {
l->spareels[l->spareelsnum++] = tmp;
}
else {
- ks_pool_free(l->pool, &tmp);
+ ks_pool_free(&tmp);
}
return 0;
{
struct x *mydata = (struct x *) data;
- //ks_log(KS_LOG_DEBUG, "Thread %d\n", mydata->i);
+ ks_log(KS_LOG_DEBUG, "Thread %d\n", mydata->i);
ks_sleep(100000);
- ks_pool_free(mydata->pool, &mydata);
+ ks_pool_free(&mydata);
return NULL;
}
ok (!match);
- ks_pool_free(pool, &list);
+ ks_pool_free(&list);
ks_network_list_create(&list, "test", KS_TRUE, pool);
match = ks_check_network_list_ip_cidr("2637:f368:1281::10", "fe80::/10");
ok(!match);
- ks_pool_free(pool, &list);
+ ks_pool_free(&list);
ks_pool_close(&pool);
listener = start_listen(&addr);
listener_index = listeners_count++;
+ /* NOTE(review): resize -> alloc discards previously registered pollfd entries
+ when listeners_count > 1 and leaks the prior buffer. If the new
+ ks_pool_resize() cannot take a NULL pointer (no pool to derive), consider
+ alloc-on-first / resize-thereafter instead — confirm intent. */
- listeners_poll = (struct pollfd *)ks_pool_resize(pool, listeners_poll, sizeof(struct pollfd) * listeners_count);
+ listeners_poll = (struct pollfd *)ks_pool_alloc(pool, sizeof(struct pollfd) * listeners_count);
ok(listeners_poll != NULL);
listeners_poll[listener_index].fd = listener;
listener = listeners_poll[index].fd;
ks_socket_close(&listener);
}
- ks_pool_free(pool, &listeners_poll);
+ ks_pool_free(&listeners_poll);
ks_pool_close(&pool);
};
-void cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
+void cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
struct foo *foo = (struct foo *) ptr;
void *blah = ks_pool_alloc(pool, 64 * 1024);
- ks_pool_free(pool, &blah);
+ ks_pool_free(&blah);
blah = ks_pool_alloc(pool, 2 * 1024);
printf("FREE:\n");
- status = ks_pool_free(pool, &str);
+ status = ks_pool_free(&str);
if (status != KS_STATUS_SUCCESS) {
fprintf(stderr, "FREE ERR: [%s]\n", ks_pool_strerror(err));
exit(255);
printf("ALLOC3 (refs):\n");
- str = ks_pool_ref(pool, str);
+ str = ks_pool_ref(str);
printf("STR [%s]\n", str);
- ks_pool_free(pool, &str);
+ ks_pool_free(&str);
ok(str != NULL && !strcmp(str, STR));
printf("STR [%s]\n", str);
- ks_pool_free(pool, &str);
+ ks_pool_free(&str);
ok(str == NULL);
foo->x = 12;
foo->str = strdup("This is a test 1234 abcd; This will be called on explicit free\n");
- ks_pool_set_cleanup(pool, foo, NULL, cleanup);
+ ks_pool_set_cleanup(foo, NULL, cleanup);
printf("FREE OBJ:\n");
- status = ks_pool_free(pool, &foo);
+ status = ks_pool_free(&foo);
ok(status == KS_STATUS_SUCCESS);
if (status != KS_STATUS_SUCCESS) {
fprintf(stderr, "FREE OBJ ERR: [%s]\n", ks_pool_strerror(status));
foo->x = 12;
foo->str = strdup("This is a second test 1234 abcd; This will be called on pool clear/destroy\n");
- ks_pool_set_cleanup(pool, foo, NULL, cleanup);
+ ks_pool_set_cleanup(foo, NULL, cleanup);
printf("ALLOC OBJ3: %p\n", (void *)pool);
printf("CLEANUP: %p\n", (void *)pool);
foo->x = 12;
foo->str = strdup("This is a third test 1234 abcd; This will be called on pool clear/destroy\n");
- ks_pool_set_cleanup(pool, foo, NULL, cleanup);
+ ks_pool_set_cleanup(foo, NULL, cleanup);
ks_snprintf(str, bytes, "%s", STR);
printf("1 STR [%s]\n", str);
bytes *= 2;
- str = ks_pool_resize(pool, str, bytes);
+ str = ks_pool_resize(str, bytes);
printf("2 STR [%s]\n", str);
ok(!strcmp(str, STR));
printf("FREE 2:\n");
- status = ks_pool_free(pool, &str);
+ status = ks_pool_free(&str);
ok(status == KS_STATUS_SUCCESS);
if (status != KS_STATUS_SUCCESS) {
fprintf(stderr, "FREE2 ERR: [%s]\n", ks_pool_strerror(status));
while(ks_q_pop(q, &pop) == KS_STATUS_SUCCESS) {
//int *i = (int *)pop;
//printf("POP %d\n", *i);
- ks_pool_free(thread->pool, &pop);
+ ks_pool_free(&pop);
}
return NULL;
static void do_flush(ks_q_t *q, void *ptr, void *flush_data)
{
- ks_pool_t *pool = (ks_pool_t *)flush_data;
- ks_pool_free(pool, &ptr);
+ /* pool is derived from ptr's own allocation; flush_data no longer needed */
+ ks_pool_free(&ptr);
}
//int *i = (int *)pop;
//printf("%p POP %d\n", (void *)pthread_self(), *i);
popped++;
- ks_pool_free(thread->pool, &pop);
+ ks_pool_free(&pop);
} else if (status == KS_STATUS_INACTIVE) {
break;
} else if (t2->try && ks_q_size(t2->q)) {
ks_init();
- plan(7);
+ plan(4);
ks_pool_open(&pool);
- buf = (uint32_t *)ks_pool_resize(pool, buf, sizeof(uint32_t) * 1);
+ buf = (uint32_t *)ks_pool_alloc(pool, sizeof(uint32_t) * 1);
ok(buf != NULL);
ptr = (intptr_t)buf;
- buf = (uint32_t *)ks_pool_resize(pool, buf, sizeof(uint32_t) * 2);
+ buf = (uint32_t *)ks_pool_resize(buf, sizeof(uint32_t) * 1);
ok(buf != NULL);
ok((intptr_t)buf == ptr);
- buf = (uint32_t *)ks_pool_resize(pool, buf, sizeof(uint32_t) * 1);
+ buf = (uint32_t *)ks_pool_resize(buf, sizeof(uint32_t) * 2);
ok(buf != NULL);
- ok((intptr_t)buf == ptr);
-
- buf = (uint32_t *)ks_pool_resize(pool, buf, sizeof(uint32_t) * 2);
- ok(buf != NULL);
-
- ok((intptr_t)buf == ptr);
-
- ks_pool_free(pool, &buf);
+ ks_pool_free(&buf);
ks_pool_close(&pool);
for (i = 0; i < LOOP_COUNT; i++) {
if (last_mem) {
- ks_pool_free(thread->pool, &last_mem);
+ ks_pool_free(&last_mem);
}
- mem = ks_pool_alloc(thread->pool, 1024);
+ mem = ks_pool_alloc(ks_pool_get(thread), 1024);
last_mem = mem;
}
ok( ks_thread_priority(thread_p) == KS_PRI_IMPORTANT );
end_todo;
- ks_pool_free(pool, &thread_p);
+ ks_pool_free(&thread_p);
}
static void join_threads(void)