return KS_STATUS_SUCCESS;
}
+ks_status_t blade_transport_wss_init_create(blade_transport_wss_init_t **bt_wssiP, blade_module_wss_t *bm_wss, ks_socket_t sock)
+{
+ blade_transport_wss_init_t *bt_wssi = NULL;
+
+ ks_assert(bt_wssiP);
+ ks_assert(bm_wss);
+ ks_assert(sock != KS_SOCK_INVALID);
+
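+ // Allocate the per-connection init state from the module's pool; it carries the module pointer and socket into connection setup.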
+ bt_wssi = ks_pool_alloc(bm_wss->pool, sizeof(blade_transport_wss_init_t));
+ bt_wssi->module = bm_wss;
+ bt_wssi->pool = bm_wss->pool;
+ bt_wssi->sock = sock;
+
+ *bt_wssiP = bt_wssi;
+
+ return KS_STATUS_SUCCESS;
+}
+
+ks_status_t blade_transport_wss_init_destroy(blade_transport_wss_init_t **bt_wssiP)
+{
+ blade_transport_wss_init_t *bt_wssi = NULL;
+
+ ks_assert(bt_wssiP);
+ ks_assert(*bt_wssiP);
+
+ bt_wssi = *bt_wssiP;
+
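+ // Free via the double pointer so the pool can also clear the caller's reference.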
+ ks_pool_free(bt_wssi->pool, bt_wssiP);
+
+ return KS_STATUS_SUCCESS;
+}
+
ks_status_t blade_module_wss_config(blade_module_wss_t *bm_wss, config_setting_t *config)
{
config_setting_t *wss = NULL;
wss = config_setting_get_member(config, "wss");
- if (!wss) {
- ks_log(KS_LOG_DEBUG, "!wss\n");
- return KS_STATUS_FAIL;
- }
- wss_endpoints = config_setting_get_member(wss, "endpoints");
- if (!wss_endpoints) {
- ks_log(KS_LOG_DEBUG, "!wss_endpoints\n");
- return KS_STATUS_FAIL;
- }
- wss_endpoints_ipv4 = config_lookup_from(wss_endpoints, "ipv4");
- wss_endpoints_ipv6 = config_lookup_from(wss_endpoints, "ipv6");
- if (wss_endpoints_ipv4) {
- if (config_setting_type(wss_endpoints_ipv4) != CONFIG_TYPE_LIST) return KS_STATUS_FAIL;
- if ((config_wss_endpoints_ipv4_length = config_setting_length(wss_endpoints_ipv4)) > BLADE_MODULE_WSS_ENDPOINTS_MULTIHOME_MAX)
+ if (wss) {
+ wss_endpoints = config_setting_get_member(wss, "endpoints");
+ if (!wss_endpoints) {
+ ks_log(KS_LOG_DEBUG, "!wss_endpoints\n");
return KS_STATUS_FAIL;
-
- for (int32_t index = 0; index < config_wss_endpoints_ipv4_length; ++index) {
- element = config_setting_get_elem(wss_endpoints_ipv4, index);
- tmp1 = config_lookup_from(element, "address");
- tmp2 = config_lookup_from(element, "port");
- if (!tmp1 || !tmp2) return KS_STATUS_FAIL;
- if (config_setting_type(tmp1) != CONFIG_TYPE_STRING) return KS_STATUS_FAIL;
- if (config_setting_type(tmp2) != CONFIG_TYPE_INT) return KS_STATUS_FAIL;
-
- if (ks_addr_set(&config_wss_endpoints_ipv4[index],
- config_setting_get_string(tmp1),
- config_setting_get_int(tmp2),
- AF_INET) != KS_STATUS_SUCCESS) return KS_STATUS_FAIL;
- ks_log(KS_LOG_DEBUG,
- "Binding to IPV4 %s on port %d\n",
- ks_addr_get_host(&config_wss_endpoints_ipv4[index]),
- ks_addr_get_port(&config_wss_endpoints_ipv4[index]));
}
- }
- if (wss_endpoints_ipv6) {
- if (config_setting_type(wss_endpoints_ipv6) != CONFIG_TYPE_LIST) return KS_STATUS_FAIL;
- if ((config_wss_endpoints_ipv6_length = config_setting_length(wss_endpoints_ipv6)) > BLADE_MODULE_WSS_ENDPOINTS_MULTIHOME_MAX)
- return KS_STATUS_FAIL;
+ wss_endpoints_ipv4 = config_lookup_from(wss_endpoints, "ipv4");
+ wss_endpoints_ipv6 = config_lookup_from(wss_endpoints, "ipv6");
+ if (wss_endpoints_ipv4) {
+ if (config_setting_type(wss_endpoints_ipv4) != CONFIG_TYPE_LIST) return KS_STATUS_FAIL;
+ if ((config_wss_endpoints_ipv4_length = config_setting_length(wss_endpoints_ipv4)) > BLADE_MODULE_WSS_ENDPOINTS_MULTIHOME_MAX)
+ return KS_STATUS_FAIL;
- for (int32_t index = 0; index < config_wss_endpoints_ipv6_length; ++index) {
- element = config_setting_get_elem(wss_endpoints_ipv6, index);
- tmp1 = config_lookup_from(element, "address");
- tmp2 = config_lookup_from(element, "port");
- if (!tmp1 || !tmp2) return KS_STATUS_FAIL;
- if (config_setting_type(tmp1) != CONFIG_TYPE_STRING) return KS_STATUS_FAIL;
- if (config_setting_type(tmp2) != CONFIG_TYPE_INT) return KS_STATUS_FAIL;
-
-
- if (ks_addr_set(&config_wss_endpoints_ipv6[index],
- config_setting_get_string(tmp1),
- config_setting_get_int(tmp2),
- AF_INET6) != KS_STATUS_SUCCESS) return KS_STATUS_FAIL;
- ks_log(KS_LOG_DEBUG,
- "Binding to IPV6 %s on port %d\n",
- ks_addr_get_host(&config_wss_endpoints_ipv6[index]),
- ks_addr_get_port(&config_wss_endpoints_ipv6[index]));
+ for (int32_t index = 0; index < config_wss_endpoints_ipv4_length; ++index) {
+ element = config_setting_get_elem(wss_endpoints_ipv4, index);
+ tmp1 = config_lookup_from(element, "address");
+ tmp2 = config_lookup_from(element, "port");
+ if (!tmp1 || !tmp2) return KS_STATUS_FAIL;
+ if (config_setting_type(tmp1) != CONFIG_TYPE_STRING) return KS_STATUS_FAIL;
+ if (config_setting_type(tmp2) != CONFIG_TYPE_INT) return KS_STATUS_FAIL;
+
+ if (ks_addr_set(&config_wss_endpoints_ipv4[index],
+ config_setting_get_string(tmp1),
+ config_setting_get_int(tmp2),
+ AF_INET) != KS_STATUS_SUCCESS) return KS_STATUS_FAIL;
+ ks_log(KS_LOG_DEBUG,
+ "Binding to IPV4 %s on port %d\n",
+ ks_addr_get_host(&config_wss_endpoints_ipv4[index]),
+ ks_addr_get_port(&config_wss_endpoints_ipv4[index]));
+ }
+ }
+ if (wss_endpoints_ipv6) {
+ if (config_setting_type(wss_endpoints_ipv6) != CONFIG_TYPE_LIST) return KS_STATUS_FAIL;
+ if ((config_wss_endpoints_ipv6_length = config_setting_length(wss_endpoints_ipv6)) > BLADE_MODULE_WSS_ENDPOINTS_MULTIHOME_MAX)
+ return KS_STATUS_FAIL;
+
+ for (int32_t index = 0; index < config_wss_endpoints_ipv6_length; ++index) {
+ element = config_setting_get_elem(wss_endpoints_ipv6, index);
+ tmp1 = config_lookup_from(element, "address");
+ tmp2 = config_lookup_from(element, "port");
+ if (!tmp1 || !tmp2) return KS_STATUS_FAIL;
+ if (config_setting_type(tmp1) != CONFIG_TYPE_STRING) return KS_STATUS_FAIL;
+ if (config_setting_type(tmp2) != CONFIG_TYPE_INT) return KS_STATUS_FAIL;
+
+
+ if (ks_addr_set(&config_wss_endpoints_ipv6[index],
+ config_setting_get_string(tmp1),
+ config_setting_get_int(tmp2),
+ AF_INET6) != KS_STATUS_SUCCESS) return KS_STATUS_FAIL;
+ ks_log(KS_LOG_DEBUG,
+ "Binding to IPV6 %s on port %d\n",
+ ks_addr_get_host(&config_wss_endpoints_ipv6[index]),
+ ks_addr_get_port(&config_wss_endpoints_ipv6[index]));
+ }
+ }
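+ // Require at least one endpoint to bind across IPv4 and IPv6; an endpoints section that yields neither is a configuration error.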
+ if (config_wss_endpoints_ipv4_length + config_wss_endpoints_ipv6_length <= 0) return KS_STATUS_FAIL;
+ tmp1 = config_lookup_from(wss_endpoints, "backlog");
+ if (tmp1) {
+ if (config_setting_type(tmp1) != CONFIG_TYPE_INT) return KS_STATUS_FAIL;
+ config_wss_endpoints_backlog = config_setting_get_int(tmp1);
+ }
+ wss_ssl = config_setting_get_member(wss, "ssl");
+ if (wss_ssl) {
+ // @todo: SSL stuffs from wss_ssl into config_wss_ssl envelope
}
- }
- if (config_wss_endpoints_ipv4_length + config_wss_endpoints_ipv6_length <= 0) return KS_STATUS_FAIL;
- tmp1 = config_lookup_from(wss_endpoints, "backlog");
- if (tmp1) {
- if (config_setting_type(tmp1) != CONFIG_TYPE_INT) return KS_STATUS_FAIL;
- config_wss_endpoints_backlog = config_setting_get_int(tmp1);
- }
- wss_ssl = config_setting_get_member(wss, "ssl");
- if (wss_ssl) {
- // @todo: SSL stuffs from wss_ssl into config_wss_ssl envelope
}
goto done;
}
+ ks_log(KS_LOG_DEBUG, "Listeners Before\n");
+ for (int index = 0; index < bm_wss->listeners_count; ++index) {
+ ks_log(KS_LOG_DEBUG, " Listener %d = %d\n", index, bm_wss->listeners_poll[index].fd);
+ }
+
listener_index = bm_wss->listeners_count++;
bm_wss->listeners_poll = (struct pollfd *)ks_pool_resize(bm_wss->pool,
                                                          bm_wss->listeners_poll,
                                                          sizeof(struct pollfd) * bm_wss->listeners_count);
bm_wss->listeners_poll[listener_index].fd = listener;
bm_wss->listeners_poll[listener_index].events = POLLIN | POLLERR;
+ ks_log(KS_LOG_DEBUG, "Listeners After\n");
+ for (int index = 0; index < bm_wss->listeners_count; ++index) {
+ ks_log(KS_LOG_DEBUG, " Listener %d = %d\n", index, bm_wss->listeners_poll[index].fd);
+ }
+
done:
if (ret != KS_STATUS_SUCCESS) {
if (listener != KS_SOCK_INVALID) {
bm_wss = (blade_module_wss_t *)data;
+ ks_log(KS_LOG_DEBUG, "Started\n");
while (!bm_wss->shutdown) {
// @todo take exact timeout from a setting in config_wss_endpoints
if (ks_poll(bm_wss->listeners_poll, bm_wss->listeners_count, 100) > 0) {
for (int32_t index = 0; index < bm_wss->listeners_count; ++index) {
ks_socket_t sock = KS_SOCK_INVALID;
- if (!(bm_wss->listeners_poll[index].revents & POLLIN)) continue;
if (bm_wss->listeners_poll[index].revents & POLLERR) {
// @todo: error handling, just skip the listener for now, it might recover, could skip X times before closing?
+ ks_log(KS_LOG_DEBUG, "Listener POLLERR\n");
continue;
}
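+ // Only try to accept when this listener actually reported readable input.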
+ if (!(bm_wss->listeners_poll[index].revents & POLLIN)) continue;
if ((sock = accept(bm_wss->listeners_poll[index].fd, NULL, NULL)) == KS_SOCK_INVALID) {
// @todo: error handling, just skip the socket for now as most causes are because remote side became unreachable
continue;
}
+ ks_log(KS_LOG_DEBUG, "Socket Accepted\n");
+
blade_transport_wss_init_create(&bt_wss_init, bm_wss, sock);
ks_assert(bt_wss_init);
-
+
blade_connection_create(&bc, bm_wss->handle, bt_wss_init, bm_wss->transport_callbacks);
ks_assert(bc);
if (bt_wss) blade_transport_wss_destroy(&bt_wss);
}
}
+ ks_log(KS_LOG_DEBUG, "Stopped\n");
return NULL;
}
ks_status_t blade_transport_wss_on_connect(blade_connection_t **bcP, blade_module_t *bm, blade_identity_t *target)
{
+ ks_status_t ret = KS_STATUS_SUCCESS;
+ blade_module_wss_t *bm_wss = NULL;
+ ks_sockaddr_t addr;
+ ks_socket_t sock = KS_SOCK_INVALID;
+ int family = AF_INET;
+ const char *ip = NULL;
+ const char *portstr = NULL;
+ ks_port_t port = 1234;
+ blade_transport_wss_init_t *bt_wss_init = NULL;
+ blade_connection_t *bc = NULL;
+
ks_assert(bcP);
ks_assert(bm);
ks_assert(target);
+ bm_wss = (blade_module_wss_t *)blade_module_data_get(bm);
+
*bcP = NULL;
- // @todo connect-out equivilent of accept
ks_log(KS_LOG_DEBUG, "Connect Callback: %s\n", blade_identity_uri(target));
- return KS_STATUS_SUCCESS;
+ // @todo completely rework all of this once more is known about connecting when an identity has no explicit transport details but this transport
+ // has been chosen anyway
+ ip = blade_identity_parameter_get(target, "host");
+ portstr = blade_identity_parameter_get(target, "port");
+ if (!ip) {
+ // @todo: temporary, this should fall back on DNS SRV or whatever else can turn "a@b.com" into an ip (and port?) to connect to
+ // also need to deal with hostname lookup, so identities with wss transport need to have a host parameter that is an IP for the moment
+ ret = KS_STATUS_FAIL;
+ goto done;
+ }
+
+ // @todo wrap this code to get address family from string IP between IPV4 and IPV6, and put it in libks somewhere
+ {
+ ks_size_t len = strlen(ip);
+
+ if (len <= 3) {
+ ret = KS_STATUS_FAIL;
+ goto done;
+ }
+ if (ip[1] == '.' || ip[2] == '.' || (len > 3 && ip[3] == '.')) family = AF_INET;
+ else family = AF_INET6;
+ }
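+ // One possible shape for the wrapped helper (hypothetical name, not currently in libks):
+ //   int ks_ip_family_from_string(const char *ip) { return strchr(ip, ':') ? AF_INET6 : AF_INET; }
+ // checking for a ':' would also accept short IPv6 literals such as "::1" that the length test above rejects.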
+
+ if (portstr) {
+ int p = atoi(portstr);
+ if (p > 0 && p <= UINT16_MAX) port = p;
+ }
+
+ ks_addr_set(&addr, ip, port, family);
+ if ((sock = ks_socket_connect(SOCK_STREAM, IPPROTO_TCP, &addr)) == KS_SOCK_INVALID) {
+ // @todo: error handling, just fail for now as most causes are because remote side became unreachable
+ ret = KS_STATUS_FAIL;
+ goto done;
+ }
+
+ ks_log(KS_LOG_DEBUG, "Socket Connected\n");
+
+ blade_transport_wss_init_create(&bt_wss_init, bm_wss, sock);
+ ks_assert(bt_wss_init);
+
+ blade_connection_create(&bc, bm_wss->handle, bt_wss_init, bm_wss->transport_callbacks);
+ ks_assert(bc);
+
+ if (blade_connection_startup(bc, BLADE_CONNECTION_DIRECTION_OUTBOUND) != KS_STATUS_SUCCESS) {
+ blade_connection_destroy(&bc);
+ blade_transport_wss_init_destroy(&bt_wss_init);
+ ks_socket_close(&sock);
+ ret = KS_STATUS_FAIL;
+ goto done;
+ }
+ // @todo make sure it's sensible to be mixing outbound and inbound connections in the same list, but this allows entering the destruction pipeline
+ // for module shutdown, disconnects and errors without special considerations
+ list_append(&bm_wss->connected, bc);
+ *bcP = bc;
+
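+ // Hand the connection to its state machine; the registered outbound state callbacks take it from NEW through connect and attach.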
+ blade_connection_state_set(bc, BLADE_CONNECTION_STATE_NEW);
+
+ done:
+ return ret;
}
blade_connection_rank_t blade_transport_wss_on_rank(blade_connection_t *bc, blade_identity_t *target)
blade_connection_state_hook_t blade_transport_wss_on_state_new_outbound(blade_connection_t *bc, blade_connection_state_condition_t condition)
{
+ blade_transport_wss_t *bt_wss = NULL;
+ blade_transport_wss_init_t *bt_wss_init = NULL;
+
ks_assert(bc);
ks_log(KS_LOG_DEBUG, "State Callback: %d\n", (int32_t)condition);
if (condition == BLADE_CONNECTION_STATE_CONDITION_PRE) return BLADE_CONNECTION_STATE_HOOK_SUCCESS;
+ bt_wss_init = (blade_transport_wss_init_t *)blade_connection_transport_init_get(bc);
+
+ blade_transport_wss_create(&bt_wss, bt_wss_init->module, bt_wss_init->sock);
+ ks_assert(bt_wss);
+
+ blade_connection_transport_set(bc, bt_wss);
+
return BLADE_CONNECTION_STATE_HOOK_SUCCESS;
}
blade_connection_state_hook_t blade_transport_wss_on_state_connect_outbound(blade_connection_t *bc, blade_connection_state_condition_t condition)
{
+ blade_transport_wss_t *bt_wss = NULL;
+
ks_assert(bc);
ks_log(KS_LOG_DEBUG, "State Callback: %d\n", (int32_t)condition);
+ if (condition == BLADE_CONNECTION_STATE_CONDITION_PRE) return BLADE_CONNECTION_STATE_HOOK_SUCCESS;
+
+ bt_wss = (blade_transport_wss_t *)blade_connection_transport_get(bc);
+
+ // @todo: SSL init stuffs based on data from config to pass into kws_init
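+ // No SSL context is passed yet (the NULL argument); see the @todo above. The remaining arguments set up the client side of the WebSocket handshake over the freshly connected socket.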
+ if (kws_init(&bt_wss->kws, bt_wss->sock, NULL, "/blade:blade.invalid:blade", KWS_BLOCK, bt_wss->pool) != KS_STATUS_SUCCESS) {
+ // @todo error logging
+ return BLADE_CONNECTION_STATE_HOOK_DISCONNECT;
+ }
+
return BLADE_CONNECTION_STATE_HOOK_SUCCESS;
}
ks_log(KS_LOG_DEBUG, "State Callback: %d\n", (int32_t)condition);
- return BLADE_CONNECTION_STATE_HOOK_SUCCESS;
+ ks_sleep_ms(1000); // @todo temporary testing, remove this and return success once negotiations are done
+ return BLADE_CONNECTION_STATE_HOOK_BYPASS;
}
blade_connection_state_hook_t blade_transport_wss_on_state_detach(blade_connection_t *bc, blade_connection_state_condition_t condition)
return BLADE_CONNECTION_STATE_HOOK_SUCCESS;
}
-ks_status_t blade_transport_wss_init_create(blade_transport_wss_init_t **bt_wssiP, blade_module_wss_t *bm_wss, ks_socket_t sock)
-{
- blade_transport_wss_init_t *bt_wssi = NULL;
-
- ks_assert(bt_wssiP);
- ks_assert(bm_wss);
- ks_assert(sock != KS_SOCK_INVALID);
-
- bt_wssi = ks_pool_alloc(bm_wss->pool, sizeof(blade_transport_wss_init_t));
- bt_wssi->module = bm_wss;
- bt_wssi->pool = bm_wss->pool;
- bt_wssi->sock = sock;
-
- *bt_wssiP = bt_wssi;
-
- return KS_STATUS_SUCCESS;
-}
-
-ks_status_t blade_transport_wss_init_destroy(blade_transport_wss_init_t **bt_wssiP)
-{
- blade_transport_wss_init_t *bt_wssi = NULL;
-
- ks_assert(bt_wssiP);
- ks_assert(*bt_wssiP);
-
- bt_wssi = *bt_wssiP;
-
- ks_pool_free(bt_wssi->pool, bt_wssiP);
-
- return KS_STATUS_SUCCESS;
-}
-
/* For Emacs:
* Local Variables:
* mode:c
ks_pool_t *pool;
ks_thread_pool_t *tpool;
- config_setting_t *config_service;
+ config_setting_t *config_directory;
config_setting_t *config_datastore;
ks_hash_t *transports;
+ blade_identity_t *identity;
blade_datastore_t *datastore;
};
ks_status_t blade_handle_config(blade_handle_t *bh, config_setting_t *config)
{
- config_setting_t *service = NULL;
+ config_setting_t *directory = NULL;
config_setting_t *datastore = NULL;
ks_assert(bh);
if (!config) return KS_STATUS_FAIL;
if (!config_setting_is_group(config)) return KS_STATUS_FAIL;
- service = config_setting_get_member(config, "service");
+ directory = config_setting_get_member(config, "directory");
datastore = config_setting_get_member(config, "datastore");
//if (datastore && !config_setting_is_group(datastore)) return KS_STATUS_FAIL;
- bh->config_service = service;
+ bh->config_directory = directory;
bh->config_datastore = datastore;
return KS_STATUS_SUCCESS;
return KS_STATUS_FAIL;
}
}
+
+ // @todo load DSOs
+ // @todo call onload and onstartup callbacks for modules from DSOs
+
return KS_STATUS_SUCCESS;
}
{
ks_assert(bh);
- // @todo cleanup registered transports
+ // @todo call onshutdown and onunload callbacks for modules from DSOs
+
+ // @todo unload DSOs
+
if (blade_handle_datastore_available(bh)) blade_datastore_destroy(&bh->datastore);
-
+
return KS_STATUS_SUCCESS;
}
ks_assert(bh);
ks_assert(target);
+ // @todo this should take a callback, and push this to a queue to be processed async from another thread on the handle
+ // which will allow the onconnect callback to block while doing things like DNS lookups without having unknown
+ // impact depending on the caller thread
+
ks_hash_read_lock(bh->transports);
- blade_identity_parameter_get(target, "transport", &tname);
+ tname = blade_identity_parameter_get(target, "transport");
if (tname) {
bhtr = ks_hash_search(bh->transports, (void *)tname, KS_UNLOCKED);
if (!bhtr) {
// @todo error logging, target has an explicit transport that is not available in the local transports registry
// discuss later whether this scenario should still attempt other transports when target is explicit
+ // @note discussions indicate that by default messages should favor relaying through a master service, unless
+ // a direct connection to the target already exists (and if the target is the master node, there is
+ // no conflict of proper routing). This also applies to routing for identities which relate to groups: relaying should
+ // most often occur through a master service, however there may be scenarios where an existing session is
+ // dedicated to faster delivery for a group (i.e., through an amqp cluster directly, such as master services
+ // syncing with each other through a pub/sub). There is also the potential that instead of a separate session, the
+ // current session with a master service may have another connection attached which represents access through
+ // amqp, which in turn acts as a preferred router for group identities only.
+ // This information does not directly apply to connecting, but should be noted for the next level up where you simply
+ // send a message, which will not actually connect, only check for existing sessions for the target and master service.
+ // @note relaying by master services should take a slightly different path: when they receive something not for the
+ // master service itself, they should relay it on to all other master services, which in turn (including the original
+ // receiver) pass it on to any sessions matching an identity that is part of the group; alternatively they can use a pub/sub
+ // like amqp to relay between the master services more efficiently than using the websocket to send every master service
+ // session the message individually.
}
} else {
for (ks_hash_iterator_t *it = ks_hash_first(bh->transports, KS_UNLOCKED); it; it = ks_hash_next(&it)) {