static const enum protolayer_type protolayer_grp_udp53[] = {
PROTOLAYER_TYPE_UDP,
PROTOLAYER_TYPE_DNS_DGRAM,
- PROTOLAYER_TYPE_NULL
};
static const enum protolayer_type protolayer_grp_tcp53[] = {
PROTOLAYER_TYPE_TCP,
PROTOLAYER_TYPE_DNS_MULTI_STREAM,
- PROTOLAYER_TYPE_NULL
};
static const enum protolayer_type protolayer_grp_dot[] = {
PROTOLAYER_TYPE_TCP,
PROTOLAYER_TYPE_TLS,
PROTOLAYER_TYPE_DNS_MULTI_STREAM,
- PROTOLAYER_TYPE_NULL
};
static const enum protolayer_type protolayer_grp_doh[] = {
PROTOLAYER_TYPE_TLS,
PROTOLAYER_TYPE_HTTP,
PROTOLAYER_TYPE_DNS_UNSIZED_STREAM,
- PROTOLAYER_TYPE_NULL
};
static const enum protolayer_type protolayer_grp_doq[] = {
- // not yet used
- PROTOLAYER_TYPE_NULL
+ // not yet used; the NULL placeholder keeps the array non-empty
+ PROTOLAYER_TYPE_NULL,
};
+struct protolayer_grp {
+ const enum protolayer_type *layers;
+ size_t num_layers;
+};
+
+#define PROTOLAYER_GRP(p_array) { \
+ .layers = (p_array), \
+ .num_layers = sizeof((p_array)) / sizeof((p_array)[0]), \
+}
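+/* For illustration: `PROTOLAYER_GRP(protolayer_grp_dot)` expands to
+ * `{ .layers = protolayer_grp_dot, .num_layers = 3 }`; the layer count is
+ * derived from the array size at compile time, which is why the
+ * `PROTOLAYER_TYPE_NULL` terminators above are no longer needed. */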
+
/** Sequences of layers, or groups, mapped by `enum kr_proto`.
*
* Each group represents a sequence of layers in the unwrap direction (wrap
- * direction being the reverse). A `PROTOLAYER_TYPE_NULL` value is used to
- * indicate the end of the list of protocol layers. The array name's suffix must
+ * direction being the reverse). The array name's suffix must
* be the one defined as *Variable name* (2nd parameter) in the
* `KR_PROTO_MAP` macro. */
-static const enum protolayer_type *protolayer_grps[KR_PROTO_COUNT] = {
-#define XX(cid, vid, name) [KR_PROTO_##cid] = protolayer_grp_##vid,
+static const struct protolayer_grp protolayer_grps[KR_PROTO_COUNT] = {
+#define XX(cid, vid, name) [KR_PROTO_##cid] = PROTOLAYER_GRP(protolayer_grp_##vid),
KR_PROTO_MAP(XX)
#undef XX
};
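+/* For example, assuming a map entry `XX(UDP53, udp53, "DNS UDP")`, the
+ * `XX` line above expands to:
+ * `[KR_PROTO_UDP53] = PROTOLAYER_GRP(protolayer_grp_udp53),` */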
}
}
-struct protolayer_payload protolayer_as_buffer(const struct protolayer_payload *payload)
+struct protolayer_payload protolayer_payload_as_buffer(
+ const struct protolayer_payload *payload)
{
if (payload->type == PROTOLAYER_PAYLOAD_BUFFER)
return *payload;
static inline struct protolayer_data *protolayer_sess_data_get(
struct protolayer_manager *m, size_t layer_ix)
{
- if (kr_fails_assert(layer_ix < m->num_layers))
+ const struct protolayer_grp *grp = &protolayer_grps[m->proto];
+ if (kr_fails_assert(layer_ix < grp->num_layers))
return NULL;
/* See doc comment of `struct protolayer_manager::data` */
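+ /* In short, the layout is: [num_layers session-data offsets]
+ * [num_layers iteration-data offsets][per-layer session data]. */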
const ssize_t *offsets = (ssize_t *)m->data;
- char *pl_data_beg = &m->data[2 * m->num_layers * sizeof(*offsets)];
+ char *pl_data_beg = &m->data[2 * grp->num_layers * sizeof(*offsets)];
ssize_t offset = offsets[layer_ix];
if (offset < 0) /* No session data for this layer */
struct protolayer_iter_ctx *ctx, size_t layer_ix)
{
struct protolayer_manager *m = ctx->manager;
- if (kr_fails_assert(layer_ix < m->num_layers))
+ const struct protolayer_grp *grp = &protolayer_grps[m->proto];
+ if (kr_fails_assert(layer_ix < grp->num_layers))
return NULL;
/* See doc comment of `struct protolayer_manager::data` */
- const ssize_t *offsets = (ssize_t *)&m->data[m->num_layers * sizeof(*offsets)];
+ const ssize_t *offsets = (ssize_t *)&m->data[grp->num_layers * sizeof(*offsets)];
ssize_t offset = offsets[layer_ix];
if (offset < 0) /* No iteration data for this layer */
static inline ssize_t protolayer_manager_get_protocol(
struct protolayer_manager *m, enum protolayer_type protocol)
{
- for (ssize_t i = 0; i < m->num_layers; i++) {
- enum protolayer_type found = protolayer_grps[m->grp][i];
+ const struct protolayer_grp *grp = &protolayer_grps[m->proto];
+ for (ssize_t i = 0; i < grp->num_layers; i++) {
+ enum protolayer_type found = grp->layers[i];
if (protocol == found)
return i;
}
static inline bool protolayer_iter_ctx_is_last(struct protolayer_iter_ctx *ctx)
{
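+ /* The wrap direction traverses layers in reverse, so layer 0 is last. */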
unsigned int last_ix = (ctx->direction == PROTOLAYER_UNWRAP)
- ? ctx->manager->num_layers - 1
+ ? protolayer_grps[ctx->manager->proto].num_layers - 1
: 0;
return ctx->layer_ix == last_ix;
}
{
if (grp >= KR_PROTO_COUNT)
return "(invalid)";
- enum protolayer_type p = protolayer_grps[grp][layer_ix];
+ enum protolayer_type p = protolayer_grps[grp].layers[layer_ix];
return protolayer_layer_name(p);
}
static inline const char *layer_name_ctx(struct protolayer_iter_ctx *ctx)
{
- return layer_name(ctx->manager->grp, ctx->layer_ix);
+ return layer_name(ctx->manager->proto, ctx->layer_ix);
}
static int protolayer_iter_ctx_finish(struct protolayer_iter_ctx *ctx, int ret)
struct session2 *session = ctx->manager->session;
struct protolayer_manager *m = ctx->manager;
- struct protolayer_globals *globals = &protolayer_globals[m->grp];
- for (size_t i = 0; i < m->num_layers; i++) {
+ const struct protolayer_grp *grp = &protolayer_grps[m->proto];
+ for (size_t i = 0; i < grp->num_layers; i++) {
+ /* `protolayer_globals` is indexed by `enum protolayer_type`, not by
+ * `enum kr_proto`, so it must be looked up per layer. */
+ const struct protolayer_globals *globals = &protolayer_globals[grp->layers[i]];
struct protolayer_data *d = protolayer_iter_data_get(ctx, i);
if (globals->iter_deinit)
globals->iter_deinit(m, ctx, d);
if (ret)
VERBOSE_LOG(session, "layer context of group '%s' (on %u: %s) ended with return code %d\n",
- kr_proto_name(ctx->manager->grp),
+ kr_proto_name(ctx->manager->proto),
ctx->layer_ix, layer_name_ctx(ctx), ret);
if (ctx->status)
VERBOSE_LOG(session, "iteration of group '%s' (on %u: %s) ended with status %d\n",
- kr_proto_name(ctx->manager->grp),
+ kr_proto_name(ctx->manager->proto),
ctx->layer_ix, layer_name_ctx(ctx), ctx->status);
if (ctx->finished_cb)
struct session2 *session = ctx->manager->session;
if (ctx->payload.type == PROTOLAYER_PAYLOAD_WIRE_BUF) {
- ctx->payload = protolayer_as_buffer(&ctx->payload);
+ ctx->payload = protolayer_payload_as_buffer(&ctx->payload);
}
if (kr_log_is_debug(PROTOLAYER, NULL)) {
return PROTOLAYER_RET_ASYNC;
}
-static void protolayer_ensure_long_lived(struct protolayer_iter_ctx *ctx)
+static void protolayer_payload_ensure_long_lived(struct protolayer_iter_ctx *ctx)
{
if (!ctx->payload.short_lived)
return;
protolayer_payload_copy(buf, &ctx->payload, buf_len);
ctx->async_buffer = buf;
- ctx->payload = protolayer_buffer(buf, buf_len, false);
+ ctx->payload = protolayer_payload_buffer(buf, buf_len, false);
}
/** Processes as many layers as possible synchronously, returning when either
static int protolayer_step(struct protolayer_iter_ctx *ctx)
{
while (true) {
- if (kr_fails_assert(ctx->manager->grp < KR_PROTO_COUNT))
+ if (kr_fails_assert(ctx->manager->proto < KR_PROTO_COUNT))
return kr_error(EFAULT);
- enum protolayer_type protocol = protolayer_grps[ctx->manager->grp][ctx->layer_ix];
+ enum protolayer_type protocol = protolayer_grps[ctx->manager->proto].layers[ctx->layer_ix];
struct protolayer_globals *globals = &protolayer_globals[protocol];
ctx->async_mode = false;
if (!ctx->action) {
/* Next step is from a callback */
ctx->async_mode = true;
- protolayer_ensure_long_lived(ctx);
+ protolayer_payload_ensure_long_lived(ctx);
return PROTOLAYER_RET_ASYNC;
}
if (manager->session->closing)
return kr_error(ECANCELED);
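+ /* Check the protocol before allocating `ctx` below, so that a failed
+ * assertion cannot leak the allocation. */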
+ if (kr_fails_assert(manager->proto < KR_PROTO_COUNT))
+ return kr_error(EFAULT);
+
struct protolayer_iter_ctx *ctx = malloc(manager->cb_ctx_size);
kr_require(ctx);
VERBOSE_LOG(manager->session,
"%s submitted to grp '%s' in %s direction (%zu: %s)\n",
protolayer_payload_name(payload.type),
- kr_proto_name(manager->grp),
+ kr_proto_name(manager->proto),
(direction == PROTOLAYER_UNWRAP) ? "unwrap" : "wrap",
- layer_ix, layer_name(manager->grp, layer_ix));
+ layer_ix, layer_name(manager->proto, layer_ix));
*ctx = (struct protolayer_iter_ctx) {
.payload = payload,
.finished_cb_baton = baton
};
- for (size_t i = 0; i < manager->num_layers; i++) {
- if (kr_fails_assert(ctx->manager->grp < KR_PROTO_COUNT))
- return kr_error(EFAULT);
-
- enum protolayer_type p = protolayer_grps[manager->grp][i];
- struct protolayer_globals *globals = &protolayer_globals[p];
+ const struct protolayer_grp *grp = &protolayer_grps[manager->proto];
+ for (size_t i = 0; i < grp->num_layers; i++) {
+ struct protolayer_globals *globals = &protolayer_globals[grp->layers[i]];
struct protolayer_data *iter_data = protolayer_iter_data_get(ctx, i);
if (iter_data) {
memset(iter_data, 0, globals->iter_size);
/** Allocates and initializes a new manager. */
static struct protolayer_manager *protolayer_manager_new(
struct session2 *s,
- enum kr_proto grp,
+ enum kr_proto proto,
struct protolayer_data_param *layer_param,
size_t layer_param_count)
{
- if (kr_fails_assert(s && grp))
+ if (kr_fails_assert(s && proto && proto < KR_PROTO_COUNT))
return NULL;
- size_t num_layers = 0;
size_t manager_size = sizeof(struct protolayer_manager);
size_t cb_ctx_size = sizeof(struct protolayer_iter_ctx);
- const enum protolayer_type *protocols = protolayer_grps[grp];
- if (kr_fails_assert(protocols))
- return NULL;
- const enum protolayer_type *p = protocols;
-
- /* Space for offset index */
- for (; *p; p++)
- num_layers++;
- if (kr_fails_assert(num_layers))
+ const struct protolayer_grp *grp = &protolayer_grps[proto];
+ if (kr_fails_assert(grp->num_layers))
return NULL;
size_t wire_buf_length = 0;
size_t wire_buf_max_length = 0;
- ssize_t offsets[2 * num_layers];
+ ssize_t offsets[2 * grp->num_layers];
manager_size += sizeof(offsets);
ssize_t *sess_offsets = offsets;
- ssize_t *iter_offsets = &offsets[num_layers];
+ ssize_t *iter_offsets = &offsets[grp->num_layers];
/* Space for layer-specific data, guaranteeing alignment */
size_t total_sess_data_size = 0;
size_t total_iter_data_size = 0;
- for (size_t i = 0; i < num_layers; i++) {
- const struct protolayer_globals *g = &protolayer_globals[protocols[i]];
+ for (size_t i = 0; i < grp->num_layers; i++) {
+ const struct protolayer_globals *g = &protolayer_globals[grp->layers[i]];
sess_offsets[i] = g->sess_size ? total_sess_data_size : -1;
total_sess_data_size += ALIGN_TO(g->sess_size, CPU_STRUCT_ALIGN);
/* Allocate and initialize manager */
struct protolayer_manager *m = calloc(1, manager_size);
kr_require(m);
- m->grp = grp;
+ m->proto = proto;
m->session = s;
- m->num_layers = num_layers;
m->cb_ctx_size = cb_ctx_size;
memcpy(m->data, offsets, sizeof(offsets));
kr_require(!ret);
/* Initialize the layer's session data */
- for (size_t i = 0; i < num_layers; i++) {
- struct protolayer_globals *globals = &protolayer_globals[protocols[i]];
+ for (size_t i = 0; i < grp->num_layers; i++) {
+ struct protolayer_globals *globals = &protolayer_globals[grp->layers[i]];
struct protolayer_data *sess_data = protolayer_sess_data_get(m, i);
if (sess_data) {
memset(sess_data, 0, globals->sess_size);
sess_data->session = s;
}
- void *param = get_init_param(protocols[i], layer_param, layer_param_count);
+ void *param = get_init_param(grp->layers[i], layer_param, layer_param_count);
if (globals->sess_init)
globals->sess_init(m, sess_data, param);
}
{
if (!m) return;
- for (size_t i = 0; i < m->num_layers; i++) {
- enum protolayer_type p = protolayer_grps[m->grp][i];
- struct protolayer_globals *globals = &protolayer_globals[p];
+ const struct protolayer_grp *grp = &protolayer_grps[m->proto];
+ for (size_t i = 0; i < grp->num_layers; i++) {
+ struct protolayer_globals *globals = &protolayer_globals[grp->layers[i]];
if (globals->sess_deinit) {
struct protolayer_data *sess_data = protolayer_sess_data_get(m, i);
globals->sess_deinit(m, sess_data);
if (wb->buf && wb->size >= size)
return kr_ok();
- wb->buf = realloc(wb->buf, size);
- kr_require(wb->buf);
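+ /* Assign via a temporary to avoid the classic `realloc()` pitfall of
+ * overwriting the only pointer to the buffer on failure. */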
+ char *newbuf = realloc(wb->buf, size);
+ kr_require(newbuf);
+ wb->buf = newbuf;
wb->size = size;
return kr_ok();
}
void *baton)
{
return protolayer_manager_submit(s->layers, PROTOLAYER_WRAP,
- s->layers->num_layers - 1,
+ protolayer_grps[s->layers->proto].num_layers - 1,
payload, comm, cb, baton);
}
{
bool cont;
struct protolayer_manager *m = s->layers;
- for (ssize_t i = m->num_layers - 1; i >= 0; i--) {
- enum protolayer_type p = protolayer_grps[s->layers->grp][i];
- struct protolayer_globals *globals = &protolayer_globals[p];
+ const struct protolayer_grp *grp = &protolayer_grps[m->proto];
+ for (ssize_t i = grp->num_layers - 1; i >= 0; i--) {
+ struct protolayer_globals *globals = &protolayer_globals[grp->layers[i]];
if (globals->event_wrap) {
struct protolayer_data *sess_data = protolayer_sess_data_get(m, i);
cont = globals->event_wrap(event, &baton, m, sess_data);
{
bool cont;
struct protolayer_manager *m = s->layers;
- for (ssize_t i = start_ix; i < m->num_layers; i++) {
- enum protolayer_type p = protolayer_grps[s->layers->grp][i];
- struct protolayer_globals *globals = &protolayer_globals[p];
+ const struct protolayer_grp *grp = &protolayer_grps[m->proto];
+ for (ssize_t i = start_ix; i < grp->num_layers; i++) {
+ struct protolayer_globals *globals = &protolayer_globals[grp->layers[i]];
if (globals->event_unwrap) {
struct protolayer_data *sess_data = protolayer_sess_data_get(m, i);
cont = globals->event_unwrap(event, &baton, m, sess_data);
void session2_init_request(struct session2 *s, struct kr_request *req)
{
struct protolayer_manager *m = s->layers;
- for (ssize_t i = 0; i < m->num_layers; i++) {
- enum protolayer_type p = protolayer_grps[s->layers->grp][i];
- struct protolayer_globals *globals = &protolayer_globals[p];
+ const struct protolayer_grp *grp = &protolayer_grps[m->proto];
+ for (ssize_t i = 0; i < grp->num_layers; i++) {
+ struct protolayer_globals *globals = &protolayer_globals[grp->layers[i]];
if (globals->request_init) {
struct protolayer_data *sess_data = protolayer_sess_data_get(m, i);
globals->request_init(m, req, sess_data);
goto exit_err;
}
int ret = session2_wrap(parent,
- protolayer_iovec(iov, iovcnt, iov_short_lived),
+ protolayer_payload_iovec(iov, iovcnt, iov_short_lived),
comm, session2_transport_parent_pushv_finished,
ctx);
return (ret < 0) ? ret : kr_ok();
* valid. */
enum protolayer_payload_type {
PROTOLAYER_PAYLOAD_NULL = 0,
-#define XX(cid, name) PROTOLAYER_PAYLOAD_ ## cid,
+#define XX(cid, name) PROTOLAYER_PAYLOAD_##cid,
PROTOLAYER_PAYLOAD_MAP(XX)
#undef XX
PROTOLAYER_PAYLOAD_COUNT
/** Time-to-live hint (e.g. for HTTP Cache-Control) */
unsigned int ttl;
- /** If `true`, the payload's memory may be freed early as kresd does not
- * completely control its lifetime. When going asynchronous, it needs to
- * be copied. */
+ /** If `true`, signifies that the memory this payload points to may
+ * become invalid when we return from one of the functions in the
+ * current stack. That is fine as long as all the protocol layer
+ * processing for this payload takes place in a single `session2_wrap()`
+ * or `session2_unwrap()` call, but may become a problem when a layer
+ * goes asynchronous (via `protolayer_async()`).
+ *
+ * Setting this to `true` ensures that the payload gets copied into a
+ * separate memory buffer if and only if a layer goes asynchronous; as
+ * long as all processing stays synchronous, no copies or reallocations
+ * are done.
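+ *
+ * For example (illustrative), a payload built from an on-stack buffer via
+ * `protolayer_payload_buffer(buf, len, true)` stays zero-copy while all
+ * layers run synchronously; only when a layer goes asynchronous does
+ * `protolayer_payload_ensure_long_lived()` copy the data into
+ * `ctx->async_buffer`. */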
bool short_lived;
union {
size_t max_len);
/** Convenience function to get a buffer-type payload. */
-static inline struct protolayer_payload protolayer_buffer(void *buf, size_t len,
- bool short_lived)
+static inline struct protolayer_payload protolayer_payload_buffer(
+ void *buf, size_t len, bool short_lived)
{
return (struct protolayer_payload){
.type = PROTOLAYER_PAYLOAD_BUFFER,
}
/** Convenience function to get an iovec-type payload. */
-static inline struct protolayer_payload protolayer_iovec(
+static inline struct protolayer_payload protolayer_payload_iovec(
struct iovec *iov, int iovcnt, bool short_lived)
{
return (struct protolayer_payload){
}
/** Convenience function to get a wire-buf-type payload. */
-static inline struct protolayer_payload protolayer_wire_buf(
+static inline struct protolayer_payload protolayer_payload_wire_buf(
struct wire_buf *wire_buf, bool short_lived)
{
return (struct protolayer_payload){
* If the input payload is `_WIRE_BUF`, the pointed-to wire buffer is reset to
* indicate that all of its contents have been used up, and the buffer is ready
* to be reused. */
-struct protolayer_payload protolayer_as_buffer(const struct protolayer_payload *payload);
+struct protolayer_payload protolayer_payload_as_buffer(
+ const struct protolayer_payload *payload);
/** A predefined queue type for iteration context. */
typedef queue_t(struct protolayer_iter_ctx *) protolayer_iter_ctx_queue_t;
- * `grp`), which define how the data processed by the session is to be
+ * `proto`), which define how the data processed by the session is to be
* interpreted. */
struct protolayer_manager {
- enum kr_proto grp;
+ enum kr_proto proto;
struct wire_buf wire_buf;
size_t wire_buf_max_length;
struct session2 *session;
- size_t num_layers;
size_t cb_ctx_size; /**< Size of a single callback context, including
* layer-specific per-iteration data. */