'ucw/mempool.c',
'ucw/mempool-fmt.c',
'murmurhash3/murmurhash3.c',
+ 'quicly/quicly.c',
+ 'quicly/defaults.c',
+ 'quicly/cc-reno.c',
+ 'quicly/frame.c',
+ 'quicly/streambuf.c',
+ 'quicly/sendstate.c',
+ 'quicly/recvstate.c',
+ 'quicly/sentmap.c',
+ 'quicly/ranges.c',
+ 'quicly/picotls/picotls.c',
+ 'quicly/picotls/openssl.c',
+ 'quicly/picotls/pembase64.c',
'base32hex.c',
'base64.c'
])
'contrib',
contrib_src,
include_directories: contrib_inc,
- dependencies: libknot,
+ dependencies: [libknot, openssl],
)
contrib_dep = declare_dependency(
--- /dev/null
+/*
+ * Copyright (c) 2019 Fastly, Janardhan Iyengar
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "contrib/quicly/cc.h"
+
+#define QUICLY_INITIAL_WINDOW 10
+#define QUICLY_MIN_CWND 2
+#define QUICLY_RENO_BETA 0.7
+
+/* Initializes the Reno congestion controller: zeroes all state, sets the
+ * initial congestion window to QUICLY_INITIAL_WINDOW (10) packets and leaves
+ * the slow-start threshold at UINT32_MAX, i.e. the sender starts in slow start. */
+void quicly_cc_init(quicly_cc_t *cc)
+{
+    memset(cc, 0, sizeof(quicly_cc_t));
+    cc->cwnd = QUICLY_INITIAL_WINDOW * QUICLY_MAX_PACKET_SIZE;
+    cc->ssthresh = UINT32_MAX;
+}
+
+// TODO: Avoid increase if sender was application limited
+/* Called when `bytes` are newly acknowledged. `largest_acked` is the largest
+ * acked packet number, `inflight` the bytes still in flight (must be >= bytes).
+ * Grows cwnd exponentially in slow start, by ~1 MSS per cwnd of acked bytes
+ * in congestion avoidance, and not at all while inside a recovery window. */
+void quicly_cc_on_acked(quicly_cc_t *cc, uint32_t bytes, uint64_t largest_acked, uint32_t inflight)
+{
+    assert(inflight >= bytes);
+    // no increases while in recovery
+    if (largest_acked < cc->recovery_end)
+        return;
+
+    // slow start
+    if (cc->cwnd < cc->ssthresh) {
+        cc->cwnd += bytes;
+        return;
+    }
+    // congestion avoidance: accumulate acked bytes in `stash` until a full
+    // cwnd's worth has been acked, then credit the window
+    cc->stash += bytes;
+    if (cc->stash < cc->cwnd)
+        return;
+    // increase cwnd by 1 MSS per cwnd acked
+    uint32_t count = cc->stash / cc->cwnd;
+    cc->stash -= count * cc->cwnd;
+    cc->cwnd += count * QUICLY_MAX_PACKET_SIZE;
+}
+
+/* Called when packet `lost_pn` is declared lost; `next_pn` is the next unsent
+ * packet number. Implements Reno multiplicative decrease: cwnd is scaled by
+ * QUICLY_RENO_BETA (0.7) and floored at QUICLY_MIN_CWND packets. A recovery
+ * window extending to `next_pn` is opened so that a whole burst of losses
+ * reduces the window only once. */
+void quicly_cc_on_lost(quicly_cc_t *cc, uint32_t bytes, uint64_t lost_pn, uint64_t next_pn)
+{
+    // nothing to do if loss is in recovery window
+    if (lost_pn < cc->recovery_end)
+        return;
+    // set end of recovery window
+    cc->recovery_end = next_pn;
+    cc->cwnd *= QUICLY_RENO_BETA;
+    if (cc->cwnd < QUICLY_MIN_CWND * QUICLY_MAX_PACKET_SIZE)
+        cc->cwnd = QUICLY_MIN_CWND * QUICLY_MAX_PACKET_SIZE;
+    cc->ssthresh = cc->cwnd;
+}
+
+/* Called when persistent congestion is observed.
+ * Not yet implemented; a full implementation would collapse cwnd to the
+ * minimum window (cf. RFC 9002 §7.6). */
+void quicly_cc_on_persistent_congestion(quicly_cc_t *cc)
+{
+    // TODO: collapse cwnd to the minimum window on persistent congestion
+    (void)cc; /* suppress unused-parameter warning until implemented */
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Fastly, Janardhan Iyengar
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* Interface definition for quicly's congestion controller.
+ */
+
+#ifndef quicly_cc_h
+#define quicly_cc_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include "contrib/quicly/constants.h"
+
+typedef struct st_quicly_cc_t {
+    uint32_t cwnd;         /* congestion window, in bytes */
+    uint32_t ssthresh;     /* slow-start threshold, in bytes (UINT32_MAX until the first loss) */
+    uint32_t stash;        /* acked bytes not yet credited to cwnd during congestion avoidance */
+    uint64_t recovery_end; /* packet number at which the current recovery window ends */
+} quicly_cc_t;
+
+/**
+ * Initializes the congestion controller to its initial (slow start) state.
+ */
+void quicly_cc_init(quicly_cc_t *cc);
+
+/**
+ * Called when a packet is newly acknowledged.
+ */
+void quicly_cc_on_acked(quicly_cc_t *cc, uint32_t bytes, uint64_t largest_acked, uint32_t inflight);
+
+/**
+ * Called when a packet is detected as lost. |next_pn| is the next unsent packet number,
+ * used for setting the recovery window.
+ */
+void quicly_cc_on_lost(quicly_cc_t *cc, uint32_t bytes, uint64_t lost_pn, uint64_t next_pn);
+
+/**
+ * Called when persistent congestion is observed.
+ */
+void quicly_cc_on_persistent_congestion(quicly_cc_t *cc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_constants_h
+#define quicly_constants_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include "contrib/quicly/picotls/picotls.h"
+
+#define QUICLY_NUM_PACKETS_BEFORE_ACK 2
+#define QUICLY_DELAYED_ACK_TIMEOUT 25 /* milliseconds */
+#define QUICLY_DEFAULT_MAX_ACK_DELAY 25 /* milliseconds */
+#define QUICLY_LOCAL_MAX_ACK_DELAY 25 /* milliseconds */
+#define QUICLY_DEFAULT_ACK_DELAY_EXPONENT 3
+#define QUICLY_LOCAL_ACK_DELAY_EXPONENT 10
+#define QUICLY_DEFAULT_MIN_PTO 1 /* milliseconds */
+#define QUICLY_DEFAULT_INITIAL_RTT 66 /* initial retransmission timeout is *3, i.e. 200ms */
+#define QUICLY_LOSS_DEFAULT_PACKET_THRESHOLD 3
+
+#define QUICLY_MAX_PACKET_SIZE 1280 /* must be >= 1200 bytes */
+#define QUICLY_AEAD_TAG_SIZE 16
+
+#define QUICLY_MAX_CID_LEN_V1 20
+#define QUICLY_STATELESS_RESET_TOKEN_LEN 16
+
+/* coexists with picotls error codes, assuming that int is at least 32-bits */
+#define QUICLY_ERROR_IS_QUIC(e) (((e) & ~0x1ffff) == 0x20000)
+#define QUICLY_ERROR_IS_QUIC_TRANSPORT(e) (((e) & ~0xffff) == 0x20000)
+#define QUICLY_ERROR_IS_QUIC_APPLICATION(e) (((e) & ~0xffff) == 0x30000)
+#define QUICLY_ERROR_GET_ERROR_CODE(e) ((uint16_t)(e))
+#define QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(e) ((uint16_t)(e) + 0x20000)
+#define QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(e) ((uint16_t)(e) + 0x30000)
+/**
+ * PTLS_ERROR_NO_MEMORY and QUICLY_ERROR_STATE_EXHAUSTION are special error codes that are internal but can be passed to
+ * quicly_close. These are converted to QUICLY_TRANSPORT_ERROR_INTERNAL when sent over the wire.
+ */
+#define QUICLY_ERROR_IS_CONCEALED(err) ((err) == PTLS_ERROR_NO_MEMORY || (err) == QUICLY_ERROR_STATE_EXHAUSTION)
+
+/* transport error codes */
+#define QUICLY_TRANSPORT_ERROR_NONE QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x0)
+#define QUICLY_TRANSPORT_ERROR_INTERNAL QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x1)
+#define QUICLY_TRANSPORT_ERROR_SERVER_BUSY QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x2)
+#define QUICLY_TRANSPORT_ERROR_FLOW_CONTROL QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x3)
+#define QUICLY_TRANSPORT_ERROR_STREAM_LIMIT QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x4)
+#define QUICLY_TRANSPORT_ERROR_STREAM_STATE QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x5)
+#define QUICLY_TRANSPORT_ERROR_FINAL_SIZE QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x6)
+#define QUICLY_TRANSPORT_ERROR_FRAME_ENCODING QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x7)
+#define QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x8)
+#define QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0xa)
+#define QUICLY_TRANSPORT_ERROR_INVALID_TOKEN QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0xb)
+#define QUICLY_TRANSPORT_ERROR_CRYPTO_BUFFER_EXCEEDED QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0xd)
+#define QUICLY_TRANSPORT_ERROR_TLS_ALERT_BASE QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(0x100)
+
+/* internal error codes, used purely for signaling status to the application */
+#define QUICLY_ERROR_PACKET_IGNORED 0xff01
+#define QUICLY_ERROR_SENDBUF_FULL 0xff02 /* internal use only; the error code is never exposed to the application */
+#define QUICLY_ERROR_FREE_CONNECTION 0xff03 /* returned by quicly_send when the connection is freeable */
+#define QUICLY_ERROR_RECEIVED_STATELESS_RESET 0xff04
+#define QUICLY_ERROR_NO_COMPATIBLE_VERSION 0xff05
+#define QUICLY_ERROR_IS_CLOSING 0xff06 /* indicates that the connection has already entered closing state */
+#define QUICLY_ERROR_STATE_EXHAUSTION 0xff07
+
+#define QUICLY_BUILD_ASSERT(condition) ((void)sizeof(char[2 * !!(!__builtin_constant_p(condition) || (condition)) - 1]))
+
+typedef int64_t quicly_stream_id_t;
+
+typedef struct st_quicly_conn_t quicly_conn_t;
+
+/**
+ * Used for emitting arbitrary debug message through probes. The debug message might get emitted unescaped as a JSON string,
+ * therefore cannot contain characters that are required to be escaped as a JSON string (e.g., `\n`, `"`).
+ */
+void quicly__debug_printf(quicly_conn_t *conn, const char *function, int line, const char *fmt, ...)
+ __attribute__((format(printf, 4, 5)));
+
+#define quicly_debug_printf(conn, ...) quicly__debug_printf((conn), __FUNCTION__, __LINE__, __VA_ARGS__)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017-2019 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <sys/time.h>
+#include "contrib/quicly/defaults.h"
+
+#define DEFAULT_MAX_PACKETS_PER_KEY 16777216
+#define DEFAULT_MAX_CRYPTO_BYTES 65536
+
+/* profile that employs IETF specified values */
+const quicly_context_t quicly_spec_context = {NULL, /* tls */
+ QUICLY_MAX_PACKET_SIZE, /* max_packet_size */
+ QUICLY_LOSS_SPEC_CONF, /* loss */
+ {
+ {1 * 1024 * 1024, 1 * 1024 * 1024, 1 * 1024 * 1024}, /* max_stream_data */
+ 16 * 1024 * 1024, /* max_data */
+ 30 * 1000, /* idle_timeout (30 seconds) */
+ 100, /* max_concurrent_streams_bidi */
+ 0 /* max_concurrent_streams_uni */
+ },
+ DEFAULT_MAX_PACKETS_PER_KEY,
+ DEFAULT_MAX_CRYPTO_BYTES,
+ 0, /* enforce_version_negotiation */
+ 0, /* is_clustered */
+ 0, /* enlarge_client_hello */
+ &quicly_default_packet_allocator,
+ NULL,
+ NULL, /* on_stream_open */
+ &quicly_default_stream_scheduler,
+ NULL, /* on_conn_close */
+ &quicly_default_now,
+ NULL,
+ NULL,
+ &quicly_default_crypto_engine};
+
+/* profile with a focus on reducing latency for the HTTP use case */
+const quicly_context_t quicly_performant_context = {NULL, /* tls */
+ QUICLY_MAX_PACKET_SIZE, /* max_packet_size */
+ QUICLY_LOSS_PERFORMANT_CONF, /* loss */
+ {
+ {1 * 1024 * 1024, 1 * 1024 * 1024, 1 * 1024 * 1024}, /* max_stream_data */
+ 16 * 1024 * 1024, /* max_data */
+ 30 * 1000, /* idle_timeout (30 seconds) */
+ 100, /* max_concurrent_streams_bidi */
+ 0 /* max_concurrent_streams_uni */
+ },
+ DEFAULT_MAX_PACKETS_PER_KEY,
+ DEFAULT_MAX_CRYPTO_BYTES,
+ 0, /* enforce_version_negotiation */
+ 0, /* is_clustered */
+ 0, /* enlarge_client_hello */
+ &quicly_default_packet_allocator,
+ NULL,
+ NULL, /* on_stream_open */
+ &quicly_default_stream_scheduler,
+ NULL, /* on_conn_close */
+ &quicly_default_now,
+ NULL,
+ NULL,
+ &quicly_default_crypto_engine};
+
+/* Allocates a datagram whose header and payload live in one malloc'ed chunk;
+ * the payload area begins immediately after the quicly_datagram_t struct.
+ * Returns NULL on allocation failure. */
+static quicly_datagram_t *default_alloc_packet(quicly_packet_allocator_t *self, size_t payloadsize)
+{
+    quicly_datagram_t *packet = malloc(sizeof(*packet) + payloadsize);
+    if (packet == NULL)
+        return NULL;
+    packet->data.base = (uint8_t *)packet + sizeof(*packet);
+    return packet;
+}
+
+/* Releases a datagram created by default_alloc_packet (a single chunk). */
+static void default_free_packet(quicly_packet_allocator_t *self, quicly_datagram_t *packet)
+{
+    free(packet);
+}
+
+quicly_packet_allocator_t quicly_default_packet_allocator = {default_alloc_packet, default_free_packet};
+
+/**
+ * The context of the default CID encryptor. All the contexts being used here are ECB ciphers and therefore stateless - they can be
+ * used concurrently from multiple threads.
+ */
+struct st_quicly_default_encrypt_cid_t {
+    quicly_cid_encryptor_t super; /* base interface; must be the first member (code casts between the two) */
+    ptls_cipher_context_t *cid_encrypt_ctx, *cid_decrypt_ctx, *reset_token_ctx; /* ECB cipher contexts */
+};
+
+/* Computes the 16-byte stateless reset token for `cid` by ECB-encrypting it
+ * with the reset-token cipher, zero-padding the CID first when it is shorter
+ * than the token size. */
+static void generate_reset_token(struct st_quicly_default_encrypt_cid_t *self, void *token, const void *cid)
+{
+    uint8_t expandbuf[QUICLY_STATELESS_RESET_TOKEN_LEN];
+
+    assert(self->reset_token_ctx->algo->block_size == QUICLY_STATELESS_RESET_TOKEN_LEN);
+
+    /* expand the input to full size, if CID is shorter than the size of the reset token */
+    if (self->cid_encrypt_ctx->algo->block_size != QUICLY_STATELESS_RESET_TOKEN_LEN) {
+        assert(self->cid_encrypt_ctx->algo->block_size < QUICLY_STATELESS_RESET_TOKEN_LEN);
+        memset(expandbuf, 0, sizeof(expandbuf));
+        memcpy(expandbuf, cid, self->cid_encrypt_ctx->algo->block_size);
+        cid = expandbuf;
+    }
+
+    /* transform */
+    ptls_cipher_encrypt(self->reset_token_ctx, token, cid, QUICLY_STATELESS_RESET_TOKEN_LEN);
+}
+
+/* Encodes the plaintext CID fields (node_id / master_id / thread_id / path_id)
+ * into one cipher block and ECB-encrypts it to produce the wire CID; also
+ * derives the matching stateless reset token when `reset_token` is non-NULL. */
+static void default_encrypt_cid(quicly_cid_encryptor_t *_self, quicly_cid_t *encrypted, void *reset_token,
+                                const quicly_cid_plaintext_t *plaintext)
+{
+    struct st_quicly_default_encrypt_cid_t *self = (void *)_self;
+    uint8_t buf[16], *p;
+
+    /* encode */
+    p = buf;
+    switch (self->cid_encrypt_ctx->algo->block_size) {
+    case 8:
+        break;
+    case 16:
+        p = quicly_encode64(p, plaintext->node_id); /* node_id fits only in the 16-byte format */
+        break;
+    default:
+        assert(!"unexpected block size");
+        break;
+    }
+    p = quicly_encode32(p, plaintext->master_id);
+    p = quicly_encode32(p, (plaintext->thread_id << 8) | plaintext->path_id);
+    assert(p - buf == self->cid_encrypt_ctx->algo->block_size);
+
+    /* generate CID */
+    ptls_cipher_encrypt(self->cid_encrypt_ctx, encrypted->cid, buf, self->cid_encrypt_ctx->algo->block_size);
+    encrypted->len = self->cid_encrypt_ctx->algo->block_size;
+
+    /* generate stateless reset token if requested */
+    if (reset_token != NULL)
+        generate_reset_token(self, reset_token, encrypted->cid);
+}
+
+/* Decrypts a wire CID back into its plaintext fields. Inputs whose length
+ * differs from the cipher block size are normalized (truncated or zero-padded)
+ * so that routing stays consistent. Returns the CID length consumed. */
+static size_t default_decrypt_cid(quicly_cid_encryptor_t *_self, quicly_cid_plaintext_t *plaintext, const void *encrypted,
+                                  size_t len)
+{
+    struct st_quicly_default_encrypt_cid_t *self = (void *)_self;
+    uint8_t ptbuf[16], tmpbuf[16];
+    const uint8_t *p;
+    size_t cid_len;
+
+    cid_len = self->cid_decrypt_ctx->algo->block_size;
+
+    /* normalize the input, so that we would get consistent routing */
+    if (len != 0 && len != cid_len) {
+        if (len > cid_len)
+            len = cid_len;
+        /* copy only `len` bytes: copying `cid_len` would read past the end of
+         * the caller's buffer when the supplied CID is shorter than one block */
+        memcpy(tmpbuf, encrypted, len);
+        if (len < cid_len)
+            memset(tmpbuf + len, 0, cid_len - len);
+        encrypted = tmpbuf;
+    }
+
+    /* decrypt */
+    ptls_cipher_encrypt(self->cid_decrypt_ctx, ptbuf, encrypted, cid_len);
+
+    /* decode */
+    p = ptbuf;
+    if (cid_len == 16) {
+        plaintext->node_id = quicly_decode64(&p);
+    } else {
+        plaintext->node_id = 0; /* 8-byte CIDs do not carry a node_id */
+    }
+    plaintext->master_id = quicly_decode32(&p);
+    plaintext->thread_id = quicly_decode24(&p);
+    plaintext->path_id = *p++;
+    assert(p - ptbuf == cid_len);
+
+    return cid_len;
+}
+
+/* Callback adapter: generates a stateless reset token for `cid`; always
+ * succeeds (returns 1) for this encryptor. */
+static int default_generate_reset_token(quicly_cid_encryptor_t *_self, void *token, const void *cid)
+{
+    struct st_quicly_default_encrypt_cid_t *self = (void *)_self;
+    generate_reset_token(self, token, cid);
+    return 1;
+}
+
+/* Creates the default CID encryptor. Two keys are derived from `key` via
+ * HKDF-Expand-Label ("cid" and "reset"): one block-cipher pair for
+ * encrypting/decrypting CIDs and one cipher for stateless reset tokens.
+ * Returns NULL on failure; key material is wiped on all exits. */
+quicly_cid_encryptor_t *quicly_new_default_cid_encryptor(ptls_cipher_algorithm_t *cid_cipher,
+                                                         ptls_cipher_algorithm_t *reset_token_cipher, ptls_hash_algorithm_t *hash,
+                                                         ptls_iovec_t key)
+{
+    struct st_quicly_default_encrypt_cid_t *self;
+    uint8_t digestbuf[PTLS_MAX_DIGEST_SIZE], keybuf[PTLS_MAX_SECRET_SIZE];
+
+    assert(cid_cipher->block_size == 8 || cid_cipher->block_size == 16);
+    assert(reset_token_cipher->block_size == 16);
+
+    /* compress an over-long key down to a digest first */
+    if (key.len > hash->block_size) {
+        ptls_calc_hash(hash, digestbuf, key.base, key.len);
+        key = ptls_iovec_init(digestbuf, hash->digest_size);
+    }
+
+    if ((self = malloc(sizeof(*self))) == NULL)
+        goto Fail;
+    /* zero-initializes the cipher-context pointers, making the Fail path below safe */
+    *self = (struct st_quicly_default_encrypt_cid_t){{default_encrypt_cid, default_decrypt_cid, default_generate_reset_token}};
+
+    if (ptls_hkdf_expand_label(hash, keybuf, cid_cipher->key_size, key, "cid", ptls_iovec_init(NULL, 0), "") != 0)
+        goto Fail;
+    if ((self->cid_encrypt_ctx = ptls_cipher_new(cid_cipher, 1, keybuf)) == NULL)
+        goto Fail;
+    if ((self->cid_decrypt_ctx = ptls_cipher_new(cid_cipher, 0, keybuf)) == NULL)
+        goto Fail;
+    if (ptls_hkdf_expand_label(hash, keybuf, reset_token_cipher->key_size, key, "reset", ptls_iovec_init(NULL, 0), "") != 0)
+        goto Fail;
+    if ((self->reset_token_ctx = ptls_cipher_new(reset_token_cipher, 1, keybuf)) == NULL)
+        goto Fail;
+
+    ptls_clear_memory(digestbuf, sizeof(digestbuf));
+    ptls_clear_memory(keybuf, sizeof(keybuf));
+    return &self->super;
+
+Fail:
+    if (self != NULL) {
+        if (self->cid_encrypt_ctx != NULL)
+            ptls_cipher_free(self->cid_encrypt_ctx);
+        if (self->cid_decrypt_ctx != NULL)
+            ptls_cipher_free(self->cid_decrypt_ctx);
+        if (self->reset_token_ctx != NULL)
+            ptls_cipher_free(self->reset_token_ctx);
+        free(self);
+    }
+    ptls_clear_memory(digestbuf, sizeof(digestbuf));
+    ptls_clear_memory(keybuf, sizeof(keybuf));
+    return NULL;
+}
+
+/* Destroys an encryptor created by quicly_new_default_cid_encryptor, freeing
+ * all three cipher contexts and the object itself. */
+void quicly_free_default_cid_encryptor(quicly_cid_encryptor_t *_self)
+{
+    struct st_quicly_default_encrypt_cid_t *self = (void *)_self;
+
+    ptls_cipher_free(self->cid_encrypt_ctx);
+    ptls_cipher_free(self->cid_decrypt_ctx);
+    ptls_cipher_free(self->reset_token_ctx);
+    free(self);
+}
+
+/**
+ * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic.
+ * Returns non-zero when at least one stream is ready to send. When the
+ * connection is not saturated, previously blocked streams are moved back to
+ * the active list first.
+ */
+static int default_stream_scheduler_can_send(quicly_stream_scheduler_t *self, quicly_conn_t *conn, int conn_is_saturated)
+{
+    struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)conn)->_default_scheduler;
+
+    if (!conn_is_saturated) {
+        /* not saturated */
+        quicly_linklist_insert_list(&sched->active, &sched->blocked);
+    } else {
+        /* The code below is disabled, because H2O's scheduler doesn't allow you to "walk" the priority tree without actually
+         * running the round robin, and we want quicly's default to behave like H2O so that we can catch errors. The downside is
+         * that there'd be at most one spurious call of `quicly_send` when the connection is saturated, but that should be fine.
+         */
+        if (0) {
+            /* Saturated. Lazily move such streams to the "blocked" list, at the same time checking if anything can be sent. */
+            while (quicly_linklist_is_linked(&sched->active)) {
+                quicly_stream_t *stream =
+                    (void *)((char *)sched->active.next - offsetof(quicly_stream_t, _send_aux.pending_link.default_scheduler));
+                if (quicly_sendstate_can_send(&stream->sendstate, NULL))
+                    return 1;
+                quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
+                quicly_linklist_insert(sched->blocked.prev, &stream->_send_aux.pending_link.default_scheduler);
+            }
+        }
+    }
+
+    return quicly_linklist_is_linked(&sched->active);
+}
+
+/* Links `stream` onto the scheduler's "active" list — or onto "blocked" when
+ * the connection is flow-capped and the stream has nothing sendable under the
+ * current flow-control limits. No-op if the stream is already linked. */
+static void link_stream(struct st_quicly_default_scheduler_state_t *sched, quicly_stream_t *stream, int conn_is_flow_capped)
+{
+    if (!quicly_linklist_is_linked(&stream->_send_aux.pending_link.default_scheduler)) {
+        quicly_linklist_t *slot = &sched->active;
+        if (conn_is_flow_capped && !quicly_sendstate_can_send(&stream->sendstate, NULL))
+            slot = &sched->blocked;
+        quicly_linklist_insert(slot->prev, &stream->_send_aux.pending_link.default_scheduler);
+    }
+}
+
+/**
+ * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic.
+ * Round-robins over the active streams, emitting stream data via
+ * `quicly_send_stream` until the packet budget runs out or no stream remains
+ * sendable. Returns 0 on success or the first error from quicly_send_stream.
+ */
+static int default_stream_scheduler_do_send(quicly_stream_scheduler_t *self, quicly_conn_t *conn, quicly_send_context_t *s)
+{
+    struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)conn)->_default_scheduler;
+    int conn_is_flow_capped = quicly_is_flow_capped(conn), ret = 0;
+
+    /* when not flow-capped, everything previously blocked becomes eligible again */
+    if (!conn_is_flow_capped)
+        quicly_linklist_insert_list(&sched->active, &sched->blocked);
+
+    while (quicly_can_send_stream_data((quicly_conn_t *)conn, s) && quicly_linklist_is_linked(&sched->active)) {
+        /* detach the first active stream */
+        quicly_stream_t *stream =
+            (void *)((char *)sched->active.next - offsetof(quicly_stream_t, _send_aux.pending_link.default_scheduler));
+        quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
+        /* relink the stream to the blocked list if necessary */
+        if (conn_is_flow_capped && !quicly_sendstate_can_send(&stream->sendstate, NULL)) {
+            quicly_linklist_insert(sched->blocked.prev, &stream->_send_aux.pending_link.default_scheduler);
+            continue;
+        }
+        /* send! */
+        if ((ret = quicly_send_stream(stream, s)) != 0) {
+            /* FIXME Stop quicly_send_stream emitting SENDBUF_FULL (happens when CWND is congested). Otherwise, we need to make
+             * adjustments to the scheduler after popping a stream */
+            if (ret == QUICLY_ERROR_SENDBUF_FULL) {
+                assert(quicly_sendstate_can_send(&stream->sendstate, &stream->_send_aux.max_stream_data));
+                link_stream(sched, stream, conn_is_flow_capped);
+            }
+            break;
+        }
+        /* reschedule */
+        conn_is_flow_capped = quicly_is_flow_capped(conn);
+        if (quicly_sendstate_can_send(&stream->sendstate, &stream->_send_aux.max_stream_data))
+            link_stream(sched, stream, conn_is_flow_capped);
+    }
+
+    return ret;
+}
+
+/**
+ * See doc-comment of `st_quicly_default_scheduler_state_t` to understand the logic.
+ * Links the stream into the scheduler when it has data to send, unlinks it
+ * otherwise. Always returns 0.
+ */
+static int default_stream_scheduler_update_state(quicly_stream_scheduler_t *self, quicly_stream_t *stream)
+{
+    struct st_quicly_default_scheduler_state_t *sched = &((struct _st_quicly_conn_public_t *)stream->conn)->_default_scheduler;
+
+    if (quicly_sendstate_can_send(&stream->sendstate, &stream->_send_aux.max_stream_data)) {
+        /* activate if not */
+        link_stream(sched, stream, quicly_is_flow_capped(stream->conn));
+    } else {
+        /* deactivate if active */
+        if (quicly_linklist_is_linked(&stream->_send_aux.pending_link.default_scheduler))
+            quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
+    }
+
+    return 0;
+}
+
+quicly_stream_scheduler_t quicly_default_stream_scheduler = {default_stream_scheduler_can_send, default_stream_scheduler_do_send,
+                                                             default_stream_scheduler_update_state};
+
+/* Allocates an uninitialized stream object; the caller initializes it. */
+quicly_stream_t *quicly_default_alloc_stream(quicly_context_t *ctx)
+{
+    return malloc(sizeof(quicly_stream_t));
+}
+
+/* Releases a stream allocated by quicly_default_alloc_stream. */
+void quicly_default_free_stream(quicly_stream_t *stream)
+{
+    free(stream);
+}
+
+/* Returns the current wall-clock time in milliseconds via gettimeofday.
+ * NOTE(review): wall-clock time can jump (e.g. NTP adjustments); a monotonic
+ * clock might be preferable — confirm against quicly's timer requirements. */
+static int64_t default_now(quicly_now_t *self)
+{
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    return (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
+}
+
+quicly_now_t quicly_default_now = {default_now};
+
+/* Creates the packet-protection contexts for one epoch: the header-protection
+ * cipher (only when `hp_ctx` is non-NULL) and the AEAD context, with keys
+ * derived from `secret`. On failure any partially-created contexts are freed
+ * and the output pointers reset; the derived key material is always wiped. */
+static int default_setup_cipher(quicly_crypto_engine_t *engine, quicly_conn_t *conn, size_t epoch, int is_enc,
+                                ptls_cipher_context_t **hp_ctx, ptls_aead_context_t **aead_ctx, ptls_aead_algorithm_t *aead,
+                                ptls_hash_algorithm_t *hash, const void *secret)
+{
+    uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
+    int ret;
+
+    if (hp_ctx != NULL)
+        *hp_ctx = NULL;
+    *aead_ctx = NULL;
+
+    /* generate new header protection key */
+    if (hp_ctx != NULL) {
+        if ((ret = ptls_hkdf_expand_label(hash, hpkey, aead->ctr_cipher->key_size, ptls_iovec_init(secret, hash->digest_size),
+                                          "quic hp", ptls_iovec_init(NULL, 0), NULL)) != 0)
+            goto Exit;
+        if ((*hp_ctx = ptls_cipher_new(aead->ctr_cipher, is_enc, hpkey)) == NULL) {
+            ret = PTLS_ERROR_NO_MEMORY;
+            goto Exit;
+        }
+    }
+
+    /* generate new AEAD context */
+    if ((*aead_ctx = ptls_aead_new(aead, hash, is_enc, secret, QUICLY_AEAD_BASE_LABEL)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    if (QUICLY_DEBUG) {
+        char *secret_hex = quicly_hexdump(secret, hash->digest_size, SIZE_MAX),
+             *hpkey_hex = quicly_hexdump(hpkey, aead->ctr_cipher->key_size, SIZE_MAX);
+        fprintf(stderr, "%s:\n aead-secret: %s\n hp-key: %s\n", __FUNCTION__, secret_hex, hpkey_hex);
+        free(secret_hex);
+        free(hpkey_hex);
+    }
+
+    ret = 0;
+Exit:
+    if (ret != 0) {
+        if (*aead_ctx != NULL) {
+            ptls_aead_free(*aead_ctx);
+            *aead_ctx = NULL;
+        }
+        /* hp_ctx may legitimately be NULL (header protection not requested);
+         * the previous unconditional `*hp_ctx` dereference here was a
+         * NULL-pointer bug hit when ptls_aead_new failed */
+        if (hp_ctx != NULL && *hp_ctx != NULL) {
+            ptls_cipher_free(*hp_ctx);
+            *hp_ctx = NULL;
+        }
+    }
+    ptls_clear_memory(hpkey, sizeof(hpkey));
+    return ret;
+}
+
+/* Applies QUIC header protection to an already AEAD-encrypted packet: a mask
+ * is obtained by encrypting zeros with the header-protection cipher keyed off
+ * a ciphertext sample taken just past the packet number, then XORed into the
+ * first byte (low 4 bits for long headers, 5 for short) and the PN bytes. */
+static void default_finalize_send_packet(quicly_crypto_engine_t *engine, quicly_conn_t *conn,
+                                         ptls_cipher_context_t *header_protect_ctx, ptls_aead_context_t *packet_protect_ctx,
+                                         quicly_datagram_t *packet, size_t first_byte_at, size_t payload_from, int coalesced)
+{
+    uint8_t hpmask[1 + QUICLY_SEND_PN_SIZE] = {0};
+    size_t i;
+
+    /* sample starts QUICLY_MAX_PN_SIZE bytes past the start of the packet number field */
+    ptls_cipher_init(header_protect_ctx, packet->data.base + payload_from - QUICLY_SEND_PN_SIZE + QUICLY_MAX_PN_SIZE);
+    ptls_cipher_encrypt(header_protect_ctx, hpmask, hpmask, sizeof(hpmask));
+
+    packet->data.base[first_byte_at] ^= hpmask[0] & (QUICLY_PACKET_IS_LONG_HEADER(packet->data.base[first_byte_at]) ? 0xf : 0x1f);
+    for (i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
+        packet->data.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^= hpmask[i + 1];
+}
+
+quicly_crypto_engine_t quicly_default_crypto_engine = {default_setup_cipher, default_finalize_send_packet};
--- /dev/null
+/*
+ * Copyright (c) 2017-2019 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_defaults_h
+#define quicly_defaults_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "contrib/quicly/quicly.h"
+
+extern const quicly_context_t quicly_spec_context;
+extern const quicly_context_t quicly_performant_context;
+
+/**
+ *
+ */
+extern quicly_packet_allocator_t quicly_default_packet_allocator;
+/**
+ * Instantiates a CID cipher.
+ * The CID cipher MUST be a block cipher. It MAY be a 64-bit block cipher (e.g., blowfish) when `quicly_cid_plaintext_t::node_id` is
+ * not utilized by the application. Otherwise, it MUST be a 128-bit block cipher (e.g., AES).
+ * The reset token cipher MUST be a 128-bit block cipher.
+ */
+quicly_cid_encryptor_t *quicly_new_default_cid_encryptor(ptls_cipher_algorithm_t *cid_cipher,
+ ptls_cipher_algorithm_t *reset_token_cipher, ptls_hash_algorithm_t *hash,
+ ptls_iovec_t key);
+/**
+ *
+ */
+void quicly_free_default_cid_encryptor(quicly_cid_encryptor_t *self);
+/**
+ *
+ */
+extern quicly_stream_scheduler_t quicly_default_stream_scheduler;
+/**
+ *
+ */
+extern quicly_now_t quicly_default_now;
+/**
+ *
+ */
+extern quicly_crypto_engine_t quicly_default_crypto_engine;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <string.h>
+#include "contrib/quicly/frame.h"
+
+/* Writes a PATH_CHALLENGE or PATH_RESPONSE frame (one type byte followed by
+ * the fixed-size challenge data) and returns the position past the frame. */
+uint8_t *quicly_encode_path_challenge_frame(uint8_t *dst, int is_response, const uint8_t *data)
+{
+    dst[0] = is_response ? QUICLY_FRAME_TYPE_PATH_RESPONSE : QUICLY_FRAME_TYPE_PATH_CHALLENGE;
+    memcpy(dst + 1, data, QUICLY_PATH_CHALLENGE_DATA_LEN);
+    return dst + 1 + QUICLY_PATH_CHALLENGE_DATA_LEN;
+}
+
+/* Encodes an ACK frame covering `ranges` (must be non-empty) into [dst,
+ * dst_end). Returns the position past the frame, or NULL when the buffer is
+ * too small for the variable-length ACK blocks. */
+uint8_t *quicly_encode_ack_frame(uint8_t *dst, uint8_t *dst_end, quicly_ranges_t *ranges, uint64_t ack_delay)
+{
+/* emits one varint block length (end - start - 1), bailing out with NULL when
+ * fewer than 8 bytes (max varint size) remain */
+#define WRITE_BLOCK(start, end)                                                                                                    \
+    do {                                                                                                                           \
+        uint64_t _start = (start), _end = (end);                                                                                   \
+        assert(_start < _end);                                                                                                     \
+        if (dst_end - dst < 8)                                                                                                     \
+            return NULL;                                                                                                           \
+        dst = quicly_encodev(dst, _end - _start - 1);                                                                              \
+    } while (0)
+
+    size_t range_index = ranges->num_ranges - 1;
+
+    assert(ranges->num_ranges != 0);
+
+    /* number of bytes being emitted without space check are 1 + 8 + 8 + 1 bytes (as defined in QUICLY_ACK_FRAME_CAPACITY) */
+    *dst++ = QUICLY_FRAME_TYPE_ACK;
+    dst = quicly_encodev(dst, ranges->ranges[range_index].end - 1); /* largest acknowledged */
+    dst = quicly_encodev(dst, ack_delay);                           /* ack delay */
+    QUICLY_BUILD_ASSERT(QUICLY_MAX_ACK_BLOCKS - 1 <= 63);
+    *dst++ = (uint8_t)(ranges->num_ranges - 1); /* ack blocks */
+
+    /* walk ranges from the largest downwards, alternating block lengths and gaps */
+    while (1) {
+        WRITE_BLOCK(ranges->ranges[range_index].start, ranges->ranges[range_index].end); /* first ack block */
+        if (range_index-- == 0)
+            break;
+        WRITE_BLOCK(ranges->ranges[range_index].end, ranges->ranges[range_index + 1].start); /* gap */
+    }
+
+    return dst;
+
+#undef WRITE_BLOCK
+}
+
+int quicly_decode_ack_frame(const uint8_t **src, const uint8_t *end, quicly_ack_frame_t *frame, int is_ack_ecn) /* parses ACK / ACK_ECN; returns 0 on success, QUICLY_TRANSPORT_ERROR_FRAME_ENCODING on malformed input */
+{
+ uint64_t i, num_gaps, gap, ack_range;
+
+ if ((frame->largest_acknowledged = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((frame->ack_delay = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((num_gaps = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+
+ if ((ack_range = quicly_decodev(src, end)) == UINT64_MAX) /* first ack range, relative to largest_acknowledged */
+ goto Error;
+ if (frame->largest_acknowledged < ack_range) /* a range may not extend below packet number 0 */
+ goto Error;
+ frame->smallest_acknowledged = frame->largest_acknowledged - ack_range;
+ frame->ack_block_lengths[0] = ack_range + 1;
+ frame->num_gaps = 0;
+
+ for (i = 0; i != num_gaps; ++i) {
+ if ((gap = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((ack_range = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if (i < QUICLY_ACK_MAX_GAPS) { /* gaps beyond the first QUICLY_ACK_MAX_GAPS are parsed but dropped */
+ if (frame->smallest_acknowledged < gap + ack_range + 2)
+ goto Error;
+ frame->gaps[i] = gap + 1; /* wire value is one less than the actual gap */
+ frame->ack_block_lengths[i + 1] = ack_range + 1;
+ frame->smallest_acknowledged -= gap + ack_range + 2;
+ ++frame->num_gaps;
+ }
+ }
+
+ if (is_ack_ecn) {
+ /* just skip ECT(0), ECT(1), ECT-CE counters for the time being */
+ for (i = 0; i != 3; ++i)
+ if (quicly_decodev(src, end) == UINT64_MAX)
+ goto Error;
+ }
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+uint8_t *quicly_encode_close_frame(uint8_t *const base, uint64_t error_code, uint64_t offending_frame_type,
+ const char *reason_phrase) /* when base is NULL only the required size is computed (see quicly_close_frame_capacity) */
+{
+ size_t offset = 0, reason_phrase_len = strlen(reason_phrase);
+
+#define PUSHV(v) /* emit varint when base != NULL, otherwise just accumulate the required size */ \
+ do { \
+ if (base != NULL) { \
+ offset = quicly_encodev(base + offset, (v)) - base; \
+ } else { \
+ offset += quicly_encodev_capacity(v); \
+ } \
+ } while (0)
+
+ PUSHV(offending_frame_type == UINT64_MAX ? QUICLY_FRAME_TYPE_APPLICATION_CLOSE : QUICLY_FRAME_TYPE_TRANSPORT_CLOSE); /* UINT64_MAX selects the application-level close */
+ PUSHV(error_code);
+ if (offending_frame_type != UINT64_MAX)
+ PUSHV(offending_frame_type); /* frame-type field exists only in the transport close */
+ PUSHV(reason_phrase_len);
+ if (base != NULL)
+ memcpy(base + offset, reason_phrase, reason_phrase_len);
+ offset += reason_phrase_len;
+
+#undef PUSHV
+
+ return base + offset; /* end of the encoded frame, or NULL + size in sizing mode */
+}
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_frame_h
+#define quicly_frame_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include "contrib/quicly/picotls/picotls.h"
+#include "contrib/quicly/constants.h"
+#include "contrib/quicly/ranges.h"
+
+#define QUICLY_FRAME_TYPE_PADDING 0
+#define QUICLY_FRAME_TYPE_PING 1
+#define QUICLY_FRAME_TYPE_ACK 2
+#define QUICLY_FRAME_TYPE_ACK_ECN 3
+#define QUICLY_FRAME_TYPE_RESET_STREAM 4 /* RESET_STREAM */
+#define QUICLY_FRAME_TYPE_STOP_SENDING 5
+#define QUICLY_FRAME_TYPE_CRYPTO 6
+#define QUICLY_FRAME_TYPE_NEW_TOKEN 7
+#define QUICLY_FRAME_TYPE_STREAM_BASE 8
+#define QUICLY_FRAME_TYPE_MAX_DATA 16
+#define QUICLY_FRAME_TYPE_MAX_STREAM_DATA 17
+#define QUICLY_FRAME_TYPE_MAX_STREAMS_BIDI 18
+#define QUICLY_FRAME_TYPE_MAX_STREAMS_UNI 19
+#define QUICLY_FRAME_TYPE_DATA_BLOCKED 20
+#define QUICLY_FRAME_TYPE_STREAM_DATA_BLOCKED 21
+#define QUICLY_FRAME_TYPE_STREAMS_BLOCKED_BIDI 22
+#define QUICLY_FRAME_TYPE_STREAMS_BLOCKED_UNI 23
+#define QUICLY_FRAME_TYPE_NEW_CONNECTION_ID 24
+#define QUICLY_FRAME_TYPE_RETIRE_CONNECTION_ID 25
+#define QUICLY_FRAME_TYPE_PATH_CHALLENGE 26
+#define QUICLY_FRAME_TYPE_PATH_RESPONSE 27
+#define QUICLY_FRAME_TYPE_TRANSPORT_CLOSE 28
+#define QUICLY_FRAME_TYPE_APPLICATION_CLOSE 29
+#define QUICLY_FRAME_TYPE_HANDSHAKE_DONE 30
+
+#define QUICLY_FRAME_TYPE_STREAM_BITS 0x7
+#define QUICLY_FRAME_TYPE_STREAM_BIT_OFF 0x4
+#define QUICLY_FRAME_TYPE_STREAM_BIT_LEN 0x2
+#define QUICLY_FRAME_TYPE_STREAM_BIT_FIN 0x1
+
+#define QUICLY_MAX_DATA_FRAME_CAPACITY (1 + 8)
+#define QUICLY_MAX_STREAM_DATA_FRAME_CAPACITY (1 + 8 + 8)
+#define QUICLY_MAX_STREAMS_FRAME_CAPACITY (1 + 8)
+#define QUICLY_PING_FRAME_CAPACITY 1
+#define QUICLY_RST_FRAME_CAPACITY (1 + 8 + 8 + 8)
+#define QUICLY_STREAMS_BLOCKED_FRAME_CAPACITY (1 + 8)
+#define QUICLY_STOP_SENDING_FRAME_CAPACITY (1 + 8 + 8)
+#define QUICLY_ACK_MAX_GAPS 256
+#define QUICLY_ACK_FRAME_CAPACITY (1 + 8 + 8 + 1)
+#define QUICLY_PATH_CHALLENGE_FRAME_CAPACITY (1 + 8)
+#define QUICLY_STREAM_FRAME_CAPACITY (1 + 8 + 8 + 1)
+
+/**
+ * maximum number of ACK blocks (inclusive)
+ */
+#define QUICLY_MAX_ACK_BLOCKS 64
+
+static uint16_t quicly_decode16(const uint8_t **src);
+static uint32_t quicly_decode24(const uint8_t **src);
+static uint32_t quicly_decode32(const uint8_t **src);
+static uint64_t quicly_decode64(const uint8_t **src);
+#define quicly_decodev ptls_decode_quicint
+static uint8_t *quicly_encode16(uint8_t *p, uint16_t v);
+static uint8_t *quicly_encode32(uint8_t *p, uint32_t v);
+static uint8_t *quicly_encode64(uint8_t *p, uint64_t v);
+#define quicly_encodev ptls_encode_quicint
+static size_t quicly_encodev_capacity(uint64_t v);
+static unsigned quicly_clz32(uint32_t v);
+static unsigned quicly_clz64(uint64_t v);
+
+typedef struct st_quicly_stream_frame_t {
+ uint64_t stream_id;
+ unsigned is_fin : 1;
+ uint64_t offset;
+ ptls_iovec_t data;
+} quicly_stream_frame_t;
+
+static int quicly_decode_stream_frame(uint8_t type_flags, const uint8_t **src, const uint8_t *end, quicly_stream_frame_t *frame);
+static uint8_t *quicly_encode_crypto_frame_header(uint8_t *dst, uint8_t *dst_end, uint64_t offset, size_t *data_len);
+static int quicly_decode_crypto_frame(const uint8_t **src, const uint8_t *end, quicly_stream_frame_t *frame);
+
+static uint8_t *quicly_encode_reset_stream_frame(uint8_t *dst, uint64_t stream_id, uint16_t app_error_code, uint64_t final_size);
+
+typedef struct st_quicly_reset_stream_frame_t {
+ uint64_t stream_id;
+ uint16_t app_error_code;
+ uint64_t final_size;
+} quicly_reset_stream_frame_t;
+
+static int quicly_decode_reset_stream_frame(const uint8_t **src, const uint8_t *end, quicly_reset_stream_frame_t *frame);
+
+typedef struct st_quicly_transport_close_frame_t {
+ uint16_t error_code;
+ uint64_t frame_type;
+ ptls_iovec_t reason_phrase;
+} quicly_transport_close_frame_t;
+
+static int quicly_decode_transport_close_frame(const uint8_t **src, const uint8_t *end, quicly_transport_close_frame_t *frame);
+
+typedef struct st_quicly_application_close_frame_t {
+ uint16_t error_code;
+ ptls_iovec_t reason_phrase;
+} quicly_application_close_frame_t;
+
+static int quicly_decode_application_close_frame(const uint8_t **src, const uint8_t *end, quicly_application_close_frame_t *frame);
+
+static size_t quicly_close_frame_capacity(uint64_t error_code, uint64_t offending_frame_type, const char *reason_phrase);
+/**
+ * @param offending_frame_type the offending frame type if sending a transport close, or UINT64_MAX if sending an application close
+ */
+uint8_t *quicly_encode_close_frame(uint8_t *const base, uint64_t error_code, uint64_t offending_frame_type,
+ const char *reason_phrase);
+
+static uint8_t *quicly_encode_max_data_frame(uint8_t *dst, uint64_t max_data);
+
+typedef struct st_quicly_max_data_frame_t {
+ uint64_t max_data;
+} quicly_max_data_frame_t;
+
+static int quicly_decode_max_data_frame(const uint8_t **src, const uint8_t *end, quicly_max_data_frame_t *frame);
+
+static uint8_t *quicly_encode_max_stream_data_frame(uint8_t *dst, uint64_t stream_id, uint64_t max_stream_data);
+
+typedef struct st_quicly_max_stream_data_frame_t {
+ uint64_t stream_id;
+ uint64_t max_stream_data;
+} quicly_max_stream_data_frame_t;
+
+static int quicly_decode_max_stream_data_frame(const uint8_t **src, const uint8_t *end, quicly_max_stream_data_frame_t *frame);
+
+static uint8_t *quicly_encode_max_streams_frame(uint8_t *dst, int uni, uint64_t count);
+
+typedef struct st_quicly_max_streams_frame_t {
+ uint64_t count;
+} quicly_max_streams_frame_t;
+
+static int quicly_decode_max_streams_frame(const uint8_t **src, const uint8_t *end, quicly_max_streams_frame_t *frame);
+
+#define QUICLY_PATH_CHALLENGE_DATA_LEN 8
+
+uint8_t *quicly_encode_path_challenge_frame(uint8_t *dst, int is_response, const uint8_t *data);
+
+typedef struct st_quicly_path_challenge_frame_t {
+ const uint8_t *data;
+} quicly_path_challenge_frame_t;
+
+static int quicly_decode_path_challenge_frame(const uint8_t **src, const uint8_t *end, quicly_path_challenge_frame_t *frame);
+
+typedef struct st_quicly_data_blocked_frame_t {
+ uint64_t offset;
+} quicly_data_blocked_frame_t;
+
+static int quicly_decode_data_blocked_frame(const uint8_t **src, const uint8_t *end, quicly_data_blocked_frame_t *frame);
+
+typedef struct st_quicly_stream_data_blocked_frame_t {
+ quicly_stream_id_t stream_id;
+ uint64_t offset;
+} quicly_stream_data_blocked_frame_t;
+
+static int quicly_decode_stream_data_blocked_frame(const uint8_t **src, const uint8_t *end,
+ quicly_stream_data_blocked_frame_t *frame);
+
+static uint8_t *quicly_encode_streams_blocked_frame(uint8_t *dst, int uni, uint64_t count);
+
+typedef struct st_quicly_streams_blocked_frame_t {
+ uint64_t count;
+} quicly_streams_blocked_frame_t;
+
+static int quicly_decode_streams_blocked_frame(const uint8_t **src, const uint8_t *end, quicly_streams_blocked_frame_t *frame);
+
+typedef struct st_quicly_new_connection_id_frame_t {
+ uint64_t sequence;
+ uint64_t retire_prior_to;
+ ptls_iovec_t cid;
+ const uint8_t *stateless_reset_token;
+} quicly_new_connection_id_frame_t;
+
+static int quicly_decode_new_connection_id_frame(const uint8_t **src, const uint8_t *end, quicly_new_connection_id_frame_t *frame);
+
+static uint8_t *quicly_encode_stop_sending_frame(uint8_t *dst, uint64_t stream_id, uint16_t app_error_code);
+
+typedef struct st_quicly_stop_sending_frame_t {
+ uint64_t stream_id;
+ uint16_t app_error_code;
+} quicly_stop_sending_frame_t;
+
+static int quicly_decode_stop_sending_frame(const uint8_t **src, const uint8_t *end, quicly_stop_sending_frame_t *frame);
+
+uint8_t *quicly_encode_ack_frame(uint8_t *dst, uint8_t *dst_end, quicly_ranges_t *ranges, uint64_t ack_delay);
+
+typedef struct st_quicly_ack_frame_t {
+ uint64_t largest_acknowledged;
+ uint64_t smallest_acknowledged;
+ uint64_t ack_delay;
+ uint64_t num_gaps;
+ uint64_t ack_block_lengths[QUICLY_ACK_MAX_GAPS + 1];
+ uint64_t gaps[QUICLY_ACK_MAX_GAPS];
+} quicly_ack_frame_t;
+
+int quicly_decode_ack_frame(const uint8_t **src, const uint8_t *end, quicly_ack_frame_t *frame, int is_ack_ecn);
+
+static size_t quicly_new_token_frame_capacity(ptls_iovec_t token);
+static uint8_t *quicly_encode_new_token_frame(uint8_t *dst, ptls_iovec_t token);
+
+typedef struct st_quicly_new_token_frame_t {
+ ptls_iovec_t token;
+} quicly_new_token_frame_t;
+
+static int quicly_decode_new_token_frame(const uint8_t **src, const uint8_t *end, quicly_new_token_frame_t *frame);
+
+/* inline definitions */
+
+inline uint16_t quicly_decode16(const uint8_t **src) /* reads a big-endian uint16 and advances *src by 2; no bounds check */
+{
+ uint16_t v = (uint16_t)(*src)[0] << 8 | (*src)[1];
+ *src += 2;
+ return v;
+}
+
+inline uint32_t quicly_decode24(const uint8_t **src) /* reads a big-endian 24-bit value and advances *src by 3; no bounds check */
+{
+ uint32_t v = (uint32_t)(*src)[0] << 16 | (uint32_t)(*src)[1] << 8 | (uint32_t)(*src)[2];
+ *src += 3;
+ return v;
+}
+
+inline uint32_t quicly_decode32(const uint8_t **src) /* reads a big-endian uint32 and advances *src by 4; no bounds check */
+{
+ uint32_t v = (uint32_t)(*src)[0] << 24 | (uint32_t)(*src)[1] << 16 | (uint32_t)(*src)[2] << 8 | (*src)[3];
+ *src += 4;
+ return v;
+}
+
+inline uint64_t quicly_decode64(const uint8_t **src) /* reads a big-endian uint64 and advances *src by 8; no bounds check */
+{
+ uint64_t v = (uint64_t)(*src)[0] << 56 | (uint64_t)(*src)[1] << 48 | (uint64_t)(*src)[2] << 40 | (uint64_t)(*src)[3] << 32 |
+ (uint64_t)(*src)[4] << 24 | (uint64_t)(*src)[5] << 16 | (uint64_t)(*src)[6] << 8 | (*src)[7];
+ *src += 8;
+ return v;
+}
+
+inline uint8_t *quicly_encode16(uint8_t *p, uint16_t v) /* writes v big-endian; returns the position past the 2 bytes written */
+{
+ *p++ = (uint8_t)(v >> 8);
+ *p++ = (uint8_t)v;
+ return p;
+}
+
+inline uint8_t *quicly_encode32(uint8_t *p, uint32_t v) /* writes v big-endian; returns the position past the 4 bytes written */
+{
+ *p++ = (uint8_t)(v >> 24);
+ *p++ = (uint8_t)(v >> 16);
+ *p++ = (uint8_t)(v >> 8);
+ *p++ = (uint8_t)v;
+ return p;
+}
+
+inline uint8_t *quicly_encode64(uint8_t *p, uint64_t v) /* writes v big-endian; returns the position past the 8 bytes written */
+{
+ *p++ = (uint8_t)(v >> 56);
+ *p++ = (uint8_t)(v >> 48);
+ *p++ = (uint8_t)(v >> 40);
+ *p++ = (uint8_t)(v >> 32);
+ *p++ = (uint8_t)(v >> 24);
+ *p++ = (uint8_t)(v >> 16);
+ *p++ = (uint8_t)(v >> 8);
+ *p++ = (uint8_t)v;
+ return p;
+}
+
+inline size_t quicly_encodev_capacity(uint64_t v) /* number of bytes a QUIC varint encoding of v occupies (1, 2, 4 or 8) */
+{
+ if (v > 63) {
+ if (v > 16383) {
+ if (v > 1073741823)
+ return 8;
+ return 4;
+ }
+ return 2;
+ }
+ return 1;
+}
+
+inline unsigned quicly_clz32(uint32_t v) /* count leading zeros; guards the v == 0 case that is undefined for __builtin_clz */
+{
+ QUICLY_BUILD_ASSERT(sizeof(unsigned) == 4);
+ return v != 0 ? __builtin_clz(v) : 32;
+}
+
+inline unsigned quicly_clz64(uint64_t v) /* count leading zeros; guards the v == 0 case that is undefined for __builtin_clzll */
+{
+ QUICLY_BUILD_ASSERT(sizeof(long long) == 8);
+ return v != 0 ? __builtin_clzll(v) : 64;
+}
+
+inline int quicly_decode_stream_frame(uint8_t type_flags, const uint8_t **src, const uint8_t *end, quicly_stream_frame_t *frame) /* parses a STREAM frame body given its type byte; frame->data aliases the receive buffer */
+{
+ /* obtain stream id */
+ if ((frame->stream_id = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+
+ /* obtain offset */
+ if ((type_flags & QUICLY_FRAME_TYPE_STREAM_BIT_OFF) != 0) {
+ if ((frame->offset = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ } else {
+ frame->offset = 0; /* no OFF bit: data starts at offset zero */
+ }
+
+ /* obtain data */
+ if ((type_flags & QUICLY_FRAME_TYPE_STREAM_BIT_LEN) != 0) {
+ uint64_t len;
+ if ((len = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((uint64_t)(end - *src) < len)
+ goto Error;
+ frame->data = ptls_iovec_init(*src, len);
+ *src += len;
+ } else {
+ frame->data = ptls_iovec_init(*src, end - *src); /* no LEN bit: data extends to the end of the packet */
+ *src = end;
+ }
+
+ /* fin bit */
+ frame->is_fin = (type_flags & QUICLY_FRAME_TYPE_STREAM_BIT_FIN) != 0;
+
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline uint8_t *quicly_encode_crypto_frame_header(uint8_t *dst, uint8_t *dst_end, uint64_t offset, size_t *data_len) /* writes the CRYPTO header; clamps *data_len to the space left and the chosen length-field width */
+{
+ size_t sizeleft, len_length;
+
+ *dst++ = QUICLY_FRAME_TYPE_CRYPTO;
+ dst = quicly_encodev(dst, offset);
+
+ sizeleft = dst_end - dst;
+ if (sizeleft <= 64 || *data_len < 64) { /* small frame: a 1-byte length varint suffices */
+ if (*data_len >= sizeleft)
+ *data_len = sizeleft - 1; /* reserve the length byte itself */
+ len_length = 1;
+ } else {
+ if (*data_len > 16383)
+ *data_len = 16383; /* maximum representable in a 2-byte varint */
+ len_length = 2;
+ }
+
+ if (*data_len > sizeleft - len_length)
+ *data_len = sizeleft - len_length;
+ dst = quicly_encodev(dst, *data_len);
+ return dst; /* position where the caller copies *data_len bytes of payload */
+}
+
+inline int quicly_decode_crypto_frame(const uint8_t **src, const uint8_t *end, quicly_stream_frame_t *frame) /* parses a CRYPTO frame into the stream-frame shape (stream 0, never FIN) */
+{
+ uint64_t len;
+
+ frame->stream_id = 0;
+ frame->is_fin = 0;
+
+ if ((frame->offset = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((len = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((uint64_t)(end - *src) < len) /* declared length must fit the remaining buffer */
+ goto Error;
+ frame->data = ptls_iovec_init(*src, len); /* aliases the receive buffer; no copy */
+ *src += len;
+
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline uint8_t *quicly_encode_reset_stream_frame(uint8_t *dst, uint64_t stream_id, uint16_t app_error_code, uint64_t final_size) /* caller must reserve QUICLY_RST_FRAME_CAPACITY bytes */
+{
+ *dst++ = QUICLY_FRAME_TYPE_RESET_STREAM;
+ dst = quicly_encodev(dst, stream_id);
+ dst = quicly_encodev(dst, app_error_code);
+ dst = quicly_encodev(dst, final_size);
+ return dst; /* one past the last byte written */
+}
+
+inline int quicly_decode_reset_stream_frame(const uint8_t **src, const uint8_t *end, quicly_reset_stream_frame_t *frame) /* parses RESET_STREAM; 0 on success, QUICLY_TRANSPORT_ERROR_FRAME_ENCODING on malformed input */
+{
+ uint64_t error_code;
+
+ if ((frame->stream_id = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((error_code = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ frame->app_error_code = (uint16_t)error_code; /* wire value truncated to 16 bits */
+ if ((frame->final_size = quicly_decodev(src, end)) == UINT64_MAX) /* FIX: was unchecked, so a truncated frame returned success with final_size == UINT64_MAX */
+ goto Error;
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline int quicly_decode_application_close_frame(const uint8_t **src, const uint8_t *end, quicly_application_close_frame_t *frame) /* parses APPLICATION_CLOSE; reason_phrase aliases the receive buffer */
+{
+ uint64_t error_code, reason_len;
+
+ if ((error_code = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ frame->error_code = (uint16_t)error_code; /* wire value truncated to 16 bits */
+ if ((reason_len = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((uint64_t)(end - *src) < reason_len)
+ goto Error;
+ frame->reason_phrase = ptls_iovec_init(*src, reason_len);
+ *src += reason_len;
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline int quicly_decode_transport_close_frame(const uint8_t **src, const uint8_t *end, quicly_transport_close_frame_t *frame) /* parses TRANSPORT_CLOSE; additionally carries the offending frame type */
+{
+ uint64_t error_code, reason_len;
+
+ if ((error_code = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ frame->error_code = (uint16_t)error_code; /* wire value truncated to 16 bits */
+ if ((frame->frame_type = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((reason_len = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((uint64_t)(end - *src) < reason_len)
+ goto Error;
+ frame->reason_phrase = ptls_iovec_init(*src, reason_len); /* aliases the receive buffer; no copy */
+ *src += reason_len;
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline size_t quicly_close_frame_capacity(uint64_t error_code, uint64_t offending_frame_type, const char *reason_phrase) /* size the close frame by running the encoder in sizing mode (base == NULL) */
+{
+ return quicly_encode_close_frame(NULL, error_code, offending_frame_type, reason_phrase) - (uint8_t *)NULL; /* NOTE(review): NULL-based pointer arithmetic — works on common ABIs but is formally UB */
+}
+
+inline uint8_t *quicly_encode_max_data_frame(uint8_t *dst, uint64_t max_data) /* caller must reserve QUICLY_MAX_DATA_FRAME_CAPACITY bytes */
+{
+ *dst++ = QUICLY_FRAME_TYPE_MAX_DATA;
+ dst = quicly_encodev(dst, max_data);
+ return dst;
+}
+
+inline int quicly_decode_max_data_frame(const uint8_t **src, const uint8_t *end, quicly_max_data_frame_t *frame) /* parses MAX_DATA */
+{
+ if ((frame->max_data = quicly_decodev(src, end)) == UINT64_MAX)
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+ return 0;
+}
+
+inline uint8_t *quicly_encode_max_stream_data_frame(uint8_t *dst, uint64_t stream_id, uint64_t max_stream_data) /* caller must reserve QUICLY_MAX_STREAM_DATA_FRAME_CAPACITY bytes */
+{
+ *dst++ = QUICLY_FRAME_TYPE_MAX_STREAM_DATA;
+ dst = quicly_encodev(dst, stream_id);
+ dst = quicly_encodev(dst, max_stream_data);
+ return dst;
+}
+
+inline int quicly_decode_max_stream_data_frame(const uint8_t **src, const uint8_t *end, quicly_max_stream_data_frame_t *frame) /* parses MAX_STREAM_DATA */
+{
+ if ((frame->stream_id = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((frame->max_stream_data = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline uint8_t *quicly_encode_max_streams_frame(uint8_t *dst, int uni, uint64_t count) /* uni selects the unidirectional variant */
+{
+ *dst++ = uni ? QUICLY_FRAME_TYPE_MAX_STREAMS_UNI : QUICLY_FRAME_TYPE_MAX_STREAMS_BIDI;
+ dst = quicly_encodev(dst, count);
+ return dst;
+}
+
+inline int quicly_decode_max_streams_frame(const uint8_t **src, const uint8_t *end, quicly_max_streams_frame_t *frame) /* parses MAX_STREAMS (either direction) */
+{
+ if ((frame->count = quicly_decodev(src, end)) == UINT64_MAX)
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+ if (frame->count > (uint64_t)1 << 60) /* stream-count limit (2^60) */
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+ return 0;
+}
+
+inline int quicly_decode_path_challenge_frame(const uint8_t **src, const uint8_t *end, quicly_path_challenge_frame_t *frame) /* parses the 8-byte payload of PATH_CHALLENGE / PATH_RESPONSE; frame->data aliases the receive buffer */
+{
+ if (end - *src < QUICLY_PATH_CHALLENGE_DATA_LEN) /* single bound check; the former `< 1` test was redundant (subsumed by this one) */
+ goto Error;
+ frame->data = *src;
+ *src += QUICLY_PATH_CHALLENGE_DATA_LEN;
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline int quicly_decode_data_blocked_frame(const uint8_t **src, const uint8_t *end, quicly_data_blocked_frame_t *frame) /* parses DATA_BLOCKED */
+{
+ if ((frame->offset = quicly_decodev(src, end)) == UINT64_MAX)
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+ return 0;
+}
+
+inline int quicly_decode_stream_data_blocked_frame(const uint8_t **src, const uint8_t *end,
+ quicly_stream_data_blocked_frame_t *frame) /* parses STREAM_DATA_BLOCKED */
+{
+ if ((frame->stream_id = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((frame->offset = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline uint8_t *quicly_encode_streams_blocked_frame(uint8_t *dst, int uni, uint64_t count) /* caller must reserve QUICLY_STREAMS_BLOCKED_FRAME_CAPACITY bytes */
+{
+ *dst++ = uni ? QUICLY_FRAME_TYPE_STREAMS_BLOCKED_UNI : QUICLY_FRAME_TYPE_STREAMS_BLOCKED_BIDI;
+ dst = quicly_encodev(dst, count);
+ return dst;
+}
+
+inline int quicly_decode_streams_blocked_frame(const uint8_t **src, const uint8_t *end, quicly_streams_blocked_frame_t *frame) /* parses STREAMS_BLOCKED (either direction) */
+{
+ if ((frame->count = quicly_decodev(src, end)) == UINT64_MAX)
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+ if (frame->count > (uint64_t)1 << 60) /* stream-count limit (2^60) */
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+ return 0;
+}
+
+inline int quicly_decode_new_connection_id_frame(const uint8_t **src, const uint8_t *end, quicly_new_connection_id_frame_t *frame) /* parses NEW_CONNECTION_ID; cid and token alias the receive buffer */
+{
+ /* sequence */
+ if ((frame->sequence = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Fail;
+ if ((frame->retire_prior_to = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Fail;
+ if (end - *src < 1)
+ goto Fail;
+
+ { /* cid */
+ uint8_t cid_len = *(*src)++;
+ if (!(1 <= cid_len && cid_len <= QUICLY_MAX_CID_LEN_V1))
+ goto Fail;
+ if ((size_t)(end - *src) < cid_len) /* FIX: ensure the CID bytes are actually present before aliasing them */
+ goto Fail;
+ frame->cid = ptls_iovec_init(*src, cid_len); /* FIX: was `src` (address of the cursor variable), so cid pointed at the local pointer instead of the CID bytes */
+ *src += cid_len;
+ }
+
+ /* stateless reset token */
+ if (end - *src < QUICLY_STATELESS_RESET_TOKEN_LEN)
+ goto Fail;
+ frame->stateless_reset_token = *src;
+ *src += QUICLY_STATELESS_RESET_TOKEN_LEN;
+
+ return 0;
+Fail:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline uint8_t *quicly_encode_stop_sending_frame(uint8_t *dst, uint64_t stream_id, uint16_t app_error_code) /* caller must reserve QUICLY_STOP_SENDING_FRAME_CAPACITY bytes */
+{
+ *dst++ = QUICLY_FRAME_TYPE_STOP_SENDING;
+ dst = quicly_encodev(dst, stream_id);
+ dst = quicly_encodev(dst, app_error_code);
+ return dst;
+}
+
+inline int quicly_decode_stop_sending_frame(const uint8_t **src, const uint8_t *end, quicly_stop_sending_frame_t *frame) /* parses STOP_SENDING */
+{
+ uint64_t error_code;
+
+ if ((frame->stream_id = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if ((error_code = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ frame->app_error_code = (uint16_t)error_code; /* wire value truncated to 16 bits */
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+inline size_t quicly_new_token_frame_capacity(ptls_iovec_t token) /* type byte + length varint + token bytes */
+{
+ return 1 + quicly_encodev_capacity(token.len) + token.len;
+}
+
+inline uint8_t *quicly_encode_new_token_frame(uint8_t *dst, ptls_iovec_t token) /* caller must reserve quicly_new_token_frame_capacity(token) bytes */
+{
+ *dst++ = QUICLY_FRAME_TYPE_NEW_TOKEN;
+ dst = quicly_encodev(dst, token.len);
+ memcpy(dst, token.base, token.len);
+ dst += token.len;
+ return dst;
+}
+
+inline int quicly_decode_new_token_frame(const uint8_t **src, const uint8_t *end, quicly_new_token_frame_t *frame) /* parses NEW_TOKEN; token aliases the receive buffer */
+{
+ uint64_t token_len;
+ if ((token_len = quicly_decodev(src, end)) == UINT64_MAX)
+ goto Error;
+ if (token_len == 0) /* empty tokens are rejected */
+ goto Error;
+ if ((uint64_t)(end - *src) < token_len)
+ goto Error;
+ frame->token = ptls_iovec_init(*src, (size_t)token_len);
+ *src += frame->token.len;
+ return 0;
+Error:
+ return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/* The MIT License
+
+ Copyright (c) 2008 Broad Institute / Massachusetts Institute of Technology
+ 2011 Attractive Chaos <attractor@live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+*/
+
+/* The BGZF library was originally written by Bob Handsaker from the Broad
+ * Institute. It was later improved by the SAMtools developers. */
+
+#ifndef __BGZF_H
+#define __BGZF_H
+
+#include <stdint.h>
+#include <stdio.h>
+#include <zlib.h>
+
+#define BGZF_BLOCK_SIZE 0x10000
+#define BGZF_MAX_BLOCK_SIZE 0x10000
+
+#define BGZF_ERR_ZLIB 1
+#define BGZF_ERR_HEADER 2
+#define BGZF_ERR_IO 4
+#define BGZF_ERR_MISUSE 8
+
+typedef struct {
+ int open_mode:8, compress_level:8, errcode:16; /* errcode: presumably OR-ed BGZF_ERR_* flags — confirm against bgzf.c */
+ int cache_size; /* cache size in bytes; 0 disables caching (see bgzf_set_cache_size) */
+ int block_length, block_offset; /* length of and offset into the current uncompressed block */
+ int64_t block_address; /* file offset of the current compressed block */
+ void *uncompressed_block, *compressed_block;
+ void *cache; // a pointer to a hash table
+ void *fp; // actual file handler; FILE* on writing; FILE* or knetFile* on reading
+} BGZF;
+
+#ifndef KSTRING_T
+#define KSTRING_T kstring_t
+typedef struct __kstring_t {
+ size_t l, m;
+ char *s;
+} kstring_t;
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /******************
+ * Basic routines *
+ ******************/
+
+ /**
+ * Open an existing file descriptor for reading or writing.
+ *
+ * @param fd file descriptor
+ * @param mode mode matching /[rwu0-9]+/: 'r' for reading, 'w' for writing and a digit specifies
+ * the zlib compression level; if both 'r' and 'w' are present, 'w' is ignored.
+ * @return BGZF file handler; 0 on error
+ */
+ BGZF* bgzf_dopen(int fd, const char *mode);
+
+ #define bgzf_fdopen(fd, mode) bgzf_dopen((fd), (mode)) // for backward compatibility
+
+ /**
+ * Open the specified file for reading or writing.
+ */
+ BGZF* bgzf_open(const char* path, const char *mode);
+
+ /**
+ * Close the BGZF and free all associated resources.
+ *
+ * @param fp BGZF file handler
+ * @return 0 on success and -1 on error
+ */
+ int bgzf_close(BGZF *fp);
+
+ /**
+ * Read up to _length_ bytes from the file storing into _data_.
+ *
+ * @param fp BGZF file handler
+ * @param data data array to read into
+ * @param length size of data to read
+ * @return number of bytes actually read; 0 on end-of-file and -1 on error
+ */
+ ssize_t bgzf_read(BGZF *fp, void *data, ssize_t length);
+
+ /**
+ * Write _length_ bytes from _data_ to the file.
+ *
+ * @param fp BGZF file handler
+ * @param data data array to write
+ * @param length size of data to write
+ * @return number of bytes actually written; -1 on error
+ */
+ ssize_t bgzf_write(BGZF *fp, const void *data, ssize_t length);
+
+ /**
+ * Write the data in the buffer to the file.
+ */
+ int bgzf_flush(BGZF *fp);
+
+ /**
+ * Return a virtual file pointer to the current location in the file.
+ * No interpretation of the value should be made, other than a subsequent
+ * call to bgzf_seek can be used to position the file at the same point.
+ * Return value is non-negative on success.
+ */
+ #define bgzf_tell(fp) ((fp->block_address << 16) | (fp->block_offset & 0xFFFF))
+
+ /**
+ * Set the file to read from the location specified by _pos_.
+ *
+ * @param fp BGZF file handler
+ * @param pos virtual file offset returned by bgzf_tell()
+ * @param whence must be SEEK_SET
+ * @return 0 on success and -1 on error
+ */
+ int64_t bgzf_seek(BGZF *fp, int64_t pos, int whence);
+
+ /**
+ * Check if the BGZF end-of-file (EOF) marker is present
+ *
+ * @param fp BGZF file handler opened for reading
+ * @return 1 if EOF is present; 0 if not or on I/O error
+ */
+ int bgzf_check_EOF(BGZF *fp);
+
+ /**
+ * Check if a file is in the BGZF format
+ *
+ * @param fn file name
+ * @return 1 if _fn_ is BGZF; 0 if not or on I/O error
+ */
+ int bgzf_is_bgzf(const char *fn);
+
+ /*********************
+ * Advanced routines *
+ *********************/
+
+ /**
+ * Set the cache size. Only effective when compiled with -DBGZF_CACHE.
+ *
+ * @param fp BGZF file handler
+ * @param size size of cache in bytes; 0 to disable caching (default)
+ */
+ void bgzf_set_cache_size(BGZF *fp, int size);
+
+ /**
+ * Flush the file if the remaining buffer size is smaller than _size_
+ */
+ int bgzf_flush_try(BGZF *fp, ssize_t size);
+
+ /**
+ * Read one byte from a BGZF file. It is faster than bgzf_read()
+ * @param fp BGZF file handler
+ * @return byte read; -1 on end-of-file or error
+ */
+ int bgzf_getc(BGZF *fp);
+
+ /**
+ * Read one line from a BGZF file. It is faster than bgzf_getc()
+ *
+ * @param fp BGZF file handler
+ * @param delim delimiter
+ * @param str string to write to; must be initialized
+ * @return length of the string; 0 on end-of-file; negative on error
+ */
+ int bgzf_getline(BGZF *fp, int delim, kstring_t *str);
+
+ /**
+ * Read the next BGZF block.
+ */
+ int bgzf_read_block(BGZF *fp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef KBIT_H
+#define KBIT_H
+
+#include <stdint.h>
+
+static inline uint64_t kbi_popcount64(uint64_t y) // standard popcount; from wikipedia
+{
+ y -= ((y >> 1) & 0x5555555555555555ull); // fold adjacent bit pairs into 2-bit counts
+ y = (y & 0x3333333333333333ull) + (y >> 2 & 0x3333333333333333ull); // 2-bit counts -> 4-bit counts
+ return ((y + (y >> 4)) & 0xf0f0f0f0f0f0f0full) * 0x101010101010101ull >> 56; // sum per-byte counts into the top byte
+}
+
+static inline uint64_t kbi_DNAcount64(uint64_t y, int c) // count #A/C/G/T from a 2-bit encoded integer; from BWA
+{
+ // reduce nucleotide counting to bits counting
+ y = ((c&2)? y : ~y) >> 1 & ((c&1)? y : ~y) & 0x5555555555555555ull; // set bit where the 2-bit code equals c
+ // count the number of 1s in y
+ y = (y & 0x3333333333333333ull) + (y >> 2 & 0x3333333333333333ull);
+ return ((y + (y >> 4)) & 0xf0f0f0f0f0f0f0full) * 0x101010101010101ull >> 56;
+}
+
+#ifndef kroundup32 // round a 32-bit integer up to the next closest power of 2; from "bit twiddling hacks"
+#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+#endif
+
+#ifndef kbi_swap
+#define kbi_swap(a, b) (((a) ^= (b)), ((b) ^= (a)), ((a) ^= (b))) // XOR swap, from "bit twiddling hacks"; zeroes the value if a and b are the same object
+#endif
+
+#endif
--- /dev/null
+/*-
+ * Copyright 1997-1999, 2001, John-Mark Gurney.
+ * 2008-2009, Attractive Chaos <attractor@live.co.uk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __AC_KBTREE_H
+#define __AC_KBTREE_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+#define KB_MAX_DEPTH 64 // iterator stack depth; bounds the tallest supported tree
+
+typedef struct {
+	int32_t is_internal:1, n:31; // n = number of keys currently held in the node
+} kbnode_t;
+
+// a position in the tree: node plus key index within that node
+typedef struct {
+	kbnode_t *x;
+	int i;
+} kbpos_t;
+
+// iterator: explicit stack of positions from the root down to the current key
+typedef struct {
+	kbpos_t stack[KB_MAX_DEPTH], *p;
+} kbitr_t;
+
+// keys start 4 bytes into a node, right after the kbnode_t header
+#define __KB_KEY(type, x) ((type*)((char*)x + 4))
+// child pointers live at byte offset off_ptr, past the key array (internal nodes only)
+#define __KB_PTR(btr, x) ((kbnode_t**)((char*)x + btr->off_ptr))
+
+#define __KB_TREE_T(name) \
+	typedef struct { \
+		kbnode_t *root; \
+		int off_key, off_ptr, ilen, elen; \
+		int n, t; \
+		int n_keys, n_nodes; \
+	} kbtree_##name##_t;
+
+// kb_init_##name(size): allocate a tree whose nodes occupy roughly `size` bytes.
+// t is the B-tree minimum degree derived from `size`; returns 0 when `size`
+// is too small to give t >= 2. ilen/elen are the (4-byte aligned) byte sizes
+// of internal and external (leaf) nodes respectively.
+// NOTE(review): calloc results are not checked; allocation failure dereferences NULL.
+#define __KB_INIT(name, key_t) \
+	kbtree_##name##_t *kb_init_##name(int size) \
+	{ \
+		kbtree_##name##_t *b; \
+		b = (kbtree_##name##_t*)calloc(1, sizeof(kbtree_##name##_t)); \
+		b->t = ((size - 4 - sizeof(void*)) / (sizeof(void*) + sizeof(key_t)) + 1) >> 1; \
+		if (b->t < 2) { \
+			free(b); return 0; \
+		} \
+		b->n = 2 * b->t - 1; \
+		b->off_ptr = 4 + b->n * sizeof(key_t); \
+		b->ilen = (4 + sizeof(void*) + b->n * (sizeof(void*) + sizeof(key_t)) + 3) >> 2 << 2; \
+		b->elen = (b->off_ptr + 3) >> 2 << 2; \
+		b->root = (kbnode_t*)calloc(1, b->ilen); \
+		++b->n_nodes; \
+		return b; \
+	}
+
+// Free every node via an iterative traversal over an explicit, growable
+// stack (no recursion), then free the tree struct itself. Safe when b is NULL.
+// NOTE(review): the realloc result is not checked.
+#define __kb_destroy(b) do { \
+		int i, max = 8; \
+		kbnode_t *x, **top, **stack = 0; \
+		if (b) { \
+			top = stack = (kbnode_t**)calloc(max, sizeof(kbnode_t*)); \
+			*top++ = (b)->root; \
+			while (top != stack) { \
+				x = *--top; \
+				if (x->is_internal == 0) { free(x); continue; } \
+				for (i = 0; i <= x->n; ++i) \
+					if (__KB_PTR(b, x)[i]) { \
+						if (top - stack == max) { \
+							max <<= 1; \
+							stack = (kbnode_t**)realloc(stack, max * sizeof(kbnode_t*)); \
+							top = stack + (max>>1); \
+						} \
+						*top++ = __KB_PTR(b, x)[i]; \
+					} \
+				free(x); \
+			} \
+		} \
+		free(b); free(stack); \
+	} while (0)
+
+// Binary-search a single node for *k. Returns the largest index i such that
+// key[i] <= *k, or -1 when *k sorts before every key (or the node is empty).
+// *r receives the final comparison result; 0 means key[i] equals *k exactly.
+#define __KB_GET_AUX1(name, key_t, __cmp) \
+	static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, const key_t * __restrict k, int *r) \
+	{ \
+		int tr, *rr, begin = 0, end = x->n; \
+		if (x->n == 0) return -1; \
+		rr = r? r : &tr; \
+		while (begin < end) { \
+			int mid = (begin + end) >> 1; \
+			if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
+			else end = mid; \
+		} \
+		if (begin == x->n) { *rr = 1; return x->n - 1; } \
+		if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; \
+		return begin; \
+	}
+
+// Point lookup: descend from the root, binary-searching each node; returns a
+// pointer to the stored key equal to *k, or 0 when absent.
+#define __KB_GET(name, key_t) \
+	static key_t *kb_getp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
+	{ \
+		int i, r = 0; \
+		kbnode_t *x = b->root; \
+		while (x) { \
+			i = __kb_getp_aux_##name(x, k, &r); \
+			if (i >= 0 && r == 0) return &__KB_KEY(key_t, x)[i]; \
+			if (x->is_internal == 0) return 0; \
+			x = __KB_PTR(b, x)[i + 1]; \
+		} \
+		return 0; \
+	} \
+	static inline key_t *kb_get_##name(kbtree_##name##_t *b, const key_t k) \
+	{ \
+		return kb_getp_##name(b, &k); \
+	}
+
+// Bracket *k: on an exact match both *lower and *upper point at the stored
+// key; otherwise *lower is left at the closest key below k and *upper at the
+// closest key above it seen on the search path (either may remain 0).
+#define __KB_INTERVAL(name, key_t) \
+	static void kb_intervalp_##name(kbtree_##name##_t *b, const key_t * __restrict k, key_t **lower, key_t **upper) \
+	{ \
+		int i, r = 0; \
+		kbnode_t *x = b->root; \
+		*lower = *upper = 0; \
+		while (x) { \
+			i = __kb_getp_aux_##name(x, k, &r); \
+			if (i >= 0 && r == 0) { \
+				*lower = *upper = &__KB_KEY(key_t, x)[i]; \
+				return; \
+			} \
+			if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; \
+			if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; \
+			if (x->is_internal == 0) return; \
+			x = __KB_PTR(b, x)[i + 1]; \
+		} \
+	} \
+	static inline void kb_interval_##name(kbtree_##name##_t *b, const key_t k, key_t **lower, key_t **upper) \
+	{ \
+		kb_intervalp_##name(b, &k, lower, upper); \
+	}
+
+// Insertion, CLRS-style: full children are split proactively on the way
+// down, so the recursion never has to back up. __kb_split splits the full
+// i-th child y of internal node x, promoting y's median key into x.
+// Note: the leaf path never checks for an exact match, so inserting an
+// equal key stores a duplicate rather than replacing the existing one.
+// NOTE(review): calloc results are not checked.
+#define __KB_PUT(name, key_t, __cmp) \
+	/* x must be an internal node */ \
+	static void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
+	{ \
+		kbnode_t *z; \
+		z = (kbnode_t*)calloc(1, y->is_internal? b->ilen : b->elen); \
+		++b->n_nodes; \
+		z->is_internal = y->is_internal; \
+		z->n = b->t - 1; \
+		memcpy(__KB_KEY(key_t, z), __KB_KEY(key_t, y) + b->t, sizeof(key_t) * (b->t - 1)); \
+		if (y->is_internal) memcpy(__KB_PTR(b, z), __KB_PTR(b, y) + b->t, sizeof(void*) * b->t); \
+		y->n = b->t - 1; \
+		memmove(__KB_PTR(b, x) + i + 2, __KB_PTR(b, x) + i + 1, sizeof(void*) * (x->n - i)); \
+		__KB_PTR(b, x)[i + 1] = z; \
+		memmove(__KB_KEY(key_t, x) + i + 1, __KB_KEY(key_t, x) + i, sizeof(key_t) * (x->n - i)); \
+		__KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[b->t - 1]; \
+		++x->n; \
+	} \
+	static key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, const key_t * __restrict k) \
+	{ \
+		int i = x->n - 1; \
+		key_t *ret; \
+		if (x->is_internal == 0) { \
+			i = __kb_getp_aux_##name(x, k, 0); \
+			if (i != x->n - 1) \
+				memmove(__KB_KEY(key_t, x) + i + 2, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
+			ret = &__KB_KEY(key_t, x)[i + 1]; \
+			*ret = *k; \
+			++x->n; \
+		} else { \
+			i = __kb_getp_aux_##name(x, k, 0) + 1; \
+			if (__KB_PTR(b, x)[i]->n == 2 * b->t - 1) { \
+				__kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
+				if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; \
+			} \
+			ret = __kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
+		} \
+		return ret; \
+	} \
+	static key_t *kb_putp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
+	{ \
+		kbnode_t *r, *s; \
+		++b->n_keys; \
+		r = b->root; \
+		if (r->n == 2 * b->t - 1) { /* root full: grow the tree by one level */ \
+			++b->n_nodes; \
+			s = (kbnode_t*)calloc(1, b->ilen); \
+			b->root = s; s->is_internal = 1; s->n = 0; \
+			__KB_PTR(b, s)[0] = r; \
+			__kb_split_##name(b, s, 0, r); \
+			r = s; \
+		} \
+		return __kb_putp_aux_##name(b, r, k); \
+	} \
+	static inline void kb_put_##name(kbtree_##name##_t *b, const key_t k) \
+	{ \
+		kb_putp_##name(b, &k); \
+	}
+
+
+// Deletion, CLRS-style: before descending into a child, the child is topped
+// up to at least t keys (borrow from a sibling, or merge with one), so the
+// actual removal at a leaf can never underflow an ancestor.
+// s selects a mode: 0 = delete *k; 1 = extract the subtree's maximum
+// (predecessor); 2 = extract the subtree's minimum (successor). Modes 1/2
+// are used to replace a key deleted from an internal node.
+#define __KB_DEL(name, key_t) \
+	static key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, const key_t * __restrict k, int s) \
+	{ \
+		int yn, zn, i, r = 0; \
+		kbnode_t *xp, *y, *z; \
+		key_t kp; \
+		if (x == 0) return *k; \
+		if (s) { /* s can only be 0, 1 or 2 */ \
+			r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
+			i = s == 1? x->n - 1 : -1; \
+		} else i = __kb_getp_aux_##name(x, k, &r); \
+		if (x->is_internal == 0) { \
+			if (s == 2) ++i; \
+			kp = __KB_KEY(key_t, x)[i]; \
+			memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
+			--x->n; \
+			return kp; \
+		} \
+		if (r == 0) { /* found *k in an internal node */ \
+			if ((yn = __KB_PTR(b, x)[i]->n) >= b->t) { /* replace with predecessor */ \
+				xp = __KB_PTR(b, x)[i]; \
+				kp = __KB_KEY(key_t, x)[i]; \
+				__KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
+				return kp; \
+			} else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= b->t) { /* replace with successor */ \
+				xp = __KB_PTR(b, x)[i + 1]; \
+				kp = __KB_KEY(key_t, x)[i]; \
+				__KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
+				return kp; \
+			} else if (yn == b->t - 1 && zn == b->t - 1) { /* merge both children around *k */ \
+				y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
+				__KB_KEY(key_t, y)[y->n++] = *k; \
+				memmove(__KB_KEY(key_t, y) + y->n, __KB_KEY(key_t, z), z->n * sizeof(key_t)); \
+				if (y->is_internal) memmove(__KB_PTR(b, y) + y->n, __KB_PTR(b, z), (z->n + 1) * sizeof(void*)); \
+				y->n += z->n; \
+				memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
+				memmove(__KB_PTR(b, x) + i + 1, __KB_PTR(b, x) + i + 2, (x->n - i - 1) * sizeof(void*)); \
+				--x->n; \
+				free(z); \
+				return __kb_delp_aux_##name(b, y, k, s); \
+			} \
+		} \
+		++i; \
+		if ((xp = __KB_PTR(b, x)[i])->n == b->t - 1) { /* top up the child before descending */ \
+			if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= b->t) { /* borrow from left sibling */ \
+				memmove(__KB_KEY(key_t, xp) + 1, __KB_KEY(key_t, xp), xp->n * sizeof(key_t)); \
+				if (xp->is_internal) memmove(__KB_PTR(b, xp) + 1, __KB_PTR(b, xp), (xp->n + 1) * sizeof(void*)); \
+				__KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
+				__KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
+				if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
+				--y->n; ++xp->n; \
+			} else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= b->t) { /* borrow from right sibling */ \
+				__KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
+				__KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
+				if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
+				--y->n; \
+				memmove(__KB_KEY(key_t, y), __KB_KEY(key_t, y) + 1, y->n * sizeof(key_t)); \
+				if (y->is_internal) memmove(__KB_PTR(b, y), __KB_PTR(b, y) + 1, (y->n + 1) * sizeof(void*)); \
+			} else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == b->t - 1) { /* merge into left sibling */ \
+				__KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
+				memmove(__KB_KEY(key_t, y) + y->n, __KB_KEY(key_t, xp), xp->n * sizeof(key_t)); \
+				if (y->is_internal) memmove(__KB_PTR(b, y) + y->n, __KB_PTR(b, xp), (xp->n + 1) * sizeof(void*)); \
+				y->n += xp->n; \
+				memmove(__KB_KEY(key_t, x) + i - 1, __KB_KEY(key_t, x) + i, (x->n - i) * sizeof(key_t)); \
+				memmove(__KB_PTR(b, x) + i, __KB_PTR(b, x) + i + 1, (x->n - i) * sizeof(void*)); \
+				--x->n; \
+				free(xp); \
+				xp = y; \
+			} else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == b->t - 1) { /* merge right sibling in */ \
+				__KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
+				memmove(__KB_KEY(key_t, xp) + xp->n, __KB_KEY(key_t, y), y->n * sizeof(key_t)); \
+				if (xp->is_internal) memmove(__KB_PTR(b, xp) + xp->n, __KB_PTR(b, y), (y->n + 1) * sizeof(void*)); \
+				xp->n += y->n; \
+				memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
+				memmove(__KB_PTR(b, x) + i + 1, __KB_PTR(b, x) + i + 2, (x->n - i - 1) * sizeof(void*)); \
+				--x->n; \
+				free(y); \
+			} \
+		} \
+		return __kb_delp_aux_##name(b, xp, k, s); \
+	} \
+	static key_t kb_delp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
+	{ \
+		kbnode_t *x; \
+		key_t ret; \
+		ret = __kb_delp_aux_##name(b, b->root, k, 0); \
+		--b->n_keys; \
+		if (b->root->n == 0 && b->root->is_internal) { /* shrink the tree by one level */ \
+			--b->n_nodes; \
+			x = b->root; \
+			b->root = __KB_PTR(b, x)[0]; \
+			free(x); \
+		} \
+		return ret; \
+	} \
+	static inline key_t kb_del_##name(kbtree_##name##_t *b, const key_t k) \
+	{ \
+		return kb_delp_##name(b, &k); \
+	}
+
+// Iterator API: kb_itr_first descends to the leftmost key; kb_itr_get
+// positions the iterator on an exact match (returns 0) or fails with -1;
+// kb_itr_next advances in ascending key order using the position stack.
+#define __KB_ITR(name, key_t) \
+	static inline void kb_itr_first_##name(kbtree_##name##_t *b, kbitr_t *itr) \
+	{ \
+		itr->p = 0; \
+		if (b->n_keys == 0) return; \
+		itr->p = itr->stack; \
+		itr->p->x = b->root; itr->p->i = 0; \
+		while (itr->p->x->is_internal && __KB_PTR(b, itr->p->x)[0] != 0) { \
+			kbnode_t *x = itr->p->x; \
+			++itr->p; \
+			itr->p->x = __KB_PTR(b, x)[0]; itr->p->i = 0; \
+		} \
+	} \
+	static int kb_itr_get_##name(kbtree_##name##_t *b, const key_t * __restrict k, kbitr_t *itr) \
+	{ \
+		int i, r = 0; \
+		itr->p = itr->stack; \
+		itr->p->x = b->root; itr->p->i = 0; \
+		while (itr->p->x) { \
+			i = __kb_getp_aux_##name(itr->p->x, k, &r); \
+			if (i >= 0 && r == 0) return 0; \
+			if (itr->p->x->is_internal == 0) return -1; \
+			itr->p[1].x = __KB_PTR(b, itr->p->x)[i + 1]; \
+			itr->p[1].i = i; \
+			++itr->p; \
+		} \
+		return -1; \
+	} \
+	static inline int kb_itr_next_##name(kbtree_##name##_t *b, kbitr_t *itr) \
+	{ \
+		if (itr->p < itr->stack) return 0; \
+		for (;;) { \
+			++itr->p->i; \
+			while (itr->p->x && itr->p->i <= itr->p->x->n) { \
+				itr->p[1].i = 0; \
+				itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
+				++itr->p; \
+			} \
+			--itr->p; \
+			if (itr->p < itr->stack) return 0; \
+			if (itr->p->x && itr->p->i < itr->p->x->n) return 1; \
+		} \
+	}
+
+// Instantiate the complete B-tree API for `name` with key type `key_t` and
+// three-way comparator `__cmp`.
+#define KBTREE_INIT(name, key_t, __cmp) \
+	__KB_TREE_T(name) \
+	__KB_INIT(name, key_t) \
+	__KB_GET_AUX1(name, key_t, __cmp) \
+	__KB_GET(name, key_t) \
+	__KB_INTERVAL(name, key_t) \
+	__KB_PUT(name, key_t, __cmp) \
+	__KB_DEL(name, key_t) \
+	__KB_ITR(name, key_t)
+
+#define KB_DEFAULT_SIZE 512 // default per-node byte budget for kb_init
+
+// convenience wrappers over the generated kb_*_##name functions
+#define kbtree_t(name) kbtree_##name##_t
+#define kb_init(name, s) kb_init_##name(s)
+#define kb_destroy(name, b) __kb_destroy(b)
+#define kb_get(name, b, k) kb_get_##name(b, k)
+#define kb_put(name, b, k) kb_put_##name(b, k)
+#define kb_del(name, b, k) kb_del_##name(b, k)
+#define kb_interval(name, b, k, l, u) kb_interval_##name(b, k, l, u)
+#define kb_getp(name, b, k) kb_getp_##name(b, k)
+#define kb_putp(name, b, k) kb_putp_##name(b, k)
+#define kb_delp(name, b, k) kb_delp_##name(b, k)
+#define kb_intervalp(name, b, k, l, u) kb_intervalp_##name(b, k, l, u)
+
+#define kb_itr_first(name, b, i) kb_itr_first_##name(b, i)
+#define kb_itr_get(name, b, k, i) kb_itr_get_##name(b, k, i)
+#define kb_itr_next(name, b, i) kb_itr_next_##name(b, i)
+#define kb_itr_key(type, itr) __KB_KEY(type, (itr)->p->x)[(itr)->p->i] // key at the iterator's position
+#define kb_itr_valid(itr) ((itr)->p >= (itr)->stack)
+
+#define kb_size(b) ((b)->n_keys)
+
+#define kb_generic_cmp(a, b) (((b) < (a)) - ((a) < (b))) // overflow-safe three-way compare
+#define kb_str_cmp(a, b) strcmp(a, b)
+
+/* The following is *DEPRECATED*!!! Use the iterator interface instead! */
+
+typedef struct {
+	kbnode_t *x;
+	int i;
+} __kbstack_t;
+
+// Depth-first traversal calling __func on every key, using a heap-grown
+// explicit stack; kept only for backward compatibility.
+#define __kb_traverse(key_t, b, __func) do { \
+		int __kmax = 8; \
+		__kbstack_t *__kstack, *__kp; \
+		__kp = __kstack = (__kbstack_t*)calloc(__kmax, sizeof(__kbstack_t)); \
+		__kp->x = (b)->root; __kp->i = 0; \
+		for (;;) { \
+			while (__kp->x && __kp->i <= __kp->x->n) { \
+				if (__kp - __kstack == __kmax - 1) { \
+					__kmax <<= 1; \
+					__kstack = (__kbstack_t*)realloc(__kstack, __kmax * sizeof(__kbstack_t)); \
+					__kp = __kstack + (__kmax>>1) - 1; \
+				} \
+				(__kp+1)->i = 0; (__kp+1)->x = __kp->x->is_internal? __KB_PTR(b, __kp->x)[__kp->i] : 0; \
+				++__kp; \
+			} \
+			--__kp; \
+			if (__kp >= __kstack) { \
+				if (__kp->x && __kp->i < __kp->x->n) __func(&__KB_KEY(key_t, __kp->x)[__kp->i]); \
+				++__kp->i; \
+			} else break; \
+		} \
+		free(__kstack); \
+	} while (0)
+
+// NOTE(review): this reads the child-pointer slot of every node, including
+// leaves, which are allocated with elen bytes only (no pointer region) —
+// suspected out-of-bounds read; prefer kb_itr_first/kb_itr_key instead.
+#define __kb_get_first(key_t, b, ret) do { \
+		kbnode_t *__x = (b)->root; \
+		while (__KB_PTR(b, __x)[0] != 0) \
+			__x = __KB_PTR(b, __x)[0]; \
+		(ret) = __KB_KEY(key_t, __x)[0]; \
+	} while (0)
+
+#endif
--- /dev/null
+#ifndef __AC_KDQ_H
+#define __AC_KDQ_H
+
+#include <stdlib.h>
+#include <string.h>
+
+// Double-ended queue on a power-of-two ring buffer. `front` is the index of
+// the first element, `count` the number of stored elements, `bits` the log2
+// of the capacity and `mask` == capacity - 1 (for index wrap-around).
+#define __KDQ_TYPE(type) \
+	typedef struct { \
+		size_t front:58, bits:6, count, mask; \
+		type *a; \
+	} kdq_##type##_t;
+
+#define kdq_t(type) kdq_##type##_t
+#define kdq_size(q) ((q)->count) // number of elements
+#define kdq_first(q) ((q)->a[(q)->front]) // unchecked: queue must be non-empty
+#define kdq_last(q) ((q)->a[((q)->front + (q)->count - 1) & (q)->mask]) // unchecked: queue must be non-empty
+#define kdq_at(q, i) ((q)->a[((q)->front + (i)) & (q)->mask]) // i-th element from the front
+
+// Ring-buffer deque implementation. Capacity is always a power of two
+// (1<<bits); indices wrap via `mask`. Allocation results are not checked
+// (klib convention).
+#define __KDQ_IMPL(type, SCOPE) \
+	SCOPE kdq_##type##_t *kdq_init_##type() \
+	{ \
+		kdq_##type##_t *q; \
+		q = (kdq_##type##_t*)calloc(1, sizeof(kdq_##type##_t)); \
+		q->bits = 2, q->mask = (1ULL<<q->bits) - 1; \
+		q->a = (type*)malloc((1<<q->bits) * sizeof(type)); \
+		return q; \
+	} \
+	SCOPE void kdq_destroy_##type(kdq_##type##_t *q) \
+	{ \
+		if (q == 0) return; \
+		free(q->a); free(q); \
+	} \
+	/* change capacity to 1<<new_bits, keeping the contents; returns the new bits */ \
+	SCOPE int kdq_resize_##type(kdq_##type##_t *q, int new_bits) \
+	{ \
+		size_t new_size = 1ULL<<new_bits, old_size = 1ULL<<q->bits; \
+		if (new_size < q->count) { /* not big enough */ \
+			int i; \
+			for (i = 0; i < 64; ++i) \
+				if (1ULL<<i > q->count) break; \
+			new_bits = i, new_size = 1ULL<<new_bits; \
+		} \
+		if (new_bits == q->bits) return q->bits; /* unchanged */ \
+		if (new_bits > q->bits) q->a = (type*)realloc(q->a, (1ULL<<new_bits) * sizeof(type)); \
+		if (q->front + q->count <= old_size) { /* unwrapped */ \
+			if (q->front + q->count > new_size) /* only happens for shrinking */ \
+				memmove(q->a, q->a + new_size, (q->front + q->count - new_size) * sizeof(type)); \
+		} else { /* wrapped */ \
+			memmove(q->a + (new_size - (old_size - q->front)), q->a + q->front, (old_size - q->front) * sizeof(type)); \
+			q->front = new_size - (old_size - q->front); \
+		} \
+		/* BUGFIX: shrink the buffer BEFORE updating q->bits. The original \
+		   tested `new_bits < q->bits` right after assigning q->bits = new_bits, \
+		   so the condition was always false and the shrinking realloc was \
+		   dead code — memory was never released when shrinking. */ \
+		if (new_bits < q->bits) q->a = (type*)realloc(q->a, (1ULL<<new_bits) * sizeof(type)); \
+		q->bits = new_bits, q->mask = (1ULL<<q->bits) - 1; \
+		return q->bits; \
+	} \
+	/* append: grow if full, return a pointer to the new (uninitialised) slot */ \
+	SCOPE type *kdq_pushp_##type(kdq_##type##_t *q) \
+	{ \
+		if (q->count == 1ULL<<q->bits) kdq_resize_##type(q, q->bits + 1); \
+		return &q->a[((q->count++) + q->front) & (q)->mask]; \
+	} \
+	SCOPE void kdq_push_##type(kdq_##type##_t *q, type v) \
+	{ \
+		if (q->count == 1ULL<<q->bits) kdq_resize_##type(q, q->bits + 1); \
+		q->a[((q->count++) + q->front) & (q)->mask] = v; \
+	} \
+	/* prepend: grow if full, step front back one slot (with wrap) */ \
+	SCOPE type *kdq_unshiftp_##type(kdq_##type##_t *q) \
+	{ \
+		if (q->count == 1ULL<<q->bits) kdq_resize_##type(q, q->bits + 1); \
+		++q->count; \
+		q->front = q->front? q->front - 1 : (1ULL<<q->bits) - 1; \
+		return &q->a[q->front]; \
+	} \
+	SCOPE void kdq_unshift_##type(kdq_##type##_t *q, type v) \
+	{ \
+		type *p; \
+		p = kdq_unshiftp_##type(q); \
+		*p = v; \
+	} \
+	/* remove and return the last element, or 0 when empty */ \
+	SCOPE type *kdq_pop_##type(kdq_##type##_t *q) \
+	{ \
+		return q->count? &q->a[((--q->count) + q->front) & q->mask] : 0; \
+	} \
+	/* remove and return the first element, or 0 when empty */ \
+	SCOPE type *kdq_shift_##type(kdq_##type##_t *q) \
+	{ \
+		type *d = 0; \
+		if (q->count == 0) return 0; \
+		d = &q->a[q->front++]; \
+		q->front &= q->mask; \
+		--q->count; \
+		return d; \
+	}
+
+// instantiate type + implementation with a caller-chosen linkage scope
+#define KDQ_INIT2(type, SCOPE) \
+	__KDQ_TYPE(type) \
+	__KDQ_IMPL(type, SCOPE)
+
+#ifndef klib_unused
+#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+#define klib_unused __attribute__ ((__unused__))
+#else
+#define klib_unused
+#endif
+#endif /* klib_unused */
+
+// instantiate a translation-unit-local (static) deque for `type`
+#define KDQ_INIT(type) KDQ_INIT2(type, static inline klib_unused)
+
+// prototypes only, for pairing with an extern KDQ_INIT2 instantiation elsewhere
+#define KDQ_DECLARE(type) \
+	__KDQ_TYPE(type) \
+	kdq_##type##_t *kdq_init_##type(); \
+	void kdq_destroy_##type(kdq_##type##_t *q); \
+	int kdq_resize_##type(kdq_##type##_t *q, int new_bits); \
+	type *kdq_pushp_##type(kdq_##type##_t *q); \
+	void kdq_push_##type(kdq_##type##_t *q, type v); \
+	type *kdq_unshiftp_##type(kdq_##type##_t *q); \
+	void kdq_unshift_##type(kdq_##type##_t *q, type v); \
+	type *kdq_pop_##type(kdq_##type##_t *q); \
+	type *kdq_shift_##type(kdq_##type##_t *q);
+
+// convenience wrappers over the generated kdq_*_##type functions
+#define kdq_init(type) kdq_init_##type()
+#define kdq_destroy(type, q) kdq_destroy_##type(q)
+#define kdq_resize(type, q, new_bits) kdq_resize_##type(q, new_bits)
+#define kdq_pushp(type, q) kdq_pushp_##type(q)
+#define kdq_push(type, q, v) kdq_push_##type(q, v)
+#define kdq_pop(type, q) kdq_pop_##type(q)
+#define kdq_unshiftp(type, q) kdq_unshiftp_##type(q)
+#define kdq_unshift(type, q, v) kdq_unshift_##type(q, v)
+#define kdq_shift(type, q) kdq_shift_##type(q)
+
+#endif
--- /dev/null
+#ifndef KEXPR_H
+#define KEXPR_H
+
+#include <stdint.h>
+
+struct kexpr_s;
+typedef struct kexpr_s kexpr_t; // opaque handle to a parsed expression
+
+// Parse errors
+#define KEE_UNQU 0x01 // unmatched quotation marks
+#define KEE_UNLP 0x02 // unmatched left parentheses
+#define KEE_UNRP 0x04 // unmatched right parentheses
+#define KEE_UNOP 0x08 // unknown operators
+#define KEE_FUNC 0x10 // wrong function syntax
+#define KEE_ARG 0x20 // NOTE(review): undocumented here — presumably a function-argument error; confirm in kexpr.c
+#define KEE_NUM 0x40 // fail to parse a number
+
+// Evaluation errors
+// NOTE(review): KEE_UNFUNC reuses 0x40 (same value as the parse error
+// KEE_NUM); harmless only if parse and evaluation codes are never combined
+// in one bitmask — confirm against kexpr.c.
+#define KEE_UNFUNC 0x40 // undefined function
+#define KEE_UNVAR 0x80 // unassigned variable
+
+// Return type
+#define KEV_REAL 1
+#define KEV_INT 2
+#define KEV_STR 3
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	// parse an expression and return errors in $err
+	kexpr_t *ke_parse(const char *_s, int *err);
+
+	// free memory allocated during parsing
+	void ke_destroy(kexpr_t *ke);
+
+	// set a variable to integer value and return the occurrence of the variable
+	int ke_set_int(kexpr_t *ke, const char *var, int64_t x);
+
+	// set a variable to real value and return the occurrence of the variable
+	int ke_set_real(kexpr_t *ke, const char *var, double x);
+
+	// set a variable to string value and return the occurrence of the variable
+	int ke_set_str(kexpr_t *ke, const char *var, const char *x);
+
+	// set a user-defined function
+	int ke_set_real_func1(kexpr_t *ke, const char *name, double (*func)(double));
+	int ke_set_real_func2(kexpr_t *ke, const char *name, double (*func)(double, double));
+
+	// set default math functions
+	int ke_set_default_func(kexpr_t *ke);
+
+	// mark all variable as unset
+	void ke_unset(kexpr_t *e);
+
+	// evaluate expression; return error code; final value is returned via pointers
+	int ke_eval(const kexpr_t *ke, int64_t *_i, double *_r, const char **_s, int *ret_type);
+	int64_t ke_eval_int(const kexpr_t *ke, int *err);
+	double ke_eval_real(const kexpr_t *ke, int *err);
+
+	// print the expression in Reverse Polish notation (RPN)
+	void ke_print(const kexpr_t *ke);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef AC_KGRAPH_H
+#define AC_KGRAPH_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include "khash.h"
+#include "kbtree.h" /* NOTE(review): not referenced by the code visible in this header — confirm it is needed */
+
+typedef unsigned kgint_t; // vertex identifier
+
+// A graph is a khash of vertices keyed by id. Each vertex owns a hash
+// (`_arc`) of incident arcs keyed by `neighbour<<2 | dir`, where dir is a
+// 2-bit orientation. Every arc is stored on both endpoints; the mirror copy
+// uses the complemented orientation `~dir & 3`.
+#define kgraph_t(name) kh_##name##_t
+
+#define __KG_BASIC(name, SCOPE, vertex_t, arc_t, ehn) \
+	SCOPE kgraph_t(name) *kg_init_##name(void) { return kh_init(name); } \
+	/* free every vertex's arc hash, then the vertex hash itself */ \
+	SCOPE void kg_destroy_##name(kgraph_t(name) *g) { \
+		khint_t k; \
+		if (g == 0) return; \
+		for (k = kh_begin(g); k != kh_end(g); ++k) \
+			if (kh_exist(g, k)) kh_destroy(ehn, kh_val(g, k)._arc); \
+		kh_destroy(name, g); \
+	} \
+	/* look up vertex v; 0 when absent */ \
+	SCOPE vertex_t *kg_get_v_##name(kgraph_t(name) *g, kgint_t v) { \
+		khint_t k = kh_get(name, g, v); \
+		return k == kh_end(g)? 0 : &kh_val(g, k); \
+	} \
+	/* insert vertex v (allocating its arc hash when new) */ \
+	SCOPE vertex_t *kg_put_v_##name(kgraph_t(name) *g, kgint_t v, int *absent) { \
+		khint_t k; \
+		k = kh_put(name, g, v, absent); \
+		if (*absent) kh_val(g, k)._arc = kh_init(ehn); \
+		return &kh_val(g, k); \
+	} \
+	/* insert arc vbeg->vend with orientation dir, plus its mirror on vend */ \
+	SCOPE void kg_put_a_##name(kgraph_t(name) *g, kgint_t vbeg, kgint_t vend, int dir, arc_t **pb, arc_t **pe) { \
+		vertex_t *p; \
+		khint_t k; \
+		int absent; \
+		p = kg_put_v_##name(g, vbeg, &absent); \
+		k = kh_put(ehn, p->_arc, vend<<2|dir, &absent); \
+		*pb = &kh_val(p->_arc, k); \
+		p = kg_put_v_##name(g, vend, &absent); \
+		k = kh_put(ehn, p->_arc, vbeg<<2|(~dir&3), &absent); \
+		*pe = &kh_val(p->_arc, k); \
+	} \
+	/* delete vertex v and remove its mirror arcs from all neighbours */ \
+	SCOPE vertex_t *kg_del_v_##name(kgraph_t(name) *g, kgint_t v) { \
+		khint_t k, k0, k2, k3; \
+		khash_t(ehn) *h; \
+		k0 = k = kh_get(name, g, v); \
+		if (k == kh_end(g)) return 0; /* not present in the graph */ \
+		h = kh_val(g, k)._arc; \
+		for (k = kh_begin(h); k != kh_end(h); ++k) /* remove v from its neighbors */ \
+			if (kh_exist(h, k)) { \
+				k2 = kh_get(name, g, kh_key(h, k)>>2); \
+				/* assert(k2 != kh_end(g)); */ \
+				k3 = kh_get(ehn, kh_val(g, k2)._arc, v<<2|(~kh_key(h, k)&3)); \
+				/* assert(k3 != kh_end(kh_val(g, k2)._arc)); */ \
+				kh_del(ehn, kh_val(g, k2)._arc, k3); \
+			} \
+		kh_destroy(ehn, h); \
+		kh_del(name, g, k0); \
+		return &kh_val(g, k0); \
+	}
+
+// NOTE(review): uses printf but this header does not include <stdio.h>
+// (nor do the visible includes of khash.h/kbtree.h); the including file
+// must provide it.
+#define KGRAPH_PRINT(name, SCOPE) \
+	SCOPE void kg_print_##name(kgraph_t(name) *g) { \
+		khint_t k, k2; \
+		for (k = kh_begin(g); k != kh_end(g); ++k) \
+			if (kh_exist(g, k)) { \
+				printf("v %u\n", kh_key(g, k)); \
+				for (k2 = kh_begin(kh_val(g, k)._arc); k2 != kh_end(kh_val(g, k)._arc); ++k2) \
+					if (kh_exist(kh_val(g, k)._arc, k2) && kh_key(g, k) < kh_key(kh_val(g, k)._arc, k2)>>2) \
+						printf("a %u%c%c%u\n", kh_key(g, k), "><"[kh_key(kh_val(g, k)._arc, k2)>>1&1], \
+							"><"[kh_key(kh_val(g, k)._arc, k2)&1], kh_key(kh_val(g, k)._arc, k2)>>2); \
+			} \
+	}
+
+// vertex_t must contain a member `_arc` of type khash_t(ehn)*
+#define KGRAPH_INIT(name, SCOPE, vertex_t, arc_t, ehn) \
+	KHASH_INIT2(name, SCOPE, kgint_t, vertex_t, 1, kh_int_hash_func, kh_int_hash_equal) \
+	__KG_BASIC(name, SCOPE, vertex_t, arc_t, ehn)
+
+#endif
--- /dev/null
+/* The MIT License
+
+ Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/*
+ An example:
+
+#include "khash.h"
+KHASH_MAP_INIT_INT(32, char)
+int main() {
+ int ret, is_missing;
+ khiter_t k;
+ khash_t(32) *h = kh_init(32);
+ k = kh_put(32, h, 5, &ret);
+ kh_value(h, k) = 10;
+ k = kh_get(32, h, 10);
+ is_missing = (k == kh_end(h));
+ k = kh_get(32, h, 5);
+ kh_del(32, h, k);
+ for (k = kh_begin(h); k != kh_end(h); ++k)
+ if (kh_exist(h, k)) kh_value(h, k) = 1;
+ kh_destroy(32, h);
+ return 0;
+}
+*/
+
+/*
+ 2013-05-02 (0.2.8):
+
+ * Use quadratic probing. When the capacity is power of 2, stepping function
+ i*(i+1)/2 guarantees to traverse each bucket. It is better than double
+ hashing on cache performance and is more robust than linear probing.
+
+ In theory, double hashing should be more robust than quadratic probing.
+ However, my implementation is probably not for large hash tables, because
+ the second hash function is closely tied to the first hash function,
+   which reduces the effectiveness of double hashing.
+
+ Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php
+
+ 2011-12-29 (0.2.7):
+
+ * Minor code clean up; no actual effect.
+
+ 2011-09-16 (0.2.6):
+
+ * The capacity is a power of 2. This seems to dramatically improve the
+ speed for simple keys. Thank Zilong Tan for the suggestion. Reference:
+
+ - http://code.google.com/p/ulib/
+ - http://nothings.org/computer/judy/
+
+ * Allow to optionally use linear probing which usually has better
+ performance for random input. Double hashing is still the default as it
+ is more robust to certain non-random input.
+
+ * Added Wang's integer hash function (not used by default). This hash
+ function is more robust to certain non-random input.
+
+ 2011-02-14 (0.2.5):
+
+ * Allow to declare global functions.
+
+ 2009-09-26 (0.2.4):
+
+ * Improve portability
+
+ 2008-09-19 (0.2.3):
+
+ * Corrected the example
+ * Improved interfaces
+
+ 2008-09-11 (0.2.2):
+
+ * Improved speed a little in kh_put()
+
+ 2008-09-10 (0.2.1):
+
+ * Added kh_clear()
+ * Fixed a compiling error
+
+ 2008-09-02 (0.2.0):
+
+ * Changed to token concatenation which increases flexibility.
+
+ 2008-08-31 (0.1.2):
+
+ * Fixed a bug in kh_get(), which has not been tested previously.
+
+ 2008-08-31 (0.1.1):
+
+ * Added destructor
+*/
+
+
+#ifndef __AC_KHASH_H
+#define __AC_KHASH_H
+
+/*!
+  @header
+
+  Generic hash table library.
+ */
+
+#define AC_VERSION_KHASH_H "0.2.8"
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+/* compiler specific configuration */
+
+#if UINT_MAX == 0xffffffffu
+typedef unsigned int khint32_t;
+#elif ULONG_MAX == 0xffffffffu
+typedef unsigned long khint32_t;
+#endif
+
+#if ULONG_MAX == ULLONG_MAX
+typedef unsigned long khint64_t;
+#else
+typedef unsigned long long khint64_t;
+#endif
+
+#ifndef kh_inline
+#ifdef _MSC_VER
+#define kh_inline __inline
+#else
+#define kh_inline inline
+#endif
+#endif /* kh_inline */
+
+#ifndef klib_unused
+#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+#define klib_unused __attribute__ ((__unused__))
+#else
+#define klib_unused
+#endif
+#endif /* klib_unused */
+
+typedef khint32_t khint_t;
+typedef khint_t khiter_t;
+
+/* Two metadata bits per bucket, 16 buckets per khint32_t flag word:
+   bit 1 (value 2) = bucket is empty, bit 0 (value 1) = bucket was deleted.
+   The 0xaa fill pattern used elsewhere marks every bucket empty. */
+#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2)
+#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1)
+#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3)
+#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1)))
+#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1)))
+#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1)))
+#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1))
+
+// number of khint32_t flag words needed to cover m buckets (m/16, minimum 1)
+#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4)
+
+#ifndef kroundup32 // round up to the next power of two
+#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+#endif
+
+/* allocator hooks; define before inclusion to plug in a custom allocator */
+#ifndef kcalloc
+#define kcalloc(N,Z) calloc(N,Z)
+#endif
+#ifndef kmalloc
+#define kmalloc(Z) malloc(Z)
+#endif
+#ifndef krealloc
+#define krealloc(P,Z) realloc(P,Z)
+#endif
+#ifndef kfree
+#define kfree(P) free(P)
+#endif
+
+// maximum load factor before the table is grown
+static const double __ac_HASH_UPPER = 0.77;
+
+// Hash table instance: n_buckets is a power of two; flags packs two metadata
+// bits per bucket (see the __ac_* macros); keys/vals are parallel arrays.
+// size = live elements, n_occupied = live + deleted, upper_bound = resize
+// threshold derived from __ac_HASH_UPPER.
+#define __KHASH_TYPE(name, khkey_t, khval_t) \
+	typedef struct kh_##name##_s { \
+		khint_t n_buckets, size, n_occupied, upper_bound; \
+		khint32_t *flags; \
+		khkey_t *keys; \
+		khval_t *vals; \
+	} kh_##name##_t;
+
+// extern prototypes, for pairing with an externally-scoped KHASH_INIT2 instantiation
+#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
+	extern kh_##name##_t *kh_init_##name(void); \
+	extern void kh_destroy_##name(kh_##name##_t *h); \
+	extern void kh_clear_##name(kh_##name##_t *h); \
+	extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
+	extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
+	extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
+	extern void kh_del_##name(kh_##name##_t *h, khint_t x);
+
+#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+ SCOPE kh_##name##_t *kh_init_##name(void) { \
+ return (kh_##name##_t*)kcalloc(1, sizeof(kh_##name##_t)); \
+ } \
+ SCOPE void kh_destroy_##name(kh_##name##_t *h) \
+ { \
+ if (h) { \
+ kfree((void *)h->keys); kfree(h->flags); \
+ kfree((void *)h->vals); \
+ kfree(h); \
+ } \
+ } \
+ SCOPE void kh_clear_##name(kh_##name##_t *h) \
+ { \
+ if (h && h->flags) { \
+ memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \
+ h->size = h->n_occupied = 0; \
+ } \
+ } \
+ SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
+ { \
+ if (h->n_buckets) { \
+ khint_t k, i, last, mask, step = 0; \
+ mask = h->n_buckets - 1; \
+ k = __hash_func(key); i = k & mask; \
+ last = i; \
+ while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
+ i = (i + (++step)) & mask; \
+ if (i == last) return h->n_buckets; \
+ } \
+ return __ac_iseither(h->flags, i)? h->n_buckets : i; \
+ } else return 0; \
+ } \
+ SCOPE int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
+ { /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \
+ khint32_t *new_flags = 0; \
+ khint_t j = 1; \
+ { \
+ kroundup32(new_n_buckets); \
+ if (new_n_buckets < 4) new_n_buckets = 4; \
+ if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \
+ else { /* hash table size to be changed (shrink or expand); rehash */ \
+ new_flags = (khint32_t*)kmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
+ if (!new_flags) return -1; \
+ memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
+ if (h->n_buckets < new_n_buckets) { /* expand */ \
+ khkey_t *new_keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
+ if (!new_keys) { kfree(new_flags); return -1; } \
+ h->keys = new_keys; \
+ if (kh_is_map) { \
+ khval_t *new_vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
+ if (!new_vals) { kfree(new_flags); return -1; } \
+ h->vals = new_vals; \
+ } \
+ } /* otherwise shrink */ \
+ } \
+ } \
+ if (j) { /* rehashing is needed */ \
+ for (j = 0; j != h->n_buckets; ++j) { \
+ if (__ac_iseither(h->flags, j) == 0) { \
+ khkey_t key = h->keys[j]; \
+ khval_t val; \
+ khint_t new_mask; \
+ new_mask = new_n_buckets - 1; \
+ if (kh_is_map) val = h->vals[j]; \
+ __ac_set_isdel_true(h->flags, j); \
+ while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
+ khint_t k, i, step = 0; \
+ k = __hash_func(key); \
+ i = k & new_mask; \
+ while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \
+ __ac_set_isempty_false(new_flags, i); \
+ if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \
+ { khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
+ if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \
+ __ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \
+ } else { /* write the element and jump out of the loop */ \
+ h->keys[i] = key; \
+ if (kh_is_map) h->vals[i] = val; \
+ break; \
+ } \
+ } \
+ } \
+ } \
+ if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
+ h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
+ if (kh_is_map) h->vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
+ } \
+ kfree(h->flags); /* free the working space */ \
+ h->flags = new_flags; \
+ h->n_buckets = new_n_buckets; \
+ h->n_occupied = h->size; \
+ h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
+ } \
+ return 0; \
+ } \
+ SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
+ { \
+ khint_t x; \
+ if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
+ if (h->n_buckets > (h->size<<1)) { \
+ if (kh_resize_##name(h, h->n_buckets - 1) < 0) { /* clear "deleted" elements */ \
+ *ret = -1; return h->n_buckets; \
+ } \
+ } else if (kh_resize_##name(h, h->n_buckets + 1) < 0) { /* expand the hash table */ \
+ *ret = -1; return h->n_buckets; \
+ } \
+ } /* TODO: to implement automatically shrinking; resize() already support shrinking */ \
+ { \
+ khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \
+ x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \
+ if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \
+ else { \
+ last = i; \
+ while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
+ if (__ac_isdel(h->flags, i)) site = i; \
+ i = (i + (++step)) & mask; \
+ if (i == last) { x = site; break; } \
+ } \
+ if (x == h->n_buckets) { \
+ if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \
+ else x = i; \
+ } \
+ } \
+ } \
+ if (__ac_isempty(h->flags, x)) { /* not present at all */ \
+ h->keys[x] = key; \
+ __ac_set_isboth_false(h->flags, x); \
+ ++h->size; ++h->n_occupied; \
+ *ret = 1; \
+ } else if (__ac_isdel(h->flags, x)) { /* deleted */ \
+ h->keys[x] = key; \
+ __ac_set_isboth_false(h->flags, x); \
+ ++h->size; \
+ *ret = 2; \
+ } else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
+ return x; \
+ } \
+ SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
+ { \
+ if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \
+ __ac_set_isdel_true(h->flags, x); \
+ --h->size; \
+ } \
+ }
+
+/*! @function
+  @abstract Declare the hash-table type and extern function prototypes for
+    `name'; the matching implementation must be instantiated elsewhere
+    (e.g. with KHASH_INIT2) in exactly one translation unit.
+ */
+#define KHASH_DECLARE(name, khkey_t, khval_t) \
+	__KHASH_TYPE(name, khkey_t, khval_t) \
+	__KHASH_PROTOTYPES(name, khkey_t, khval_t)
+
+/*! @function
+  @abstract Instantiate the hash-table type and its full implementation,
+    with an explicit storage/linkage specifier passed as SCOPE.
+ */
+#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+	__KHASH_TYPE(name, khkey_t, khval_t) \
+	__KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
+
+/*! @function
+  @abstract Instantiate a hash table with static (file-local) linkage;
+    this is the common single-translation-unit case.
+ */
+#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+	KHASH_INIT2(name, static kh_inline klib_unused, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
+
+/* --- BEGIN OF HASH FUNCTIONS --- */
+
+/*! @function
+  @abstract Integer hash function
+  @param key The integer [khint32_t]
+  @return The hash value [khint_t]
+ */
+#define kh_int_hash_func(key) (khint32_t)(key)
+/*! @function
+  @abstract Integer comparison function
+ */
+#define kh_int_hash_equal(a, b) ((a) == (b))
+/*! @function
+  @abstract 64-bit integer hash function; folds high and low halves into
+    32 bits so that information above bit 31 is not simply truncated
+  @param key The integer [khint64_t]
+  @return The hash value [khint_t]
+ */
+#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11)
+/*! @function
+  @abstract 64-bit integer comparison function
+ */
+#define kh_int64_hash_equal(a, b) ((a) == (b))
+/*! @function
+  @abstract const char* hash function
+  @param s Pointer to a null terminated string
+  @return The hash value
+ */
+static kh_inline khint_t __ac_X31_hash_string(const char *s)
+{
+	/* X31 string hash: h = h*31 + c per character ((h<<5)-h == h*31);
+	   the empty string hashes to 0 */
+	khint_t h = (khint_t)*s;
+	if (h) for (++s ; *s; ++s) h = (h << 5) - h + (khint_t)*s;
+	return h;
+}
+/*! @function
+  @abstract Another interface to const char* hash function
+  @param key Pointer to a null terminated string [const char*]
+  @return The hash value [khint_t]
+ */
+#define kh_str_hash_func(key) __ac_X31_hash_string(key)
+/*! @function
+  @abstract Const char* comparison function
+ */
+#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
+
+/* Bit-mixing integer hash (Wang's integer hash) — an alternative to the
+ * identity hash kh_int_hash_func() above; exposed as kh_int_hash_func2. */
+static kh_inline khint_t __ac_Wang_hash(khint_t key)
+{
+	key += ~(key << 15);
+	key ^= (key >> 10);
+	key += (key << 3);
+	key ^= (key >> 6);
+	key += ~(key << 11);
+	key ^= (key >> 16);
+	return key;
+}
+#define kh_int_hash_func2(key) __ac_Wang_hash((khint_t)key)
+
+/* --- END OF HASH FUNCTIONS --- */
+
+/* Other convenient macros... */
+
+/*!
+  @abstract Type of the hash table.
+  @param name Name of the hash table [symbol]
+ */
+#define khash_t(name) kh_##name##_t
+
+/*! @function
+  @abstract Initiate a hash table.
+  @param name Name of the hash table [symbol]
+  @return Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_init(name) kh_init_##name()
+
+/*! @function
+  @abstract Destroy a hash table.
+  @param name Name of the hash table [symbol]
+  @param h Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_destroy(name, h) kh_destroy_##name(h)
+
+/*! @function
+  @abstract Reset a hash table without deallocating memory.
+  @param name Name of the hash table [symbol]
+  @param h Pointer to the hash table [khash_t(name)*]
+ */
+#define kh_clear(name, h) kh_clear_##name(h)
+
+/*! @function
+  @abstract Resize a hash table.
+  @param name Name of the hash table [symbol]
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param s New size [khint_t]
+ */
+#define kh_resize(name, h, s) kh_resize_##name(h, s)
+
+/*! @function
+  @abstract Insert a key to the hash table.
+  @param name Name of the hash table [symbol]
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param k Key [type of keys]
+  @param r Extra return code: -1 if the operation failed;
+    0 if the key is present in the hash table;
+    1 if the bucket is empty (never used); 2 if the element in
+    the bucket has been deleted [int*]
+  @return Iterator to the inserted element [khint_t]
+ */
+#define kh_put(name, h, k, r) kh_put_##name(h, k, r)
+
+/*! @function
+  @abstract Retrieve a key from the hash table.
+  @param name Name of the hash table [symbol]
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param k Key [type of keys]
+  @return Iterator to the found element, or kh_end(h) if the element is absent [khint_t]
+ */
+#define kh_get(name, h, k) kh_get_##name(h, k)
+
+/*! @function
+  @abstract Remove a key from the hash table.
+  @param name Name of the hash table [symbol]
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param k Iterator to the element to be deleted [khint_t]
+ */
+#define kh_del(name, h, k) kh_del_##name(h, k)
+
+/*! @function
+  @abstract Test whether a bucket contains data.
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param x Iterator to the bucket [khint_t]
+  @return 1 if containing data; 0 otherwise [int]
+ */
+#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
+
+/*! @function
+  @abstract Get key given an iterator
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param x Iterator to the bucket [khint_t]
+  @return Key [type of keys]
+ */
+#define kh_key(h, x) ((h)->keys[x])
+
+/*! @function
+  @abstract Get value given an iterator
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param x Iterator to the bucket [khint_t]
+  @return Value [type of values]
+  @discussion For hash sets (kh_is_map == 0), vals is never allocated;
+    calling this results in a segfault.
+ */
+#define kh_val(h, x) ((h)->vals[x])
+
+/*! @function
+  @abstract Alias of kh_val()
+ */
+#define kh_value(h, x) ((h)->vals[x])
+
+/*! @function
+  @abstract Get the start iterator
+  @param h Pointer to the hash table [khash_t(name)*]
+  @return The start iterator [khint_t]
+ */
+#define kh_begin(h) (khint_t)(0)
+
+/*! @function
+  @abstract Get the end iterator
+  @param h Pointer to the hash table [khash_t(name)*]
+  @return The end iterator [khint_t]
+ */
+#define kh_end(h) ((h)->n_buckets)
+
+/*! @function
+  @abstract Get the number of elements in the hash table
+  @param h Pointer to the hash table [khash_t(name)*]
+  @return Number of elements in the hash table [khint_t]
+ */
+#define kh_size(h) ((h)->size)
+
+/*! @function
+  @abstract Get the number of buckets in the hash table
+  @param h Pointer to the hash table [khash_t(name)*]
+  @return Number of buckets in the hash table [khint_t]
+ */
+#define kh_n_buckets(h) ((h)->n_buckets)
+
+/*! @function
+  @abstract Iterate over the entries in the hash table
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param kvar Variable to which key will be assigned
+  @param vvar Variable to which value will be assigned
+  @param code Block of code to execute
+  @discussion `code' must not trigger a rehash of h (e.g. via kh_put() or
+    kh_resize()): rehashing reallocates keys/vals/flags and invalidates
+    the bucket indices this loop iterates over.
+ */
+#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \
+	for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
+		if (!kh_exist(h,__i)) continue; \
+		(kvar) = kh_key(h,__i); \
+		(vvar) = kh_val(h,__i); \
+		code; \
+	} }
+
+/*! @function
+  @abstract Iterate over the values in the hash table
+  @param h Pointer to the hash table [khash_t(name)*]
+  @param vvar Variable to which value will be assigned
+  @param code Block of code to execute
+ */
+#define kh_foreach_value(h, vvar, code) { khint_t __i; \
+	for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
+		if (!kh_exist(h,__i)) continue; \
+		(vvar) = kh_val(h,__i); \
+		code; \
+	} }
+
+/* More convenient interfaces */
+
+/*! @function
+  @abstract Instantiate a hash set containing integer keys
+  @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_INT(name) \
+	KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal)
+
+/*! @function
+  @abstract Instantiate a hash map containing integer keys
+  @param name Name of the hash table [symbol]
+  @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_INT(name, khval_t) \
+	KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
+
+/*! @function
+  @abstract Instantiate a hash set containing 64-bit integer keys
+  @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_INT64(name) \
+	KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
+
+/*! @function
+  @abstract Instantiate a hash map containing 64-bit integer keys
+  @param name Name of the hash table [symbol]
+  @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_INT64(name, khval_t) \
+	KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
+
+typedef const char *kh_cstr_t; /* key type used by the string-keyed tables below */
+/*! @function
+  @abstract Instantiate a hash set containing const char* keys
+  @param name Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_STR(name) \
+	KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal)
+
+/*! @function
+  @abstract Instantiate a hash map containing const char* keys
+  @param name Name of the hash table [symbol]
+  @param khval_t Type of values [type]
+ */
+#define KHASH_MAP_INIT_STR(name, khval_t) \
+	KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal)
+
+#endif /* __AC_KHASH_H */
--- /dev/null
+#ifndef AC_SCHMM_H_
+#define AC_SCHMM_H_
+
+/*
+ * Last Modified: 2008-03-10
+ * Version: 0.1.0-8
+ *
+ * 2008-03-10, 0.1.0-8: make icc report two more "VECTORIZED"
+ * 2008-03-10, 0.1.0-7: accelerate for some CPU
+ * 2008-02-07, 0.1.0-6: simulate sequences
+ * 2008-01-15, 0.1.0-5: goodness of fit
+ * 2007-11-20, 0.1.0-4: add function declaration of hmm_post_decode()
+ * 2007-11-09: fix a memory leak
+ */
+
+#include <stdlib.h>
+
+/* NOTE(review): lags the changelog above (0.1.0-8) — confirm which is current */
+#define HMM_VERSION "0.1.0-7"
+
+/* bit flags recorded in hmm_data_t::status */
+#define HMM_FORWARD 0x02
+#define HMM_BACKWARD 0x04
+#define HMM_VITERBI 0x40
+#define HMM_POSTDEC 0x80
+
+#ifndef FLOAT
+#define FLOAT double
+#endif
+#define HMM_TINY 1e-25
+#define HMM_INF 1e300
+
+/* Model parameters of a discrete HMM with n states emitting m symbols */
+typedef struct
+{
+	int m, n; // number of symbols, number of states
+	FLOAT **a, **e; // transition matrix and emitting probabilities
+	FLOAT **ae; // auxiliary array for acceleration, should be calculated by hmm_pre_backward()
+	FLOAT *a0; // transition matrix from the start state
+} hmm_par_t;
+
+/* Per-sequence working data filled by the algorithms below */
+typedef struct
+{
+	int L; // sequence length
+	unsigned status; // which of HMM_FORWARD/BACKWARD/VITERBI/POSTDEC have run
+	char *seq;
+	FLOAT **f, **b, *s; // forward/backward matrices; s is presumably per-position scaling — confirm with implementation
+	int *v; // Viterbi path
+	int *p; // posterior decoding
+} hmm_data_t;
+
+/* Expected counts (A: transitions, E: emissions) accumulated by hmm_expect() */
+typedef struct
+{
+	int m, n;
+	FLOAT Q0, **A, **E, *A0;
+} hmm_exp_t;
+
+/* Goodness-of-fit input (cf. the 0.1.0-5 changelog entry) */
+typedef struct
+{
+	int l, *obs;
+	FLOAT *thr;
+} hmm_gof_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+	/* initialize and destroy hmm_par_t */
+	hmm_par_t *hmm_new_par(int m, int n);
+	void hmm_delete_par(hmm_par_t *hp);
+	/* initialize and destroy hmm_data_t */
+	hmm_data_t *hmm_new_data(int L, const char *seq, const hmm_par_t *hp);
+	void hmm_delete_data(hmm_data_t *hd);
+	/* initialize and destroy hmm_exp_t */
+	hmm_exp_t *hmm_new_exp(const hmm_par_t *hp);
+	void hmm_delete_exp(hmm_exp_t *he);
+	/* Viterbi, forward and backward algorithms */
+	FLOAT hmm_Viterbi(const hmm_par_t *hp, hmm_data_t *hd);
+	void hmm_pre_backward(hmm_par_t *hp);
+	void hmm_forward(const hmm_par_t *hp, hmm_data_t *hd);
+	void hmm_backward(const hmm_par_t *hp, hmm_data_t *hd);
+	/* log-likelihood of the observations (natural based) */
+	FLOAT hmm_lk(const hmm_data_t *hd);
+	/* posterior probability at the position on the sequence */
+	FLOAT hmm_post_state(const hmm_par_t *hp, const hmm_data_t *hd, int u, FLOAT *prob);
+	/* posterior decoding */
+	void hmm_post_decode(const hmm_par_t *hp, hmm_data_t *hd);
+	/* expected counts of transitions and emissions */
+	hmm_exp_t *hmm_expect(const hmm_par_t *hp, const hmm_data_t *hd);
+	/* add he0 counts to he1 counts */
+	void hmm_add_expect(const hmm_exp_t *he0, hmm_exp_t *he1);
+	/* the Q function that should be maximized in EM */
+	FLOAT hmm_Q(const hmm_par_t *hp, const hmm_exp_t *he);
+	FLOAT hmm_Q0(const hmm_par_t *hp, hmm_exp_t *he);
+	/* simulate sequences */
+	char *hmm_simulate(const hmm_par_t *hp, int L);
+#ifdef __cplusplus
+}
+#endif
+
+/* Allocate an n_row x n_col matrix of `size'-byte elements; each row is
+ * zero-initialized by calloc().
+ * NOTE(review): malloc()/calloc() results are unchecked — an allocation
+ * failure here leads to a NULL dereference in the caller. */
+static inline void **calloc2(int n_row, int n_col, int size)
+{
+	char **p;
+	int k;
+	p = (char**)malloc(sizeof(char*) * n_row);
+	for (k = 0; k != n_row; ++k)
+		p[k] = (char*)calloc(n_col, size);
+	return (void**)p;
+}
+
+#endif
--- /dev/null
+/* The MIT License
+
+ Copyright (c) 2008-2009, by Attractive Chaos <attractor@live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+#ifndef _AC_KLIST_H
+#define _AC_KLIST_H
+
+#include <stdlib.h>
+
+#ifndef klib_unused
+#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+#define klib_unused __attribute__ ((__unused__))
+#else
+#define klib_unused
+#endif
+#endif /* klib_unused */
+
+/* Memory pool of kmptype_t objects.  kmp_free() does not return memory to
+ * the system: it pushes the object onto an internal stack from which
+ * kmp_alloc() recycles it; kmpfree_f is applied to pooled objects only
+ * when the whole pool is destroyed.
+ * NOTE(review): calloc()/realloc() results are unchecked throughout, so
+ * allocation failure crashes rather than reporting an error. */
+#define KMEMPOOL_INIT2(SCOPE, name, kmptype_t, kmpfree_f) \
+	typedef struct { \
+		size_t cnt, n, max; \
+		kmptype_t **buf; \
+	} kmp_##name##_t; \
+	SCOPE kmp_##name##_t *kmp_init_##name(void) { \
+		return calloc(1, sizeof(kmp_##name##_t)); \
+	} \
+	SCOPE void kmp_destroy_##name(kmp_##name##_t *mp) { \
+		size_t k; \
+		for (k = 0; k < mp->n; ++k) { \
+			kmpfree_f(mp->buf[k]); free(mp->buf[k]); \
+		} \
+		free(mp->buf); free(mp); \
+	} \
+	SCOPE kmptype_t *kmp_alloc_##name(kmp_##name##_t *mp) { \
+		++mp->cnt; \
+		if (mp->n == 0) return calloc(1, sizeof(kmptype_t)); \
+		return mp->buf[--mp->n]; \
+	} \
+	SCOPE void kmp_free_##name(kmp_##name##_t *mp, kmptype_t *p) { \
+		--mp->cnt; \
+		if (mp->n == mp->max) { \
+			mp->max = mp->max? mp->max<<1 : 16; \
+			mp->buf = realloc(mp->buf, sizeof(kmptype_t *) * mp->max); \
+		} \
+		mp->buf[mp->n++] = p; \
+	}
+
+#define KMEMPOOL_INIT(name, kmptype_t, kmpfree_f) \
+	KMEMPOOL_INIT2(static inline klib_unused, name, kmptype_t, kmpfree_f)
+
+#define kmempool_t(name) kmp_##name##_t
+#define kmp_init(name) kmp_init_##name()
+#define kmp_destroy(name, mp) kmp_destroy_##name(mp)
+#define kmp_alloc(name, mp) kmp_alloc_##name(mp)
+#define kmp_free(name, mp, p) kmp_free_##name(mp, p)
+
+/* Singly-linked list backed by the memory pool above.  `tail' is always a
+ * sentinel node: kl_pushp() stores the new element in the current tail,
+ * appends a fresh sentinel, and returns a pointer to the stored data
+ * slot; kl_shift() pops from the head and returns -1 when only the
+ * sentinel remains (list empty). */
+#define KLIST_INIT2(SCOPE, name, kltype_t, kmpfree_t) \
+	struct __kl1_##name { \
+		kltype_t data; \
+		struct __kl1_##name *next; \
+	}; \
+	typedef struct __kl1_##name kl1_##name; \
+	KMEMPOOL_INIT2(SCOPE, name, kl1_##name, kmpfree_t) \
+	typedef struct { \
+		kl1_##name *head, *tail; \
+		kmp_##name##_t *mp; \
+		size_t size; \
+	} kl_##name##_t; \
+	SCOPE kl_##name##_t *kl_init_##name(void) { \
+		kl_##name##_t *kl = calloc(1, sizeof(kl_##name##_t)); \
+		kl->mp = kmp_init(name); \
+		kl->head = kl->tail = kmp_alloc(name, kl->mp); \
+		kl->head->next = 0; \
+		return kl; \
+	} \
+	SCOPE void kl_destroy_##name(kl_##name##_t *kl) { \
+		kl1_##name *p; \
+		for (p = kl->head; p != kl->tail; p = p->next) \
+			kmp_free(name, kl->mp, p); \
+		kmp_free(name, kl->mp, p); \
+		kmp_destroy(name, kl->mp); \
+		free(kl); \
+	} \
+	SCOPE kltype_t *kl_pushp_##name(kl_##name##_t *kl) { \
+		kl1_##name *q, *p = kmp_alloc(name, kl->mp); \
+		q = kl->tail; p->next = 0; kl->tail->next = p; kl->tail = p; \
+		++kl->size; \
+		return &q->data; \
+	} \
+	SCOPE int kl_shift_##name(kl_##name##_t *kl, kltype_t *d) { \
+		kl1_##name *p; \
+		if (kl->head->next == 0) return -1; \
+		--kl->size; \
+		p = kl->head; kl->head = kl->head->next; \
+		if (d) *d = p->data; \
+		kmp_free(name, kl->mp, p); \
+		return 0; \
+	}
+
+#define KLIST_INIT(name, kltype_t, kmpfree_t) \
+	KLIST_INIT2(static inline klib_unused, name, kltype_t, kmpfree_t)
+
+#define kliter_t(name) kl1_##name
+#define klist_t(name) kl_##name##_t
+#define kl_val(iter) ((iter)->data)
+#define kl_next(iter) ((iter)->next)
+#define kl_begin(kl) ((kl)->head)
+#define kl_end(kl) ((kl)->tail)
+
+#define kl_init(name) kl_init_##name()
+#define kl_destroy(name, kl) kl_destroy_##name(kl)
+#define kl_pushp(name, kl) kl_pushp_##name(kl)
+#define kl_shift(name, kl, d) kl_shift_##name(kl, d)
+
+#endif
--- /dev/null
+#ifndef AC_KMATH_H
+#define AC_KMATH_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	/**********************************
+	 * Pseudo-random number generator *
+	 **********************************/
+
+	typedef uint64_t krint64_t;
+
+	struct _krand_t;
+	typedef struct _krand_t krand_t;
+
+	/* uniform double in [0,1): the top 53 bits of kr_rand() scaled by 2^-53 */
+	#define kr_drand(_kr) ((kr_rand(_kr) >> 11) * (1.0/9007199254740992.0))
+	/* reservoir-sampling slot: the first _k items fill slots 0.._k-1 in
+	   order; afterwards a uniformly random slot in [0,*_cnt) is returned */
+	#define kr_sample(_kr, _k, _cnt) ((*(_cnt))++ < (_k)? *(_cnt) - 1 : kr_rand(_kr) % *(_cnt))
+
+	krand_t *kr_srand(krint64_t seed);
+	krint64_t kr_rand(krand_t *kr);
+	double kr_normal(krand_t *kr);
+
+	/**************************
+	 * Non-linear programming *
+	 **************************/
+
+	#define KMIN_RADIUS 0.5
+	#define KMIN_EPS 1e-7
+	#define KMIN_MAXCALL 50000
+
+	typedef double (*kmin_f)(int, double*, void*);
+	typedef double (*kmin1_f)(double, void*);
+
+	double kmin_hj(kmin_f func, int n, double *x, void *data, double r, double eps, int max_calls); // Hooke-Jeeves'
+	double kmin_brent(kmin1_f func, double a, double b, void *data, double tol, double *xmin); // Brent's 1-dimension
+
+	/*********************
+	 * Special functions *
+	 *********************/
+
+	double kf_lgamma(double z); // log gamma function
+	double kf_erfc(double x); // complementary error function
+	double kf_gammap(double s, double z); // regularized lower incomplete gamma function
+	double kf_gammaq(double s, double z); // regularized upper incomplete gamma function
+	double kf_betai(double a, double b, double x); // regularized incomplete beta function
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef KNETFILE_H
+#define KNETFILE_H
+
+#include <stdint.h>
+#include <fcntl.h>
+
+/* Map the I/O primitives onto plain POSIX file calls, or onto Winsock
+   socket calls on Windows. */
+#ifndef _WIN32
+#define netread(fd, ptr, len) read(fd, ptr, len)
+#define netwrite(fd, ptr, len) write(fd, ptr, len)
+#define netclose(fd) close(fd)
+#else
+#include <winsock2.h>
+#define netread(fd, ptr, len) recv(fd, ptr, len, 0)
+#define netwrite(fd, ptr, len) send(fd, ptr, len, 0)
+#define netclose(fd) closesocket(fd)
+#endif
+
+// FIXME: currently I/O is unbuffered
+
+/* stream kinds handled by knet_open(); stored in knetFile::type */
+#define KNF_TYPE_LOCAL 1
+#define KNF_TYPE_FTP 2
+#define KNF_TYPE_HTTP 3
+
+/* Handle for a local file, an FTP stream or an HTTP stream (see KNF_TYPE_*) */
+typedef struct knetFile_s {
+	int type, fd;
+	int64_t offset; // current logical position, see knet_tell()
+	char *host, *port;
+
+	// the following are for FTP only
+	int ctrl_fd, pasv_ip[4], pasv_port, max_response, no_reconnect, is_ready;
+	char *response, *retr, *size_cmd;
+	int64_t seek_offset; // for lazy seek
+	int64_t file_size;
+
+	// the following are for HTTP only
+	char *path, *http_host;
+} knetFile;
+
+#define knet_tell(fp) ((fp)->offset)
+#define knet_fileno(fp) ((fp)->fd)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _WIN32
+	int knet_win32_init();
+	void knet_win32_destroy();
+#endif
+
+	knetFile *knet_open(const char *fn, const char *mode);
+
+	/*
+	   This only works with local files.
+	 */
+	knetFile *knet_dopen(int fd, const char *mode);
+
+	/*
+	   If ->is_ready==0, this routine updates ->fd; otherwise, it simply
+	   reads from ->fd.
+	 */
+	off_t knet_read(knetFile *fp, void *buf, off_t len);
+
+	/*
+	   This routine only sets ->offset and ->is_ready=0. It does not
+	   communicate with the FTP server.
+	 */
+	off_t knet_seek(knetFile *fp, int64_t off, int whence);
+	int knet_close(knetFile *fp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef KNHX_H_
+#define KNHX_H_
+
+/* Parser/formatter for trees in New Hampshire (NHX) text — presumed from
+ * the header name and API shape; confirm with the implementation.
+ * KNERR_* are error bits reported via the *_error argument of kn_parse(). */
+#define KNERR_MISSING_LEFT 0x01
+#define KNERR_MISSING_RGHT 0x02
+#define KNERR_BRACKET 0x04
+#define KNERR_COLON 0x08
+
+/* One tree node; kn_parse() returns all nodes in a single array of *_n
+ * elements, so parent/child are presumably indices into it — TODO confirm */
+typedef struct {
+	int parent, n; // parent node; number of children
+	int *child;
+	char *name;
+	double d; // branch length/distance
+} knhx1_t;
+
+#ifndef KSTRING_T
+#define KSTRING_T kstring_t
+typedef struct __kstring_t {
+	size_t l, m;
+	char *s;
+} kstring_t;
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	knhx1_t *kn_parse(const char *nhx, int *_n, int *_error);
+	void kn_format(const knhx1_t *node, int root, kstring_t *s);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/* The MIT License
+
+ Copyright (c) 2008, 2009, 2011 Attractive Chaos <attractor@live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/* Last Modified: 05MAR2012 */
+
+#ifndef AC_KSEQ_H
+#define AC_KSEQ_H
+
+#include <ctype.h>
+#include <string.h>
+#include <stdlib.h>
+
+#define KS_SEP_SPACE 0 // isspace(): \t, \n, \v, \f, \r
+#define KS_SEP_TAB 1 // isspace() && !' '
+#define KS_SEP_LINE 2 // line separator: "\n" (Unix) or "\r\n" (Windows)
+#define KS_SEP_MAX 2
+
+/* Buffered input stream over an arbitrary file-handle type `type_t';
+   bytes in buf[begin, end) are still unread. */
+#define __KS_TYPE(type_t) \
+	typedef struct __kstream_t { \
+		unsigned char *buf; \
+		int begin, end, is_eof; \
+		type_t f; \
+	} kstream_t;
+
+#define ks_err(ks) ((ks)->end == -1)
+#define ks_eof(ks) ((ks)->is_eof && (ks)->begin >= (ks)->end)
+#define ks_rewind(ks) ((ks)->is_eof = (ks)->begin = (ks)->end = 0)
+
+/* ks_init()/ks_destroy(): allocate/release a stream plus its buffer.
+   NOTE(review): calloc/malloc results are unchecked. */
+#define __KS_BASIC(type_t, __bufsize) \
+	static inline kstream_t *ks_init(type_t f) \
+	{ \
+		kstream_t *ks = (kstream_t*)calloc(1, sizeof(kstream_t)); \
+		ks->f = f; \
+		ks->buf = (unsigned char*)malloc(__bufsize); \
+		return ks; \
+	} \
+	static inline void ks_destroy(kstream_t *ks) \
+	{ \
+		if (ks) { \
+			free(ks->buf); \
+			free(ks); \
+		} \
+	}
+
+/* ks_getc(): next byte as an int, -1 on end-of-file, -3 on read error */
+#define __KS_GETC(__read, __bufsize) \
+	static inline int ks_getc(kstream_t *ks) \
+	{ \
+		if (ks_err(ks)) return -3; \
+		if (ks->is_eof && ks->begin >= ks->end) return -1; \
+		if (ks->begin >= ks->end) { \
+			ks->begin = 0; \
+			ks->end = __read(ks->f, ks->buf, __bufsize); \
+			if (ks->end == 0) { ks->is_eof = 1; return -1;} \
+			if (ks->end == -1) { ks->is_eof = 1; return -3;}\
+		} \
+		return (int)ks->buf[ks->begin++]; \
+	}
+
+#ifndef KSTRING_T
+#define KSTRING_T kstring_t
+typedef struct __kstring_t {
+	size_t l, m;
+	char *s;
+} kstring_t;
+#endif
+
+#ifndef kroundup32
+#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+#endif
+
+/* ks_getuntil2(): read into str up to the delimiter (a byte value when
+   delimiter > KS_SEP_MAX, otherwise the KS_SEP_* separator class),
+   appending when `append' is set.  The delimiter byte is consumed and
+   reported via *dret.  Returns the resulting length, -1 on EOF with
+   nothing read, -3 on stream error; for KS_SEP_LINE a trailing '\r' is
+   stripped.  NOTE(review): realloc result is unchecked. */
+#define __KS_GETUNTIL(__read, __bufsize) \
+	static int ks_getuntil2(kstream_t *ks, int delimiter, kstring_t *str, int *dret, int append) \
+	{ \
+		int gotany = 0; \
+		if (dret) *dret = 0; \
+		str->l = append? str->l : 0; \
+		for (;;) { \
+			int i; \
+			if (ks_err(ks)) return -3; \
+			if (ks->begin >= ks->end) { \
+				if (!ks->is_eof) { \
+					ks->begin = 0; \
+					ks->end = __read(ks->f, ks->buf, __bufsize); \
+					if (ks->end == 0) { ks->is_eof = 1; break; } \
+					if (ks->end == -1) { ks->is_eof = 1; return -3; } \
+				} else break; \
+			} \
+			if (delimiter == KS_SEP_LINE) { \
+				for (i = ks->begin; i < ks->end; ++i) \
+					if (ks->buf[i] == '\n') break; \
+			} else if (delimiter > KS_SEP_MAX) { \
+				for (i = ks->begin; i < ks->end; ++i) \
+					if (ks->buf[i] == delimiter) break; \
+			} else if (delimiter == KS_SEP_SPACE) { \
+				for (i = ks->begin; i < ks->end; ++i) \
+					if (isspace(ks->buf[i])) break; \
+			} else if (delimiter == KS_SEP_TAB) { \
+				for (i = ks->begin; i < ks->end; ++i) \
+					if (isspace(ks->buf[i]) && ks->buf[i] != ' ') break; \
+			} else i = 0; /* never come to here! */ \
+			if (str->m - str->l < (size_t)(i - ks->begin + 1)) { \
+				str->m = str->l + (i - ks->begin) + 1; \
+				kroundup32(str->m); \
+				str->s = (char*)realloc(str->s, str->m); \
+			} \
+			gotany = 1; \
+			memcpy(str->s + str->l, ks->buf + ks->begin, i - ks->begin); \
+			str->l = str->l + (i - ks->begin); \
+			ks->begin = i + 1; \
+			if (i < ks->end) { \
+				if (dret) *dret = ks->buf[i]; \
+				break; \
+			} \
+		} \
+		if (!gotany && ks_eof(ks)) return -1; \
+		if (str->s == 0) { \
+			str->m = 1; \
+			str->s = (char*)calloc(1, 1); \
+		} else if (delimiter == KS_SEP_LINE && str->l > 1 && str->s[str->l-1] == '\r') --str->l; \
+		str->s[str->l] = '\0'; \
+		return str->l; \
+	} \
+	static inline int ks_getuntil(kstream_t *ks, int delimiter, kstring_t *str, int *dret) \
+	{ return ks_getuntil2(ks, delimiter, str, dret, 0); }
+
+#define KSTREAM_INIT(type_t, __read, __bufsize) \
+	__KS_TYPE(type_t) \
+	__KS_BASIC(type_t, __bufsize) \
+	__KS_GETC(__read, __bufsize) \
+	__KS_GETUNTIL(__read, __bufsize)
+
+#define kseq_rewind(ks) ((ks)->last_char = (ks)->f->is_eof = (ks)->f->begin = (ks)->f->end = 0)
+
+/* kseq_init()/kseq_destroy(): allocate/release a FASTA/FASTQ reader */
+#define __KSEQ_BASIC(SCOPE, type_t) \
+	SCOPE kseq_t *kseq_init(type_t fd) \
+	{ \
+		kseq_t *s = (kseq_t*)calloc(1, sizeof(kseq_t)); \
+		s->f = ks_init(fd); \
+		return s; \
+	} \
+	SCOPE void kseq_destroy(kseq_t *ks) \
+	{ \
+		if (!ks) return; \
+		free(ks->name.s); free(ks->comment.s); free(ks->seq.s); free(ks->qual.s); \
+		ks_destroy(ks->f); \
+		free(ks); \
+	}
+
+/* Return value:
+   >=0 length of the sequence (normal)
+   -1 end-of-file
+   -2 truncated quality string
+   -3 error reading stream
+ */
+/* Fix: the quality-line loop used to read
+ *     while ((c = ks_getuntil2(...) >= 0 && seq->qual.l < seq->seq.l));
+ * where '=' bound the entire '>= 0 && ...' expression, so c was only
+ * ever 0 or 1 and the 'c == -3' stream-error check below could never
+ * fire.  The parentheses now bind the assignment to the ks_getuntil2()
+ * return value only (as in upstream klib kseq.h). */
+#define __KSEQ_READ(SCOPE) \
+	SCOPE int kseq_read(kseq_t *seq) \
+	{ \
+		int c,r; \
+		kstream_t *ks = seq->f; \
+		if (seq->last_char == 0) { /* then jump to the next header line */ \
+			while ((c = ks_getc(ks)) >= 0 && c != '>' && c != '@'); \
+			if (c < 0) return c; /* end of file or error*/ \
+			seq->last_char = c; \
+		} /* else: the first header char has been read in the previous call */ \
+		seq->comment.l = seq->seq.l = seq->qual.l = 0; /* reset all members */ \
+		if ((r=ks_getuntil(ks, 0, &seq->name, &c)) < 0) return r; /* normal exit: EOF or error */ \
+		if (c != '\n') ks_getuntil(ks, KS_SEP_LINE, &seq->comment, 0); /* read FASTA/Q comment */ \
+		if (seq->seq.s == 0) { /* we can do this in the loop below, but that is slower */ \
+			seq->seq.m = 256; \
+			seq->seq.s = (char*)malloc(seq->seq.m); \
+		} \
+		while ((c = ks_getc(ks)) >= 0 && c != '>' && c != '+' && c != '@') { \
+			if (c == '\n') continue; /* skip empty lines */ \
+			seq->seq.s[seq->seq.l++] = c; /* this is safe: we always have enough space for 1 char */ \
+			ks_getuntil2(ks, KS_SEP_LINE, &seq->seq, 0, 1); /* read the rest of the line */ \
+		} \
+		if (c == '>' || c == '@') seq->last_char = c; /* the first header char has been read */ \
+		if (seq->seq.l + 1 >= seq->seq.m) { /* seq->seq.s[seq->seq.l] below may be out of boundary */ \
+			seq->seq.m = seq->seq.l + 2; \
+			kroundup32(seq->seq.m); /* rounded to the next closest 2^k */ \
+			seq->seq.s = (char*)realloc(seq->seq.s, seq->seq.m); \
+		} \
+		seq->seq.s[seq->seq.l] = 0; /* null terminated string */ \
+		if (c != '+') return seq->seq.l; /* FASTA */ \
+		if (seq->qual.m < seq->seq.m) { /* allocate memory for qual in case insufficient */ \
+			seq->qual.m = seq->seq.m; \
+			seq->qual.s = (char*)realloc(seq->qual.s, seq->qual.m); \
+		} \
+		while ((c = ks_getc(ks)) >= 0 && c != '\n'); /* skip the rest of '+' line */ \
+		if (c == -1) return -2; /* error: no quality string */ \
+		while ((c = ks_getuntil2(ks, KS_SEP_LINE, &seq->qual, 0, 1)) >= 0 && seq->qual.l < seq->seq.l); \
+		if (c == -3) return -3; /* stream error */ \
+		seq->last_char = 0; /* we have not come to the next header line */ \
+		if (seq->seq.l != seq->qual.l) return -2; /* error: qual string is of a different length */ \
+		return seq->seq.l; \
+	}
+
+/* Per-record FASTA/FASTQ state: name/comment/seq/qual are filled in by
+   kseq_read(); last_char caches a look-ahead header character. */
+#define __KSEQ_TYPE(type_t) \
+	typedef struct { \
+		kstring_t name, comment, seq, qual; \
+		int last_char; \
+		kstream_t *f; \
+	} kseq_t;
+
+/* Instantiate the stream layer plus the sequence reader for a given file
+   handle type and read()-like function; 16384 is the buffer size in bytes. */
+#define KSEQ_INIT2(SCOPE, type_t, __read) \
+	KSTREAM_INIT(type_t, __read, 16384) \
+	__KSEQ_TYPE(type_t) \
+	__KSEQ_BASIC(SCOPE, type_t) \
+	__KSEQ_READ(SCOPE)
+
+#define KSEQ_INIT(type_t, __read) KSEQ_INIT2(static, type_t, __read)
+
+/* Declarations only, for use when the implementation is instantiated in
+   another translation unit. */
+#define KSEQ_DECLARE(type_t) \
+	__KS_TYPE(type_t) \
+	__KSEQ_TYPE(type_t) \
+	extern kseq_t *kseq_init(type_t fd); \
+	void kseq_destroy(kseq_t *ks); \
+	int kseq_read(kseq_t *seq);
+
+#endif
--- /dev/null
+#ifndef KSON_H
+#define KSON_H
+
+#include <string.h>
+
+/* node types: three string flavours plus the two container kinds */
+#define KSON_TYPE_NO_QUOTE 1
+#define KSON_TYPE_SGL_QUOTE 2
+#define KSON_TYPE_DBL_QUOTE 3
+#define KSON_TYPE_BRACKET 4
+#define KSON_TYPE_BRACE 5
+
+/* kson_parse() error codes */
+#define KSON_OK 0
+#define KSON_ERR_EXTRA_LEFT 1
+#define KSON_ERR_EXTRA_RIGHT 2
+#define KSON_ERR_NO_KEY 3
+
+typedef struct kson_node_s {
+	unsigned long long type:3, n:61; // KSON_TYPE_*; n = number of children for internal nodes
+	char *key; // member name inside an object; NULL otherwise
+	union {
+		struct kson_node_s **child; // when the node is internal (bracket/brace)
+		char *str; // when the node is a leaf value
+	} v;
+} kson_node_t;
+
+typedef struct {
+	long n_nodes;
+	kson_node_t *root;
+} kson_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	kson_t *kson_parse(const char *json);
+	void kson_destroy(kson_t *kson);
+	const kson_node_t *kson_by_path(const kson_node_t *root, int path_len, ...);
+	void kson_format(const kson_node_t *root);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* true for container nodes (arrays and objects) */
+#define kson_is_internal(p) ((p)->type == KSON_TYPE_BRACKET || (p)->type == KSON_TYPE_BRACE)
+
+/* Linear scan of an internal node's children for the member named `key';
+   returns 0 when p is a leaf or no child matches. */
+static inline const kson_node_t *kson_by_key(const kson_node_t *p, const char *key)
+{
+	long i;
+	if (!kson_is_internal(p)) return 0;
+	for (i = 0; i < (long)p->n; ++i) {
+		const kson_node_t *q = p->v.child[i];
+		if (q->key && strcmp(q->key, key) == 0)
+			return q;
+	}
+	return 0;
+}
+
+/* Bounds-checked child access; returns 0 for leaves or out-of-range i. */
+static inline const kson_node_t *kson_by_index(const kson_node_t *p, long i)
+{
+	if (!kson_is_internal(p)) return 0;
+	return 0 <= i && i < (long)p->n? p->v.child[i] : 0;
+}
+
+#endif
--- /dev/null
+/* The MIT License
+
+ Copyright (c) 2008, 2011 Attractive Chaos <attractor@live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/*
+ 2011-04-10 (0.1.6):
+
+ * Added sample
+
+ 2011-03 (0.1.5):
+
+ * Added shuffle/permutation
+
+ 2008-11-16 (0.1.4):
+
+ * Fixed a bug in introsort() that happens in rare cases.
+
+ 2008-11-05 (0.1.3):
+
+ * Fixed a bug in introsort() for complex comparisons.
+
+ * Fixed a bug in mergesort(). The previous version is not stable.
+
+ 2008-09-15 (0.1.2):
+
+ * Accelerated introsort. On my Mac (not on another Linux machine),
+ my implementation is as fast as std::sort on random input.
+
+ * Added combsort and in introsort, switch to combsort if the
+ recursion is too deep.
+
+ 2008-09-13 (0.1.1):
+
+ * Added k-small algorithm
+
+ 2008-09-05 (0.1.0):
+
+ * Initial version
+
+*/
+
+#ifndef AC_KSORT_H
+#define AC_KSORT_H
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Saved subrange (plus remaining depth budget) for ks_introsort's
+ * explicit stack, which replaces recursion. */
+typedef struct {
+ void *left, *right;
+ int depth;
+} ks_isort_stack_t;
+
+/* Swap two lvalues of type type_t through a temporary. */
+#define KSORT_SWAP(type_t, a, b) { register type_t t=(a); (a)=(b); (b)=t; }
+
+/* Instantiate a family of sorting routines specialised for `type_t`,
+ * ordered by the strict-weak-order predicate __sort_lt(a, b) ("a before b"):
+ *   ks_mergesort_##name  - stable bottom-up merge sort (temp==NULL => malloc)
+ *   ks_heap*_##name      - binary-heap build/sift-down and heap sort
+ *   ks_combsort_##name   - comb sort; used as introsort's depth-limit fallback
+ *   ks_introsort_##name  - quicksort with median-of-3 pivot, explicit stack,
+ *                          comb-sort fallback, and a final insertion-sort pass
+ *   ks_ksmall_##name     - k-th smallest element (quickselect)
+ *   ks_shuffle_##name    - Fisher-Yates shuffle driven by drand48()
+ *   ks_sample_##name     - in-place sampling (marked FIXME: NOT TESTED below)
+ * NOTE(review): ks_shuffle_##name/ks_sample_##name iterate with int
+ * counters, so n > INT_MAX would truncate — confirm callers stay small. */
+#define KSORT_INIT(name, type_t, __sort_lt) \
+ void ks_mergesort_##name(size_t n, type_t array[], type_t temp[]) \
+ { \
+ type_t *a2[2], *a, *b; \
+ int curr, shift; \
+ \
+ a2[0] = array; \
+ a2[1] = temp? temp : (type_t*)malloc(sizeof(type_t) * n); \
+ for (curr = 0, shift = 0; (1ul<<shift) < n; ++shift) { \
+ a = a2[curr]; b = a2[1-curr]; \
+ if (shift == 0) { \
+ type_t *p = b, *i, *eb = a + n; \
+ for (i = a; i < eb; i += 2) { \
+ if (i == eb - 1) *p++ = *i; \
+ else { \
+ if (__sort_lt(*(i+1), *i)) { \
+ *p++ = *(i+1); *p++ = *i; \
+ } else { \
+ *p++ = *i; *p++ = *(i+1); \
+ } \
+ } \
+ } \
+ } else { \
+ size_t i, step = 1ul<<shift; \
+ for (i = 0; i < n; i += step<<1) { \
+ type_t *p, *j, *k, *ea, *eb; \
+ if (n < i + step) { \
+ ea = a + n; eb = a; \
+ } else { \
+ ea = a + i + step; \
+ eb = a + (n < i + (step<<1)? n : i + (step<<1)); \
+ } \
+ j = a + i; k = a + i + step; p = b + i; \
+ while (j < ea && k < eb) { \
+ if (__sort_lt(*k, *j)) *p++ = *k++; \
+ else *p++ = *j++; \
+ } \
+ while (j < ea) *p++ = *j++; \
+ while (k < eb) *p++ = *k++; \
+ } \
+ } \
+ curr = 1 - curr; \
+ } \
+ if (curr == 1) { \
+ type_t *p = a2[0], *i = a2[1], *eb = array + n; \
+ for (; p < eb; ++i) *p++ = *i; \
+ } \
+ if (temp == 0) free(a2[1]); \
+ } \
+ void ks_heapadjust_##name(size_t i, size_t n, type_t l[]) \
+ { \
+ size_t k = i; \
+ type_t tmp = l[i]; \
+ while ((k = (k << 1) + 1) < n) { \
+ if (k != n - 1 && __sort_lt(l[k], l[k+1])) ++k; \
+ if (__sort_lt(l[k], tmp)) break; \
+ l[i] = l[k]; i = k; \
+ } \
+ l[i] = tmp; \
+ } \
+ void ks_heapmake_##name(size_t lsize, type_t l[]) \
+ { \
+ size_t i; \
+ for (i = (lsize >> 1) - 1; i != (size_t)(-1); --i) \
+ ks_heapadjust_##name(i, lsize, l); \
+ } \
+ void ks_heapsort_##name(size_t lsize, type_t l[]) \
+ { \
+ size_t i; \
+ for (i = lsize - 1; i > 0; --i) { \
+ type_t tmp; \
+ tmp = *l; *l = l[i]; l[i] = tmp; ks_heapadjust_##name(0, i, l); \
+ } \
+ } \
+ static inline void __ks_insertsort_##name(type_t *s, type_t *t) \
+ { \
+ type_t *i, *j, swap_tmp; \
+ for (i = s + 1; i < t; ++i) \
+ for (j = i; j > s && __sort_lt(*j, *(j-1)); --j) { \
+ swap_tmp = *j; *j = *(j-1); *(j-1) = swap_tmp; \
+ } \
+ } \
+ void ks_combsort_##name(size_t n, type_t a[]) \
+ { \
+ const double shrink_factor = 1.2473309501039786540366528676643; \
+ int do_swap; \
+ size_t gap = n; \
+ type_t tmp, *i, *j; \
+ do { \
+ if (gap > 2) { \
+ gap = (size_t)(gap / shrink_factor); \
+ if (gap == 9 || gap == 10) gap = 11; \
+ } \
+ do_swap = 0; \
+ for (i = a; i < a + n - gap; ++i) { \
+ j = i + gap; \
+ if (__sort_lt(*j, *i)) { \
+ tmp = *i; *i = *j; *j = tmp; \
+ do_swap = 1; \
+ } \
+ } \
+ } while (do_swap || gap > 2); \
+ if (gap != 1) __ks_insertsort_##name(a, a + n); \
+ } \
+ void ks_introsort_##name(size_t n, type_t a[]) \
+ { \
+ int d; \
+ ks_isort_stack_t *top, *stack; \
+ type_t rp, swap_tmp; \
+ type_t *s, *t, *i, *j, *k; \
+ \
+ if (n < 1) return; \
+ else if (n == 2) { \
+ if (__sort_lt(a[1], a[0])) { swap_tmp = a[0]; a[0] = a[1]; a[1] = swap_tmp; } \
+ return; \
+ } \
+ for (d = 2; 1ul<<d < n; ++d); \
+ stack = (ks_isort_stack_t*)malloc(sizeof(ks_isort_stack_t) * ((sizeof(size_t)*d)+2)); \
+ top = stack; s = a; t = a + (n-1); d <<= 1; \
+ while (1) { \
+ if (s < t) { \
+ if (--d == 0) { \
+ ks_combsort_##name(t - s + 1, s); \
+ t = s; \
+ continue; \
+ } \
+ i = s; j = t; k = i + ((j-i)>>1) + 1; \
+ if (__sort_lt(*k, *i)) { \
+ if (__sort_lt(*k, *j)) k = j; \
+ } else k = __sort_lt(*j, *i)? i : j; \
+ rp = *k; \
+ if (k != t) { swap_tmp = *k; *k = *t; *t = swap_tmp; } \
+ for (;;) { \
+ do ++i; while (__sort_lt(*i, rp)); \
+ do --j; while (i <= j && __sort_lt(rp, *j)); \
+ if (j <= i) break; \
+ swap_tmp = *i; *i = *j; *j = swap_tmp; \
+ } \
+ swap_tmp = *i; *i = *t; *t = swap_tmp; \
+ if (i-s > t-i) { \
+ if (i-s > 16) { top->left = s; top->right = i-1; top->depth = d; ++top; } \
+ s = t-i > 16? i+1 : t; \
+ } else { \
+ if (t-i > 16) { top->left = i+1; top->right = t; top->depth = d; ++top; } \
+ t = i-s > 16? i-1 : s; \
+ } \
+ } else { \
+ if (top == stack) { \
+ free(stack); \
+ __ks_insertsort_##name(a, a+n); \
+ return; \
+ } else { --top; s = (type_t*)top->left; t = (type_t*)top->right; d = top->depth; } \
+ } \
+ } \
+ } \
+ /* This function is adapted from: http://ndevilla.free.fr/median/ */ \
+ /* 0 <= kk < n */ \
+ type_t ks_ksmall_##name(size_t n, type_t arr[], size_t kk) \
+ { \
+ type_t *low, *high, *k, *ll, *hh, *mid; \
+ low = arr; high = arr + n - 1; k = arr + kk; \
+ for (;;) { \
+ if (high <= low) return *k; \
+ if (high == low + 1) { \
+ if (__sort_lt(*high, *low)) KSORT_SWAP(type_t, *low, *high); \
+ return *k; \
+ } \
+ mid = low + (high - low) / 2; \
+ if (__sort_lt(*high, *mid)) KSORT_SWAP(type_t, *mid, *high); \
+ if (__sort_lt(*high, *low)) KSORT_SWAP(type_t, *low, *high); \
+ if (__sort_lt(*low, *mid)) KSORT_SWAP(type_t, *mid, *low); \
+ KSORT_SWAP(type_t, *mid, *(low+1)); \
+ ll = low + 1; hh = high; \
+ for (;;) { \
+ do ++ll; while (__sort_lt(*ll, *low)); \
+ do --hh; while (__sort_lt(*low, *hh)); \
+ if (hh < ll) break; \
+ KSORT_SWAP(type_t, *ll, *hh); \
+ } \
+ KSORT_SWAP(type_t, *low, *hh); \
+ if (hh <= k) low = ll; \
+ if (hh >= k) high = hh - 1; \
+ } \
+ } \
+ void ks_shuffle_##name(size_t n, type_t a[]) \
+ { \
+ int i, j; \
+ for (i = n; i > 1; --i) { \
+ type_t tmp; \
+ j = (int)(drand48() * i); \
+ tmp = a[j]; a[j] = a[i-1]; a[i-1] = tmp; \
+ } \
+ } \
+ void ks_sample_##name(size_t n, size_t r, type_t a[]) /* FIXME: NOT TESTED!!! */ \
+ { /* reference: http://code.activestate.com/recipes/272884/ */ \
+ int i, k, pop = n; \
+ for (i = (int)r, k = 0; i >= 0; --i) { \
+ double z = 1., x = drand48(); \
+ type_t tmp; \
+ while (x < z) z -= z * i / (pop--); \
+ if (k != n - pop - 1) tmp = a[k], a[k] = a[n-pop-1], a[n-pop-1] = tmp; \
+ ++k; \
+ } \
+ }
+
+/* Convenience wrappers dispatching to the `name`-specialised instances. */
+#define ks_mergesort(name, n, a, t) ks_mergesort_##name(n, a, t)
+#define ks_introsort(name, n, a) ks_introsort_##name(n, a)
+#define ks_combsort(name, n, a) ks_combsort_##name(n, a)
+#define ks_heapsort(name, n, a) ks_heapsort_##name(n, a)
+#define ks_heapmake(name, n, a) ks_heapmake_##name(n, a)
+#define ks_heapadjust(name, i, n, a) ks_heapadjust_##name(i, n, a)
+#define ks_ksmall(name, n, a, k) ks_ksmall_##name(n, a, k)
+#define ks_shuffle(name, n, a) ks_shuffle_##name(n, a)
+
+/* Ready-made comparators: plain `<` for ordered scalars; strcmp for C strings. */
+#define ks_lt_generic(a, b) ((a) < (b))
+#define ks_lt_str(a, b) (strcmp((a), (b)) < 0)
+
+typedef const char *ksstr_t;
+
+/* Instantiate the sorters for a scalar type, or for C strings (name "str"). */
+#define KSORT_INIT_GENERIC(type_t) KSORT_INIT(type_t, type_t, ks_lt_generic)
+#define KSORT_INIT_STR KSORT_INIT(str, ksstr_t, ks_lt_str)
+
+#endif
--- /dev/null
+/* The MIT License
+
+ Copyright (c) by Attractive Chaos <attractor@live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+#ifndef KSTRING_H
+#define KSTRING_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+
+/* Round x up to the next power of two, modifying x in place. */
+#ifndef kroundup32
+#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+#endif
+
+/* Attach printf-style format checking to prototypes on GCC >= 2.5. */
+#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
+#define KS_ATTR_PRINTF(fmt, arg) __attribute__((__format__ (__printf__, fmt, arg)))
+#else
+#define KS_ATTR_PRINTF(fmt, arg)
+#endif
+
+
+/* kstring_t is a simple non-opaque type whose fields are likely to be
+ * used directly by user code (but see also ks_str() and ks_len() below).
+ * A kstring_t object is initialised by either of
+ * kstring_t str = { 0, 0, NULL };
+ * kstring_t str; ...; str.l = str.m = 0; str.s = NULL;
+ * and either ownership of the underlying buffer should be given away before
+ * the object disappears (see ks_release() below) or the kstring_t should be
+ * destroyed with free(str.s); */
+#ifndef KSTRING_T
+#define KSTRING_T kstring_t
+typedef struct __kstring_t {
+ size_t l, m; /* l = bytes in use; m = allocated capacity */
+ char *s; /* buffer; NUL-terminated by the kput* helpers below */
+} kstring_t;
+#endif
+
+/* State for kstrtok(); tab[4] is presumably a 256-bit separator-membership
+ * bitmap built from `sep` — confirm against kstring.c. */
+typedef struct {
+ uint64_t tab[4];
+ int sep, finished;
+ const char *p; // end of the current token
+} ks_tokaux_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ int kvsprintf(kstring_t *s, const char *fmt, va_list ap) KS_ATTR_PRINTF(2,0);
+ int ksprintf(kstring_t *s, const char *fmt, ...) KS_ATTR_PRINTF(2,3);
+ int ksplit_core(char *s, int delimiter, int *_max, int **_offsets);
+ char *kstrstr(const char *str, const char *pat, int **_prep);
+ char *kstrnstr(const char *str, const char *pat, int n, int **_prep);
+ void *kmemmem(const void *_str, int n, const void *_pat, int m, int **_prep);
+
+ /* kstrtok() is similar to strtok_r() except that str is not
+ * modified and both str and sep can be NULL. For efficiency, it is
+ * actually recommended to set both to NULL in the subsequent calls
+ * if sep is not changed. */
+ char *kstrtok(const char *str, const char *sep, ks_tokaux_t *aux);
+
+ /* kgetline() uses the supplied fgets()-like function to read a "\n"-
+ * or "\r\n"-terminated line from fp. The line read is appended to the
+ * kstring without its terminator and 0 is returned; EOF is returned at
+ * EOF or on error (determined by querying fp, as per fgets()). */
+ typedef char *kgets_func(char *, int, void *);
+ int kgetline(kstring_t *s, kgets_func *fgets, void *fp);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Ensure capacity for at least `size` bytes (rounded up to a power of two).
+ * Returns 0 on success, -1 on realloc failure; note that on failure s->m has
+ * already been enlarged even though s->s still points at the old buffer. */
+static inline int ks_resize(kstring_t *s, size_t size)
+{
+ if (s->m < size) {
+ char *tmp;
+ s->m = size;
+ kroundup32(s->m);
+ if ((tmp = (char*)realloc(s->s, s->m)))
+ s->s = tmp;
+ else
+ return -1;
+ }
+ return 0;
+}
+
+/* Borrowed pointer to the underlying C string (may be NULL when empty). */
+static inline char *ks_str(kstring_t *s)
+{
+ return s->s;
+}
+
+/* Current length in bytes, excluding the NUL terminator. */
+static inline size_t ks_len(kstring_t *s)
+{
+ return s->l;
+}
+
+// Give ownership of the underlying buffer away to something else (making
+// that something else responsible for freeing it), leaving the kstring_t
+// empty and ready to be used again, or ready to go out of scope without
+// needing free(str.s) to prevent a memory leak.
+static inline char *ks_release(kstring_t *s)
+{
+ char *ss = s->s;
+ s->l = s->m = 0;
+ s->s = NULL;
+ return ss;
+}
+
+/* Append l bytes from p and NUL-terminate; returns l, or EOF on OOM. */
+static inline int kputsn(const char *p, int l, kstring_t *s)
+{
+ if (s->l + l + 1 >= s->m) {
+ char *tmp;
+ s->m = s->l + l + 2;
+ kroundup32(s->m);
+ if ((tmp = (char*)realloc(s->s, s->m)))
+ s->s = tmp;
+ else
+ return EOF;
+ }
+ memcpy(s->s + s->l, p, l);
+ s->l += l;
+ s->s[s->l] = 0;
+ return l;
+}
+
+/* Append a NUL-terminated string; returns its length, or EOF on OOM. */
+static inline int kputs(const char *p, kstring_t *s)
+{
+ return kputsn(p, strlen(p), s);
+}
+
+/* Append one character and NUL-terminate; returns c, or EOF on OOM. */
+static inline int kputc(int c, kstring_t *s)
+{
+ if (s->l + 1 >= s->m) {
+ char *tmp;
+ s->m = s->l + 2;
+ kroundup32(s->m);
+ if ((tmp = (char*)realloc(s->s, s->m)))
+ s->s = tmp;
+ else
+ return EOF;
+ }
+ s->s[s->l++] = c;
+ s->s[s->l] = 0;
+ return c;
+}
+
+/* Raw variant of kputc: append one byte WITHOUT NUL-terminating;
+ * returns 1, or EOF on OOM. */
+static inline int kputc_(int c, kstring_t *s)
+{
+ if (s->l + 1 > s->m) {
+ char *tmp;
+ s->m = s->l + 1;
+ kroundup32(s->m);
+ if ((tmp = (char*)realloc(s->s, s->m)))
+ s->s = tmp;
+ else
+ return EOF;
+ }
+ s->s[s->l++] = c;
+ return 1;
+}
+
+/* Raw variant of kputsn: append l bytes WITHOUT NUL-terminating;
+ * returns l, or EOF on OOM. */
+static inline int kputsn_(const void *p, int l, kstring_t *s)
+{
+ if (s->l + l > s->m) {
+ char *tmp;
+ s->m = s->l + l;
+ kroundup32(s->m);
+ if ((tmp = (char*)realloc(s->s, s->m)))
+ s->s = tmp;
+ else
+ return EOF;
+ }
+ memcpy(s->s + s->l, p, l);
+ s->l += l;
+ return l;
+}
+
+/* Append the decimal representation of a signed int (digits are generated
+ * in reverse into buf, then copied back); returns 0, or EOF on OOM. */
+static inline int kputw(int c, kstring_t *s)
+{
+ char buf[16];
+ int i, l = 0;
+ unsigned int x = c;
+ if (c < 0) x = -x;
+ do { buf[l++] = x%10 + '0'; x /= 10; } while (x > 0);
+ if (c < 0) buf[l++] = '-';
+ if (s->l + l + 1 >= s->m) {
+ char *tmp;
+ s->m = s->l + l + 2;
+ kroundup32(s->m);
+ if ((tmp = (char*)realloc(s->s, s->m)))
+ s->s = tmp;
+ else
+ return EOF;
+ }
+ for (i = l - 1; i >= 0; --i) s->s[s->l++] = buf[i];
+ s->s[s->l] = 0;
+ return 0;
+}
+
+/* Append the decimal representation of an unsigned int; returns 0
+ * (or kputc's result when c == 0), EOF on OOM. */
+static inline int kputuw(unsigned c, kstring_t *s)
+{
+ char buf[16];
+ int l, i;
+ unsigned x;
+ if (c == 0) return kputc('0', s);
+ for (l = 0, x = c; x > 0; x /= 10) buf[l++] = x%10 + '0';
+ if (s->l + l + 1 >= s->m) {
+ char *tmp;
+ s->m = s->l + l + 2;
+ kroundup32(s->m);
+ if ((tmp = (char*)realloc(s->s, s->m)))
+ s->s = tmp;
+ else
+ return EOF;
+ }
+ for (i = l - 1; i >= 0; --i) s->s[s->l++] = buf[i];
+ s->s[s->l] = 0;
+ return 0;
+}
+
+/* Append the decimal representation of a signed long; 0 on success, EOF on OOM. */
+static inline int kputl(long c, kstring_t *s)
+{
+ char buf[32];
+ int i, l = 0;
+ unsigned long x = c;
+ if (c < 0) x = -x;
+ do { buf[l++] = x%10 + '0'; x /= 10; } while (x > 0);
+ if (c < 0) buf[l++] = '-';
+ if (s->l + l + 1 >= s->m) {
+ char *tmp;
+ s->m = s->l + l + 2;
+ kroundup32(s->m);
+ if ((tmp = (char*)realloc(s->s, s->m)))
+ s->s = tmp;
+ else
+ return EOF;
+ }
+ for (i = l - 1; i >= 0; --i) s->s[s->l++] = buf[i];
+ s->s[s->l] = 0;
+ return 0;
+}
+
+/*
+ * Returns 's' split by delimiter, with *n being the number of components;
+ * NULL on failure.
+ * NOTE(review): the offsets index into s->s and the array comes from
+ * ksplit_core — presumably malloc'd (caller frees) and presumably s->s is
+ * tokenised in place; confirm against kstring.c.
+ */
+static inline int *ksplit(kstring_t *s, int delimiter, int *n)
+{
+ int max = 0, *offsets = 0;
+ *n = ksplit_core(s->s, delimiter, &max, &offsets);
+ return offsets;
+}
+
+#endif
--- /dev/null
+#ifndef __AC_KSW_H
+#define __AC_KSW_H
+
+#include <stdint.h>
+
+#define KSW_XBYTE 0x10000
+#define KSW_XSTOP 0x20000
+#define KSW_XSUBO 0x40000
+#define KSW_XSTART 0x80000
+
+struct _kswq_t;
+typedef struct _kswq_t kswq_t;
+
+/* Alignment result from ksw_align(); values that were not computed
+ * (per the KSW_X* flags documented below) are left as -1. */
+typedef struct {
+ int score; // best score
+ int te, qe; // target end and query end
+ int score2, te2; // second best score and ending position on the target
+ int tb, qb; // target start and query start
+} kswr_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /**
+ * Aligning two sequences
+ *
+ * @param qlen length of the query sequence (typically <tlen)
+ * @param query query sequence with 0 <= query[i] < m
+ * @param tlen length of the target sequence
+ * @param target target sequence
+ * @param m number of residue types
+ * @param mat m*m scoring matrix in a one-dimensional array
+ * @param gapo gap open penalty; a gap of length l cost "-(gapo+l*gape)"
+ * @param gape gap extension penalty
+ * @param xtra extra information (see below)
+ * @param qry query profile (see below)
+ *
+ * @return alignment information in a struct; unset values to -1
+ *
+ * When xtra==0, ksw_align() uses a signed two-byte integer to store a
+ * score and only finds the best score and the end positions. The 2nd best
+ * score or the start positions are not attempted. The default behavior can
+ * be tuned by setting KSW_X* flags:
+ *
+ * KSW_XBYTE: use an unsigned byte to store a score. If overflow occurs,
+ * kswr_t::score will be set to 255
+ *
+ * KSW_XSUBO: track the 2nd best score and the ending position on the
+ * target if the 2nd best is higher than (xtra&0xffff)
+ *
+ * KSW_XSTOP: stop if the maximum score is above (xtra&0xffff)
+ *
+ * KSW_XSTART: find the start positions
+ *
+ * When *qry==NULL, ksw_align() will compute and allocate the query profile
+ * and when the function returns, *qry will point to the profile, which can
+ * be deallocated simply by free(). If one query is aligned against multiple
+ * target sequences, *qry should be set to NULL during the first call and
+ * freed after the last call. Note that qry can equal 0. In this case, the
+ * query profile will be deallocated in ksw_align().
+ */
+ kswr_t ksw_align(int qlen, uint8_t *query, int tlen, uint8_t *target, int m, const int8_t *mat, int gapo, int gape, int xtra, kswq_t **qry);
+
+ int ksw_extend(int qlen, const uint8_t *query, int tlen, const uint8_t *target, int m, const int8_t *mat, int gapo, int gape, int w, int h0, int *_qle, int *_tle);
+ int ksw_global(int qlen, const uint8_t *query, int tlen, const uint8_t *target, int m, const int8_t *mat, int gapo, int gape, int w, int *_n_cigar, uint32_t **_cigar);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef KTHREAD_H
+#define KTHREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Parallel for: run func(data, i, tid) for i in [0, n) across n_threads
+ * worker threads — presumably i is the element index and tid the worker id;
+ * confirm against kthread.c. kt_pipeline chains n_steps stages, each step's
+ * func receiving (shared_data, step, previous step's return value). */
+void kt_for(int n_threads, void (*func)(void*,long,int), void *data, long n);
+void kt_pipeline(int n_threads, void *(*func)(void*, int, void*), void *shared_data, int n_steps);
+
+/* Reusable thread-pool variant of kt_for: init once, run many, destroy. */
+void *kt_forpool_init(int n_threads);
+void kt_forpool_destroy(void *_fp);
+void kt_forpool(void *_fp, void (*func)(void*,long,int), void *data, long n);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef KURL_H
+#define KURL_H
+
+#include <sys/types.h>
+
+#define KURL_NULL 1
+#define KURL_INV_WHENCE 2
+#define KURL_SEEK_OUT 3
+#define KURL_NO_AUTH 4
+
+struct kurl_t;
+typedef struct kurl_t kurl_t;
+
+/* Optional credentials passed to kurl_open(); the s3* fields presumably
+ * hold an AWS S3 access key id / secret key, or a path to a key file —
+ * confirm against kurl.c. */
+typedef struct {
+ const char *s3keyid;
+ const char *s3secretkey;
+ const char *s3key_fn;
+} kurl_opt_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int kurl_init(void);
+void kurl_destroy(void);
+
+kurl_t *kurl_open(const char *url, kurl_opt_t *opt);
+kurl_t *kurl_dopen(int fd);
+int kurl_close(kurl_t *ku);
+ssize_t kurl_read(kurl_t *ku, void *buf, size_t nbytes);
+off_t kurl_seek(kurl_t *ku, off_t offset, int whence);
+int kurl_buflen(kurl_t *ku, int len);
+
+off_t kurl_tell(const kurl_t *ku);
+int kurl_eof(const kurl_t *ku);
+int kurl_fileno(const kurl_t *ku);
+int kurl_error(const kurl_t *ku);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Back-compat shim: expose the legacy knetfile API on top of kurl
+ * (the unused `mode` arguments are dropped). */
+#ifndef KNETFILE_H
+#define KNETFILE_H
+typedef kurl_t knetFile;
+#define knet_open(fn, mode) kurl_open(fn, 0)
+#define knet_dopen(fd, mode) kurl_dopen(fd)
+#define knet_close(fp) kurl_close(fp)
+#define knet_read(fp, buf, len) kurl_read(fp, buf, len)
+#define knet_seek(fp, off, whence) kurl_seek(fp, off, whence)
+#define knet_tell(fp) kurl_tell(fp)
+#define knet_fileno(fp) kurl_fileno(fp)
+#define knet_win32_init() kurl_init()
+#define knet_win32_destroy() kurl_destroy()
+#endif
+
+#endif
--- /dev/null
+/* The MIT License
+
+ Copyright (c) 2008, by Attractive Chaos <attractor@live.co.uk>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/*
+ An example:
+
+#include "kvec.h"
+int main() {
+ kvec_t(int) array;
+ kv_init(array);
+ kv_push(int, array, 10); // append
+ kv_a(int, array, 20) = 5; // dynamic
+ kv_A(array, 20) = 4; // static
+ kv_destroy(array);
+ return 0;
+}
+*/
+
+/*
+ 2008-09-22 (0.1.0):
+
+ * The initial version.
+
+*/
+
+#ifndef AC_KVEC_H
+#define AC_KVEC_H
+
+#include <stdlib.h>
+
+/* Round x up to the next power of two, modifying x in place. */
+#define kv_roundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+
+/* Growable array of `type`: n = elements in use, m = capacity, a = storage.
+ * kv_A/kv_pop perform no bounds or emptiness checks. */
+#define kvec_t(type) struct { size_t n, m; type *a; }
+#define kv_init(v) ((v).n = (v).m = 0, (v).a = 0)
+#define kv_destroy(v) free((v).a)
+#define kv_A(v, i) ((v).a[(i)])
+#define kv_pop(v) ((v).a[--(v).n])
+#define kv_size(v) ((v).n)
+#define kv_max(v) ((v).m)
+
+/* Set capacity to exactly s elements; realloc result is unchecked. */
+#define kv_resize(type, v, s) ((v).m = (s), (v).a = (type*)realloc((v).a, sizeof(type) * (v).m))
+
+/* Copy v0's contents into v1, growing v1 as needed. */
+#define kv_copy(type, v1, v0) do { \
+ if ((v1).m < (v0).n) kv_resize(type, v1, (v0).n); \
+ (v1).n = (v0).n; \
+ memcpy((v1).a, (v0).a, sizeof(type) * (v0).n); \
+ } while (0) \
+
+/* Append x, doubling capacity when full. */
+#define kv_push(type, v, x) do { \
+ if ((v).n == (v).m) { \
+ (v).m = (v).m? (v).m<<1 : 2; \
+ (v).a = (type*)realloc((v).a, sizeof(type) * (v).m); \
+ } \
+ (v).a[(v).n++] = (x); \
+ } while (0)
+
+/* Reserve one slot (growing if needed) and evaluate to a pointer to it. */
+#define kv_pushp(type, v) (((v).n == (v).m)? \
+ ((v).m = ((v).m? (v).m<<1 : 2), \
+ (v).a = (type*)realloc((v).a, sizeof(type) * (v).m), 0) \
+ : 0), ((v).a + ((v).n++))
+
+/* "Dynamic" element access: grows the vector so index i is valid, then
+ * evaluates to the element lvalue. */
+#define kv_a(type, v, i) (((v).m <= (size_t)(i)? \
+ ((v).m = (v).n = (i) + 1, kv_roundup32((v).m), \
+ (v).a = (type*)realloc((v).a, sizeof(type) * (v).m), 0) \
+ : (v).n <= (size_t)(i)? (v).n = (i) + 1 \
+ : 0), (v).a[(i)])
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_linklist_h
+#define quicly_linklist_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <assert.h>
+
+/* Intrusive circular doubly-linked list node; an initialised, unlinked
+ * node points at itself in both directions. */
+typedef struct st_quicly_linklist_t {
+ struct st_quicly_linklist_t *prev;
+ struct st_quicly_linklist_t *next;
+} quicly_linklist_t;
+
+static void quicly_linklist_init(quicly_linklist_t *l);
+static int quicly_linklist_is_linked(quicly_linklist_t *l);
+static void quicly_linklist_insert(quicly_linklist_t *prev, quicly_linklist_t *n);
+static void quicly_linklist_unlink(quicly_linklist_t *l);
+static void quicly_linklist_insert_list(quicly_linklist_t *prev, quicly_linklist_t *l);
+
+/* inline functions */
+
+/* Make l a self-referencing singleton — the "unlinked" state. */
+inline void quicly_linklist_init(quicly_linklist_t *l)
+{
+ l->prev = l->next = l;
+}
+
+/* A node is linked iff its prev points somewhere other than itself. */
+inline int quicly_linklist_is_linked(quicly_linklist_t *l)
+{
+ return l->prev != l;
+}
+
+/* Insert n immediately after prev; n must currently be unlinked. */
+inline void quicly_linklist_insert(quicly_linklist_t *prev, quicly_linklist_t *n)
+{
+ assert(!quicly_linklist_is_linked(n));
+ n->prev = prev;
+ n->next = prev->next;
+ n->prev->next = n;
+ n->next->prev = n;
+}
+
+/* Remove l from its list and reset it to the unlinked state. */
+inline void quicly_linklist_unlink(quicly_linklist_t *l)
+{
+ l->prev->next = l->next;
+ l->next->prev = l->prev;
+ quicly_linklist_init(l);
+}
+
+/* Splice every node anchored at l in after prev, leaving l empty.
+ * No-op when l's list is empty. */
+inline void quicly_linklist_insert_list(quicly_linklist_t *prev, quicly_linklist_t *l)
+{
+ if (quicly_linklist_is_linked(l)) {
+ l->next->prev = prev;
+ l->prev->next = prev->next;
+ prev->next->prev = l->prev;
+ prev->next = l->next;
+ quicly_linklist_init(l);
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_loss_h
+#define quicly_loss_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include "contrib/quicly/constants.h"
+
+typedef struct quicly_loss_conf_t {
+ /**
+ * Maximum reordering in time space before time based loss detection considers a packet lost. In percentile (1/1024) of an RTT.
+ */
+ unsigned time_reordering_percentile;
+ /**
+ * Minimum time in the future a PTO alarm may be set for. Typically set to alarm granularity.
+ */
+ uint32_t min_pto;
+ /**
+ * The default RTT used before an RTT sample is taken.
+ */
+ uint32_t default_initial_rtt;
+ /**
+ * Number of speculative PTOs at the end of a window. This must not be set to more than 3.
+ */
+ uint8_t num_speculative_ptos;
+} quicly_loss_conf_t;
+
+/* Default time-based reordering threshold: 1/8 of an RTT, in 1/1024 units. */
+#define QUICLY_LOSS_DEFAULT_TIME_REORDERING_PERCENTILE (1024 / 8)
+
+/* Spec-conformant profile: no speculative PTOs. */
+#define QUICLY_LOSS_SPEC_CONF \
+ { \
+ QUICLY_LOSS_DEFAULT_TIME_REORDERING_PERCENTILE, /* time_reordering_percentile */ \
+ QUICLY_DEFAULT_MIN_PTO, /* min_pto */ \
+ QUICLY_DEFAULT_INITIAL_RTT, /* default_initial_rtt */ \
+ 0 /* number of speculative PTOs */ \
+ }
+
+/* Performance-oriented profile: up to two speculative PTOs at the end of a window. */
+#define QUICLY_LOSS_PERFORMANT_CONF \
+ { \
+ QUICLY_LOSS_DEFAULT_TIME_REORDERING_PERCENTILE, /* time_reordering_percentile */ \
+ QUICLY_DEFAULT_MIN_PTO, /* min_pto */ \
+ QUICLY_DEFAULT_INITIAL_RTT, /* default_initial_rtt */ \
+ 2 /* number of speculative PTOs */ \
+ }
+
+/**
+ * Holds RTT variables. We use this structure differently from the specification:
+ * * whether the first sample has been obtained is checked via `latest != 0`
+ * * smoothed and variance are available even before the first RTT sample is obtained
+ */
+typedef struct quicly_rtt_t {
+ uint32_t minimum; /* lowest sample seen so far (UINT32_MAX until one arrives) */
+ uint32_t smoothed; /* EWMA of samples; preloaded with the initial RTT */
+ uint32_t variance; /* mean-deviation estimate */
+ uint32_t latest; /* most recent sample; 0 means no sample taken yet */
+} quicly_rtt_t;
+
+static void quicly_rtt_init(quicly_rtt_t *rtt, const quicly_loss_conf_t *conf, uint32_t initial_rtt);
+static void quicly_rtt_update(quicly_rtt_t *rtt, uint32_t latest_rtt, uint32_t ack_delay);
+static uint32_t quicly_rtt_get_pto(quicly_rtt_t *rtt, uint32_t max_ack_delay, uint32_t min_pto);
+
+typedef struct quicly_loss_t {
+ /**
+ * configuration
+ */
+ const quicly_loss_conf_t *conf;
+ /**
+ * pointer to transport parameter containing the peer's max_ack_delay
+ */
+ uint16_t *max_ack_delay;
+ /**
+ * pointer to transport parameter containing the peer's ack exponent
+ */
+ uint8_t *ack_delay_exponent;
+ /**
+ * The number of consecutive PTOs (PTOs that have fired without receiving an ack).
+ */
+ int8_t pto_count;
+ /**
+ * The time the most recent packet was sent.
+ */
+ int64_t time_of_last_packet_sent;
+ /**
+ * The largest packet number acknowledged in an ack frame, added by one (so that zero can mean "below any PN").
+ */
+ uint64_t largest_acked_packet_plus1;
+ /**
+ * Total number of application data bytes sent when the last tail occurred, not including retransmissions.
+ */
+ uint64_t total_bytes_sent;
+ /**
+ * The time at which the next packet will be considered lost based on exceeding the reordering window in time.
+ */
+ int64_t loss_time;
+ /**
+ * The time at which lostdetect_on_alarm should be called.
+ */
+ int64_t alarm_at;
+ /**
+ * rtt
+ */
+ quicly_rtt_t rtt;
+} quicly_loss_t;
+
+typedef int (*quicly_loss_do_detect_cb)(quicly_loss_t *r, uint64_t largest_acked, uint32_t delay_until_lost, int64_t *loss_time);
+
+static void quicly_loss_init(quicly_loss_t *r, const quicly_loss_conf_t *conf, uint32_t initial_rtt, uint16_t *max_ack_delay,
+ uint8_t *ack_delay_exponent);
+
+static void quicly_loss_update_alarm(quicly_loss_t *r, int64_t now, int64_t last_retransmittable_sent_at, int has_outstanding,
+ int can_send_stream_data, int handshake_is_in_progress, uint64_t total_bytes_sent);
+
+/* called when an ACK is received
+ */
+static void quicly_loss_on_ack_received(quicly_loss_t *r, uint64_t largest_newly_acked, int64_t now, int64_t sent_at,
+ uint64_t ack_delay_encoded, int ack_eliciting);
+
+/* This function updates the loss detection timer and indicates to the caller how many packets should be sent.
+ * After calling this function, app should:
+ * * send min_packets_to_send number of packets immediately. min_packets_to_send should never be 0.
+ * * if restrict_sending is true, limit sending to min_packets_to_send, otherwise as limited by congestion/flow control
+ * and then call quicly_loss_update_alarm and update the alarm
+ */
+static int quicly_loss_on_alarm(quicly_loss_t *r, uint64_t largest_sent, uint64_t largest_acked, quicly_loss_do_detect_cb do_detect,
+ size_t *min_packets_to_send, int *restrict_sending);
+
+static int quicly_loss_detect_loss(quicly_loss_t *r, uint64_t largest_acked, quicly_loss_do_detect_cb do_detect);
+
+/* inline definitions */
+
+/* Resets the RTT estimator: no minimum observed yet, latest == 0 (doubles as the "no sample taken yet"
+ * marker used by quicly_rtt_update), smoothed/variance seeded from initial_rtt. `conf` is currently unused. */
+inline void quicly_rtt_init(quicly_rtt_t *rtt, const quicly_loss_conf_t *conf, uint32_t initial_rtt)
+{
+    rtt->minimum = UINT32_MAX;
+    rtt->latest = 0;
+    rtt->smoothed = initial_rtt;
+    rtt->variance = initial_rtt / 2;
+}
+
+/* Feeds one RTT sample (milliseconds) into the estimator: tracks the minimum, subtracts the peer's ack
+ * delay when plausible, then applies the standard 1/8 smoothed-RTT / 1/4 variance EWMA updates. */
+inline void quicly_rtt_update(quicly_rtt_t *rtt, uint32_t latest_rtt, uint32_t ack_delay)
+{
+    /* rtt->latest == 0 is the "no sample yet" marker set by quicly_rtt_init */
+    int is_first_sample = rtt->latest == 0;
+
+    assert(latest_rtt != UINT32_MAX);
+    rtt->latest = latest_rtt != 0 ? latest_rtt : 1; /* Force minimum RTT sample to 1ms */
+
+    /* update min_rtt */
+    if (rtt->latest < rtt->minimum)
+        rtt->minimum = rtt->latest;
+
+    /* use ack_delay if it's a plausible value (i.e., would not push the sample below the observed minimum) */
+    if (rtt->latest > rtt->minimum + ack_delay)
+        rtt->latest -= ack_delay;
+
+    /* update smoothed_rtt and rttvar */
+    if (is_first_sample) {
+        rtt->smoothed = rtt->latest;
+        rtt->variance = rtt->latest / 2;
+    } else {
+        uint32_t absdiff = rtt->smoothed >= rtt->latest ? rtt->smoothed - rtt->latest : rtt->latest - rtt->smoothed;
+        rtt->variance = (rtt->variance * 3 + absdiff) / 4;
+        rtt->smoothed = (rtt->smoothed * 7 + rtt->latest) / 8;
+    }
+    assert(rtt->smoothed != 0);
+}
+
+/* Returns the probe timeout: smoothed RTT + 4 * rttvar (or min_pto when the variance is zero) + max_ack_delay. */
+inline uint32_t quicly_rtt_get_pto(quicly_rtt_t *rtt, uint32_t max_ack_delay, uint32_t min_pto)
+{
+    return rtt->smoothed + (rtt->variance != 0 ? rtt->variance * 4 : min_pto) + max_ack_delay;
+}
+
+/* Initializes the loss-detection state: both alarms disabled (INT64_MAX), counters zeroed, RTT estimator
+ * seeded from initial_rtt. max_ack_delay and ack_delay_exponent are stored by reference and dereferenced
+ * at use time (see quicly_loss_on_ack_received / quicly_loss_update_alarm). */
+inline void quicly_loss_init(quicly_loss_t *r, const quicly_loss_conf_t *conf, uint32_t initial_rtt, uint16_t *max_ack_delay,
+                             uint8_t *ack_delay_exponent)
+{
+    *r = (quicly_loss_t){.conf = conf,
+                         .max_ack_delay = max_ack_delay,
+                         .ack_delay_exponent = ack_delay_exponent,
+                         .pto_count = 0,
+                         .time_of_last_packet_sent = 0,
+                         .largest_acked_packet_plus1 = 0,
+                         .total_bytes_sent = 0,
+                         .loss_time = INT64_MAX,
+                         .alarm_at = INT64_MAX};
+    quicly_rtt_init(&r->rtt, conf, initial_rtt);
+}
+
+/* Re-arms r->alarm_at. With nothing outstanding both alarms are disabled. Otherwise the time-threshold
+ * loss alarm (r->loss_time) takes precedence; failing that, a PTO alarm is computed with exponential
+ * backoff on pto_count — a negative pto_count denotes speculative tail probing and *shortens* the
+ * timeout instead. The alarm is clamped so it never fires in the past. */
+inline void quicly_loss_update_alarm(quicly_loss_t *r, int64_t now, int64_t last_retransmittable_sent_at, int has_outstanding,
+                                     int can_send_stream_data, int handshake_is_in_progress, uint64_t total_bytes_sent)
+{
+    if (!has_outstanding) {
+        /* Do not set alarm if there's no data outstanding */
+        r->alarm_at = INT64_MAX;
+        r->loss_time = INT64_MAX;
+        return;
+    }
+    assert(last_retransmittable_sent_at != INT64_MAX);
+    int64_t alarm_duration;
+    if (r->loss_time != INT64_MAX) {
+        /* time-threshold loss detection */
+        alarm_duration = r->loss_time - last_retransmittable_sent_at;
+    } else {
+        /* PTO alarm */
+        assert(r->pto_count < 63);
+        /* Probes are sent with a modified backoff to minimize latency of recovery. For instance, with num_speculative_ptos set to
+         * 2, the backoff pattern is as follows:
+         * * when there's a tail: 0.25, 0.5, 1, 2, 4, 8, ...
+         * * when mid-transfer: 1, 1, 1, 2, 4, 8, ...
+         * The first 2 probes in this case (and num_speculative_ptos, more generally), or the probes sent when pto_count < 0, are
+         * the speculative ones, which add potentially redundant retransmissions at a tail to reduce the cost of potential tail
+         * losses.
+         *
+         * FIXME: use of `can_send_stream_data` and `bytes_sent` is not entirely correct, it does not take things like MAX_ frames
+         * and pending.flows into consideration.
+         */
+        if (r->conf->num_speculative_ptos > 0 && r->pto_count <= 0 && !handshake_is_in_progress && !can_send_stream_data &&
+            r->total_bytes_sent < total_bytes_sent) {
+            /* New tail, defined as (i) sender is not in PTO recovery, (ii) there is no stream data to send, and
+             * (iii) new application data was sent since the last tail. Move the pto_count back to kick off speculative probing. */
+            if (r->pto_count == 0)
+                /* kick off speculative probing if not already in progress */
+                r->pto_count = -r->conf->num_speculative_ptos;
+            r->total_bytes_sent = total_bytes_sent;
+        }
+        if (r->pto_count < 0) {
+            /* Speculative probes sent under an RTT do not need to account for ack delay, since there is no expectation
+             * of an ack being received before the probe is sent. */
+            alarm_duration = quicly_rtt_get_pto(&r->rtt, 0, r->conf->min_pto);
+            alarm_duration >>= -r->pto_count;
+            if (alarm_duration < r->conf->min_pto)
+                alarm_duration = r->conf->min_pto;
+        } else {
+            /* Ordinary PTO. The bitshift below is fine; it would take more than a millennium to overflow either alarm_duration or
+             * pto_count, even when the timer granularity is nanosecond */
+            alarm_duration = quicly_rtt_get_pto(&r->rtt, handshake_is_in_progress ? 0 : *r->max_ack_delay, r->conf->min_pto);
+            alarm_duration <<= r->pto_count;
+        }
+    }
+    r->alarm_at = last_retransmittable_sent_at + alarm_duration;
+    if (r->alarm_at < now)
+        r->alarm_at = now;
+}
+
+/* Handles an incoming ACK: any newly-acked packet resets the ordinary PTO backoff (negative, i.e.
+ * speculative, counts are preserved), and an RTT sample is taken only when the largest acked packet
+ * number advances and the ACK acknowledges an ack-eliciting packet. */
+inline void quicly_loss_on_ack_received(quicly_loss_t *r, uint64_t largest_newly_acked, int64_t now, int64_t sent_at,
+                                        uint64_t ack_delay_encoded, int ack_eliciting)
+{
+    /* Reset PTO count if anything is newly acked, and if sender is not speculatively probing at a tail */
+    if (largest_newly_acked != UINT64_MAX && r->pto_count > 0)
+        r->pto_count = 0;
+
+    /* If largest newly acked is not larger than before, skip RTT sample */
+    if (largest_newly_acked == UINT64_MAX || r->largest_acked_packet_plus1 > largest_newly_acked)
+        return;
+    r->largest_acked_packet_plus1 = largest_newly_acked + 1;
+
+    /* If ack does not acknowledge any ack-eliciting packet, skip RTT sample */
+    if (!ack_eliciting)
+        return;
+
+    /* Decode ack delay: the wire value is scaled by 2^ack_delay_exponent microseconds */
+    uint64_t ack_delay_microsecs = ack_delay_encoded << *r->ack_delay_exponent;
+    uint32_t ack_delay_millisecs = (uint32_t)((ack_delay_microsecs * 2 + 1000) / 2000); /* round to nearest ms */
+    /* use min(ack_delay, max_ack_delay) as the ack delay */
+    if (ack_delay_millisecs > *r->max_ack_delay)
+        ack_delay_millisecs = *r->max_ack_delay;
+    quicly_rtt_update(&r->rtt, (uint32_t)(now - sent_at), ack_delay_millisecs);
+}
+
+/* Loss-detection alarm handler; see the comment above the prototype for the caller contract.
+ * Disarms the alarm, requests at least one packet, then either runs time-threshold loss detection
+ * (no sending restriction) or treats the event as a PTO: backoff counter bumped, sending restricted
+ * to the probe packets only. */
+inline int quicly_loss_on_alarm(quicly_loss_t *r, uint64_t largest_sent, uint64_t largest_acked, quicly_loss_do_detect_cb do_detect,
+                                size_t *min_packets_to_send, int *restrict_sending)
+{
+    r->alarm_at = INT64_MAX;
+    *min_packets_to_send = 1;
+    if (r->loss_time != INT64_MAX) {
+        /* Time threshold loss detection. Send at least 1 packet, but no restrictions on sending otherwise. */
+        *restrict_sending = 0;
+        return quicly_loss_detect_loss(r, largest_acked, do_detect);
+    }
+    /* PTO. Send at least and at most 1 packet during speculative probing and 2 packets otherwise. */
+    ++r->pto_count;
+    *restrict_sending = 1;
+    if (r->pto_count > 0)
+        *min_packets_to_send = 2;
+
+    return 0;
+}
+
+/* Runs loss detection via do_detect using a reordering window of 9/8 * max(latest RTT, smoothed RTT),
+ * rounded up. r->loss_time is cleared first and then re-armed with the next expiry reported by the
+ * callback (if any). Returns the callback's error code. */
+inline int quicly_loss_detect_loss(quicly_loss_t *r, uint64_t largest_acked, quicly_loss_do_detect_cb do_detect)
+{
+    uint32_t delay_until_lost = ((r->rtt.latest > r->rtt.smoothed ? r->rtt.latest : r->rtt.smoothed) * 9 + 7) / 8;
+    int64_t loss_time;
+    int ret;
+
+    r->loss_time = INT64_MAX;
+
+    if ((ret = do_detect(r, largest_acked, delay_until_lost, &loss_time)) != 0)
+        return ret;
+    if (loss_time != INT64_MAX)
+        r->loss_time = loss_time;
+
+    return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_maxsender_h
+#define quicly_maxsender_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include "contrib/quicly/constants.h"
+
+/* Tracks the local announcement of a monotonically-increasing maximum value: what has been committed to
+ * the wire, what the peer has acknowledged, and how many announcements are currently inflight. */
+typedef struct st_quicly_maxsender_t {
+    /**
+     * maximum value being announced (never decreases)
+     */
+    int64_t max_committed;
+    /**
+     * maximum value being acked by peer
+     */
+    int64_t max_acked;
+    /**
+     * number of maximums inflight
+     */
+    size_t num_inflight;
+    /**
+     * if set, the next quicly_maxsender_should_send_max returns true regardless of the thresholds;
+     * set by quicly_maxsender_request_transmit, cleared by quicly_maxsender_record
+     */
+    unsigned force_send : 1;
+} quicly_maxsender_t;
+
+/* per-transmission state: the announced value plus an inflight flag cleared when the packet is acked or lost */
+typedef struct st_quicly_maxsender_sent_t {
+    uint64_t inflight : 1;
+    uint64_t value : 63;
+} quicly_maxsender_sent_t;
+
+/* prototypes; see the inline definitions below for per-function documentation */
+static void quicly_maxsender_init(quicly_maxsender_t *m, int64_t initial_value);
+static void quicly_maxsender_dispose(quicly_maxsender_t *m);
+static void quicly_maxsender_request_transmit(quicly_maxsender_t *m);
+static int quicly_maxsender_should_send_max(quicly_maxsender_t *m, int64_t buffered_from, uint32_t window_size,
+                                            uint32_t update_ratio);
+static int quicly_maxsender_should_send_blocked(quicly_maxsender_t *m, int64_t local_max);
+static void quicly_maxsender_record(quicly_maxsender_t *m, int64_t value, quicly_maxsender_sent_t *sent);
+static void quicly_maxsender_acked(quicly_maxsender_t *m, quicly_maxsender_sent_t *sent);
+static void quicly_maxsender_lost(quicly_maxsender_t *m, quicly_maxsender_sent_t *sent);
+
+/* inline definitions */
+
+/* Initializes the sender: committed == acked == initial_value, nothing inflight, no forced send. */
+inline void quicly_maxsender_init(quicly_maxsender_t *m, int64_t initial_value)
+{
+    m->max_committed = initial_value;
+    m->max_acked = initial_value;
+    m->num_inflight = 0;
+    m->force_send = 0;
+}
+
+/* No-op: the structure owns no resources; kept for interface symmetry with _init. */
+inline void quicly_maxsender_dispose(quicly_maxsender_t *m)
+{
+}
+
+/* Forces the next call to quicly_maxsender_should_send_max to return true. */
+inline void quicly_maxsender_request_transmit(quicly_maxsender_t *m)
+{
+    m->force_send = 1;
+}
+
+/* Returns whether a new maximum should be announced: either a forced send is pending, or the relevant
+ * base value (committed while an update is inflight, acked otherwise) has fallen within
+ * window_size * update_ratio / 1024 of buffered_from. */
+inline int quicly_maxsender_should_send_max(quicly_maxsender_t *m, int64_t buffered_from, uint32_t window_size,
+                                            uint32_t update_ratio)
+{
+    if (m->force_send)
+        return 1;
+
+    /* update_ratio is expressed in units of 1/1024 */
+    int64_t threshold = buffered_from + ((int64_t)window_size * update_ratio) / 1024;
+    return (m->num_inflight != 0 ? m->max_committed : m->max_acked) <= threshold;
+}
+
+/* Returns whether a "blocked" notification should be sent: the committed maximum is still below local_max. */
+inline int quicly_maxsender_should_send_blocked(quicly_maxsender_t *m, int64_t local_max)
+{
+    return m->max_committed < local_max;
+}
+
+/* Records that `value` is being transmitted: the committed maximum never decreases, the update is counted
+ * as inflight, any forced send is satisfied, and `sent` is filled in for later ack/loss tracking. */
+inline void quicly_maxsender_record(quicly_maxsender_t *m, int64_t value, quicly_maxsender_sent_t *sent)
+{
+    assert(value >= m->max_committed);
+    m->max_committed = value;
+    ++m->num_inflight;
+    m->force_send = 0;
+    sent->inflight = 1;
+    sent->value = value;
+}
+
+/* Called when the recorded transmission is acked: raises max_acked and, unless this is a late ACK whose
+ * packet was already handled by quicly_maxsender_lost, decrements the inflight counter. */
+inline void quicly_maxsender_acked(quicly_maxsender_t *m, quicly_maxsender_sent_t *sent)
+{
+    if (m->max_acked < sent->value)
+        m->max_acked = sent->value;
+    /* num_inflight should not be adjusted in case of a late ACK */
+    if (sent->inflight) {
+        assert(m->num_inflight != 0);
+        --m->num_inflight;
+        sent->inflight = 0;
+    }
+}
+
+/* Called when the recorded transmission is declared lost (LOST event, not EXPIRED): must be invoked at
+ * most once per `sent`, hence the assert and the unconditional decrement. */
+inline void quicly_maxsender_lost(quicly_maxsender_t *m, quicly_maxsender_sent_t *sent)
+{
+    /* the function must be called at most once (when LOST event occurs, but not EXPIRED), hence assert and always decrement */
+    assert(m->num_inflight != 0);
+    --m->num_inflight;
+    sent->inflight = 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2016 DeNA Co., Ltd., Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifdef _WINDOWS
+#include "wincompat.h"
+#else
+#include <unistd.h>
+#endif
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <openssl/bn.h>
+#include <openssl/crypto.h>
+#include <openssl/ec.h>
+#include <openssl/ecdh.h>
+#include <openssl/err.h>
+#include <openssl/evp.h>
+#include <openssl/objects.h>
+#include <openssl/rand.h>
+#include <openssl/rsa.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/x509_vfy.h>
+#include "picotls.h"
+#include "picotls/openssl.h"
+
+#ifdef _WINDOWS
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+#pragma warning(disable : 4996)
+#include <ms/applink.c>
+#endif
+
+#if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x10100000L
+#define OPENSSL_1_1_API 1
+#elif defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER >= 0x2070000fL
+#define OPENSSL_1_1_API 1
+#else
+#define OPENSSL_1_1_API 0
+#endif
+
+#if !OPENSSL_1_1_API
+
+/* Compatibility shims for OpenSSL < 1.1.0 (and LibreSSL < 2.7.0), which lack these APIs. */
+
+#define EVP_PKEY_up_ref(p) CRYPTO_add(&(p)->references, 1, CRYPTO_LOCK_EVP_PKEY)
+#define X509_STORE_up_ref(p) CRYPTO_add(&(p)->references, 1, CRYPTO_LOCK_X509_STORE)
+
+/* heap-allocating replacement for the 1.1 constructor */
+static HMAC_CTX *HMAC_CTX_new(void)
+{
+    HMAC_CTX *ctx;
+
+    if ((ctx = OPENSSL_malloc(sizeof(*ctx))) == NULL)
+        return NULL;
+    HMAC_CTX_init(ctx);
+    return ctx;
+}
+
+static void HMAC_CTX_free(HMAC_CTX *ctx)
+{
+    HMAC_CTX_cleanup(ctx);
+    OPENSSL_free(ctx);
+}
+
+/* in pre-1.1 OpenSSL, "reset" is spelled "cleanup" */
+static int EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx)
+{
+    return EVP_CIPHER_CTX_cleanup(ctx);
+}
+
+#endif
+
+/* Fills buf with len cryptographically-secure random bytes; aborts the process if RAND_bytes fails,
+ * since callers have no way to proceed safely without randomness. */
+void ptls_openssl_random_bytes(void *buf, size_t len)
+{
+    int ret = RAND_bytes(buf, (int)len);
+    if (ret != 1) {
+        fprintf(stderr, "RAND_bytes() failed with code: %d\n", ret);
+        abort();
+    }
+}
+
+/* Generates a fresh EC key pair on `group`; returns NULL on failure.
+ * (The misspelled name is kept as-is to match the vendored upstream source and its callers below.) */
+static EC_KEY *ecdh_gerenate_key(EC_GROUP *group)
+{
+    EC_KEY *key;
+
+    if ((key = EC_KEY_new()) == NULL)
+        return NULL;
+    if (!EC_KEY_set_group(key, group) || !EC_KEY_generate_key(key)) {
+        EC_KEY_free(key);
+        return NULL;
+    }
+
+    return key;
+}
+
+/* Derives the raw ECDH shared secret into a freshly malloc'ed buffer sized to the group degree.
+ * On success *out owns the buffer; on failure it is freed (free(NULL) is harmless when the allocation
+ * itself failed) and *out is cleared. */
+static int ecdh_calc_secret(ptls_iovec_t *out, const EC_GROUP *group, EC_KEY *privkey, EC_POINT *peer_point)
+{
+    ptls_iovec_t secret;
+    int ret;
+
+    secret.len = (EC_GROUP_get_degree(group) + 7) / 8;
+    if ((secret.base = malloc(secret.len)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    if (ECDH_compute_key(secret.base, secret.len, peer_point, privkey, NULL) <= 0) {
+        ret = PTLS_ALERT_HANDSHAKE_FAILURE; /* ??? */
+        goto Exit;
+    }
+    ret = 0;
+
+Exit:
+    if (ret == 0) {
+        *out = secret;
+    } else {
+        free(secret.base);
+        *out = (ptls_iovec_t){NULL};
+    }
+    return ret;
+}
+
+/* Decodes an octet-string-encoded EC point onto `group`; returns NULL on allocation or decode failure. */
+static EC_POINT *x9_62_decode_point(const EC_GROUP *group, ptls_iovec_t vec, BN_CTX *bn_ctx)
+{
+    EC_POINT *point = NULL;
+
+    if ((point = EC_POINT_new(group)) == NULL)
+        return NULL;
+    if (!EC_POINT_oct2point(group, point, vec.base, vec.len, bn_ctx)) {
+        EC_POINT_free(point);
+        return NULL;
+    }
+
+    return point;
+}
+
+/* Encodes `point` in X9.62 uncompressed form into a malloc'ed iovec (length probed with a NULL buffer
+ * first); returns a {NULL} iovec on failure. */
+static ptls_iovec_t x9_62_encode_point(const EC_GROUP *group, const EC_POINT *point, BN_CTX *bn_ctx)
+{
+    ptls_iovec_t vec;
+
+    if ((vec.len = EC_POINT_point2oct(group, point, POINT_CONVERSION_UNCOMPRESSED, NULL, 0, bn_ctx)) == 0)
+        return (ptls_iovec_t){NULL};
+    if ((vec.base = malloc(vec.len)) == NULL)
+        return (ptls_iovec_t){NULL};
+    if (EC_POINT_point2oct(group, point, POINT_CONVERSION_UNCOMPRESSED, vec.base, vec.len, bn_ctx) != vec.len) {
+        free(vec.base);
+        return (ptls_iovec_t){NULL};
+    }
+
+    return vec;
+}
+
+/* ECDH key-exchange context: super.pubkey.base is malloc'ed; privkey and bn_ctx are owned when set. */
+struct st_x9_62_keyex_context_t {
+    ptls_key_exchange_context_t super;
+    BN_CTX *bn_ctx;
+    EC_KEY *privkey;
+};
+
+/* Frees the context and whatever members are set; tolerates partially-initialized contexts
+ * (NULL privkey / bn_ctx / pubkey.base), but not a NULL ctx pointer. */
+static void x9_62_free_context(struct st_x9_62_keyex_context_t *ctx)
+{
+    free(ctx->super.pubkey.base);
+    if (ctx->privkey != NULL)
+        EC_KEY_free(ctx->privkey);
+    if (ctx->bn_ctx != NULL)
+        BN_CTX_free(ctx->bn_ctx);
+    free(ctx);
+}
+
+/* Key-exchange callback for the EC contexts: decodes the peer's X9.62-encoded point and derives the
+ * shared secret via ecdh_calc_secret. A NULL `secret` skips the derivation (release-only call).
+ * The context is freed and *_ctx cleared whenever `release` is set. */
+static int x9_62_on_exchange(ptls_key_exchange_context_t **_ctx, int release, ptls_iovec_t *secret, ptls_iovec_t peerkey)
+{
+    struct st_x9_62_keyex_context_t *ctx = (struct st_x9_62_keyex_context_t *)*_ctx;
+    const EC_GROUP *group = EC_KEY_get0_group(ctx->privkey);
+    EC_POINT *peer_point = NULL;
+    int ret;
+
+    if (secret == NULL) {
+        ret = 0;
+        goto Exit;
+    }
+
+    if ((peer_point = x9_62_decode_point(group, peerkey, ctx->bn_ctx)) == NULL) {
+        ret = PTLS_ALERT_DECODE_ERROR;
+        goto Exit;
+    }
+    if ((ret = ecdh_calc_secret(secret, group, ctx->privkey, peer_point)) != 0)
+        goto Exit;
+
+Exit:
+    if (peer_point != NULL)
+        EC_POINT_free(peer_point);
+    if (release) {
+        x9_62_free_context(ctx);
+        *_ctx = NULL;
+    }
+    return ret;
+}
+
+/* Allocates and zero-initializes an ECDH key-exchange context (the private key is filled in later by
+ * the caller) and attaches a fresh BN_CTX. On failure *ctx is NULL and an error code is returned. */
+static int x9_62_create_context(ptls_key_exchange_algorithm_t *algo, struct st_x9_62_keyex_context_t **ctx)
+{
+    int ret;
+
+    if ((*ctx = (struct st_x9_62_keyex_context_t *)malloc(sizeof(**ctx))) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    **ctx = (struct st_x9_62_keyex_context_t){{algo, {NULL}, x9_62_on_exchange}};
+
+    if (((*ctx)->bn_ctx = BN_CTX_new()) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+
+    ret = 0;
+Exit:
+    if (ret != 0) {
+        /* *ctx is NULL when the allocation above failed; x9_62_free_context would dereference it */
+        if (*ctx != NULL)
+            x9_62_free_context(*ctx);
+        *ctx = NULL;
+    }
+    return ret;
+}
+
+/* Exports the context's public key in X9.62 uncompressed encoding into ctx->super.pubkey. */
+static int x9_62_setup_pubkey(struct st_x9_62_keyex_context_t *ctx)
+{
+    const EC_GROUP *group = EC_KEY_get0_group(ctx->privkey);
+    const EC_POINT *pubkey = EC_KEY_get0_public_key(ctx->privkey);
+    if ((ctx->super.pubkey = x9_62_encode_point(group, pubkey, ctx->bn_ctx)).base == NULL)
+        return PTLS_ERROR_NO_MEMORY;
+    return 0;
+}
+
+/* Creates an ephemeral ECDH key-exchange context for the curve identified by algo->data (an OpenSSL NID):
+ * allocates the context, generates a key pair and exports the public point. The EC_GROUP is released in
+ * all paths; the context is released on failure. */
+static int x9_62_create_key_exchange(ptls_key_exchange_algorithm_t *algo, ptls_key_exchange_context_t **_ctx)
+{
+    EC_GROUP *group = NULL;
+    struct st_x9_62_keyex_context_t *ctx = NULL;
+    int ret;
+
+    /* FIXME use a global? */
+    if ((group = EC_GROUP_new_by_curve_name((int)algo->data)) == NULL) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if ((ret = x9_62_create_context(algo, &ctx)) != 0)
+        goto Exit;
+    if ((ctx->privkey = ecdh_gerenate_key(group)) == NULL) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if ((ret = x9_62_setup_pubkey(ctx)) != 0)
+        goto Exit;
+    ret = 0;
+
+Exit:
+    if (group != NULL)
+        EC_GROUP_free(group);
+    if (ret == 0) {
+        *_ctx = &ctx->super;
+    } else {
+        if (ctx != NULL)
+            x9_62_free_context(ctx);
+        *_ctx = NULL;
+    }
+
+    return ret;
+}
+
+/* Wraps an existing EC key in a key-exchange context. On success the context takes ownership of eckey;
+ * on failure ownership stays with the caller — ptls_openssl_create_key_exchange frees eckey itself when
+ * this function fails, so the key is detached here before the context is destroyed to avoid the double
+ * free that occurred when x9_62_setup_pubkey failed. */
+static int x9_62_init_key(ptls_key_exchange_algorithm_t *algo, ptls_key_exchange_context_t **_ctx, EC_KEY *eckey)
+{
+    struct st_x9_62_keyex_context_t *ctx = NULL;
+    int ret;
+
+    if ((ret = x9_62_create_context(algo, &ctx)) != 0)
+        goto Exit;
+    ctx->privkey = eckey;
+    if ((ret = x9_62_setup_pubkey(ctx)) != 0)
+        goto Exit;
+    ret = 0;
+
+Exit:
+    if (ret == 0) {
+        *_ctx = &ctx->super;
+    } else {
+        if (ctx != NULL) {
+            /* detach eckey so that x9_62_free_context does not free what the caller still owns */
+            ctx->privkey = NULL;
+            x9_62_free_context(ctx);
+        }
+        *_ctx = NULL;
+    }
+    return ret;
+}
+
+/* One-shot ECDH over `group`: decodes the peer's point, generates an ephemeral private key, exports our
+ * public point into *pubkey, and derives the shared secret into *secret (both malloc'ed). On failure
+ * both output iovecs are freed and cleared. */
+static int x9_62_key_exchange(EC_GROUP *group, ptls_iovec_t *pubkey, ptls_iovec_t *secret, ptls_iovec_t peerkey, BN_CTX *bn_ctx)
+{
+    EC_POINT *peer_point = NULL;
+    EC_KEY *privkey = NULL;
+    int ret;
+
+    *pubkey = (ptls_iovec_t){NULL};
+    *secret = (ptls_iovec_t){NULL};
+
+    /* decode peer key */
+    if ((peer_point = x9_62_decode_point(group, peerkey, bn_ctx)) == NULL) {
+        ret = PTLS_ALERT_DECODE_ERROR;
+        goto Exit;
+    }
+
+    /* create private key */
+    if ((privkey = ecdh_gerenate_key(group)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+
+    /* encode public key */
+    if ((*pubkey = x9_62_encode_point(group, EC_KEY_get0_public_key(privkey), bn_ctx)).base == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+
+    /* calc secret */
+    secret->len = (EC_GROUP_get_degree(group) + 7) / 8;
+    if ((secret->base = malloc(secret->len)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+
+    /* ecdh! */
+    if (ECDH_compute_key(secret->base, secret->len, peer_point, privkey, NULL) <= 0) {
+        ret = PTLS_ALERT_HANDSHAKE_FAILURE; /* ??? */
+        goto Exit;
+    }
+
+    ret = 0;
+
+Exit:
+    if (peer_point != NULL)
+        EC_POINT_free(peer_point);
+    if (privkey != NULL)
+        EC_KEY_free(privkey);
+    if (ret != 0) {
+        /* outputs were zeroed above, so these frees are safe regardless of how far we got */
+        free(pubkey->base);
+        *pubkey = (ptls_iovec_t){NULL};
+        free(secret->base);
+        *secret = (ptls_iovec_t){NULL};
+    }
+    return ret;
+}
+
+/* One-shot ECDH for the secp* curves: looks up the curve by NID (algo->data), allocates a BN_CTX, and
+ * delegates to x9_62_key_exchange. */
+static int secp_key_exchange(ptls_key_exchange_algorithm_t *algo, ptls_iovec_t *pubkey, ptls_iovec_t *secret, ptls_iovec_t peerkey)
+{
+    EC_GROUP *group = NULL;
+    BN_CTX *bn_ctx = NULL;
+    int ret;
+
+    if ((group = EC_GROUP_new_by_curve_name((int)algo->data)) == NULL) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if ((bn_ctx = BN_CTX_new()) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+
+    ret = x9_62_key_exchange(group, pubkey, secret, peerkey, bn_ctx);
+
+Exit:
+    if (bn_ctx != NULL)
+        BN_CTX_free(bn_ctx);
+    if (group != NULL)
+        EC_GROUP_free(group);
+    return ret;
+}
+
+#if PTLS_OPENSSL_HAVE_X25519
+
+/* EVP-based key-exchange context (X25519). pubkey.base comes from EVP_PKEY_get1_tls_encodedpoint
+ * (see evp_keyex_init) and is therefore released with OPENSSL_free rather than free. */
+struct st_evp_keyex_context_t {
+    ptls_key_exchange_context_t super;
+    EVP_PKEY *privkey;
+};
+
+/* Releases the context and its owned key / public-key buffer (each may be NULL). */
+static void evp_keyex_free(struct st_evp_keyex_context_t *ctx)
+{
+    if (ctx->privkey != NULL)
+        EVP_PKEY_free(ctx->privkey);
+    if (ctx->super.pubkey.base != NULL)
+        OPENSSL_free(ctx->super.pubkey.base);
+    free(ctx);
+}
+
+/* Key-exchange callback for EVP-based groups. When `secret` is NULL the call only serves to release the
+ * context (if `release` is set). Otherwise the peer key — which must have exactly the same encoded
+ * length as ours — is imported and the shared secret derived via EVP_PKEY_derive into a malloc'ed
+ * buffer (length probed with a NULL output first); the buffer is freed again on failure. */
+static int evp_keyex_on_exchange(ptls_key_exchange_context_t **_ctx, int release, ptls_iovec_t *secret, ptls_iovec_t peerkey)
+{
+    struct st_evp_keyex_context_t *ctx = (void *)*_ctx;
+    EVP_PKEY *evppeer = NULL;
+    EVP_PKEY_CTX *evpctx = NULL;
+    int ret;
+
+    if (secret == NULL) {
+        ret = 0;
+        goto Exit;
+    }
+
+    secret->base = NULL;
+
+    if (peerkey.len != ctx->super.pubkey.len) {
+        ret = PTLS_ALERT_DECRYPT_ERROR;
+        goto Exit;
+    }
+
+    if ((evppeer = EVP_PKEY_new()) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    /* the peer key shares the group parameters of our own key */
+    if (EVP_PKEY_copy_parameters(evppeer, ctx->privkey) <= 0) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if (EVP_PKEY_set1_tls_encodedpoint(evppeer, peerkey.base, peerkey.len) <= 0) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if ((evpctx = EVP_PKEY_CTX_new(ctx->privkey, NULL)) == NULL) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if (EVP_PKEY_derive_init(evpctx) <= 0) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if (EVP_PKEY_derive_set_peer(evpctx, evppeer) <= 0) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    /* first call with NULL output obtains the required buffer size */
+    if (EVP_PKEY_derive(evpctx, NULL, &secret->len) <= 0) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if ((secret->base = malloc(secret->len)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    if (EVP_PKEY_derive(evpctx, secret->base, &secret->len) <= 0) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+
+    ret = 0;
+Exit:
+    if (evpctx != NULL)
+        EVP_PKEY_CTX_free(evpctx);
+    if (evppeer != NULL)
+        EVP_PKEY_free(evppeer);
+    if (ret != 0)
+        free(secret->base);
+    if (release) {
+        evp_keyex_free(ctx);
+        *_ctx = NULL;
+    }
+    return ret;
+}
+
+/* Wraps `pkey` in an EVP-based key-exchange context and extracts the raw public key. On success the
+ * context takes ownership of pkey (callers up-ref or hand over their reference); on failure pkey is left
+ * for the caller to free — evp_keyex_create frees it when this function fails — and no memory is leaked. */
+static int evp_keyex_init(ptls_key_exchange_algorithm_t *algo, ptls_key_exchange_context_t **_ctx, EVP_PKEY *pkey)
+{
+    struct st_evp_keyex_context_t *ctx = NULL;
+    int ret;
+
+    /* instantiate */
+    if ((ctx = malloc(sizeof(*ctx))) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    *ctx = (struct st_evp_keyex_context_t){{algo, {NULL}, evp_keyex_on_exchange}, pkey};
+
+    /* set public key */
+    if ((ctx->super.pubkey.len = EVP_PKEY_get1_tls_encodedpoint(ctx->privkey, &ctx->super.pubkey.base)) == 0) {
+        ctx->super.pubkey.base = NULL;
+        /* was: a bare `return` here that leaked ctx; detach pkey so evp_keyex_free does not free what
+         * the caller still owns, then take the common cleanup path */
+        ctx->privkey = NULL;
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+
+    *_ctx = &ctx->super;
+    ret = 0;
+Exit:
+    if (ret != 0 && ctx != NULL)
+        evp_keyex_free(ctx);
+    return ret;
+}
+
+/* Generates a fresh key pair for `algo` (algo->data holds the EVP_PKEY id) and wraps it in a context.
+ * On success pkey ownership passes to the context (pkey is reset to NULL before the cleanup path);
+ * on failure pkey — if generated — is freed here. */
+static int evp_keyex_create(ptls_key_exchange_algorithm_t *algo, ptls_key_exchange_context_t **ctx)
+{
+    EVP_PKEY_CTX *evpctx = NULL;
+    EVP_PKEY *pkey = NULL;
+    int ret;
+
+    /* generate private key */
+    if ((evpctx = EVP_PKEY_CTX_new_id((int)algo->data, NULL)) == NULL) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if (EVP_PKEY_keygen_init(evpctx) <= 0) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if (EVP_PKEY_keygen(evpctx, &pkey) <= 0) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+
+    /* setup */
+    if ((ret = evp_keyex_init(algo, ctx, pkey)) != 0)
+        goto Exit;
+    pkey = NULL; /* ownership has moved into the context */
+    ret = 0;
+
+Exit:
+    if (pkey != NULL)
+        EVP_PKEY_free(pkey);
+    if (evpctx != NULL)
+        EVP_PKEY_CTX_free(evpctx);
+    return ret;
+}
+
+/* One-shot exchange for EVP-based groups: creates an ephemeral context, copies out our public key, then
+ * derives the secret and releases the context in the same evp_keyex_on_exchange call. outpubkey is
+ * malloc'ed on success and freed on failure. */
+static int evp_keyex_exchange(ptls_key_exchange_algorithm_t *algo, ptls_iovec_t *outpubkey, ptls_iovec_t *secret,
+                              ptls_iovec_t peerkey)
+{
+    ptls_key_exchange_context_t *ctx = NULL;
+    int ret;
+
+    outpubkey->base = NULL;
+
+    if ((ret = evp_keyex_create(algo, &ctx)) != 0)
+        goto Exit;
+    if ((outpubkey->base = malloc(ctx->pubkey.len)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    memcpy(outpubkey->base, ctx->pubkey.base, ctx->pubkey.len);
+    outpubkey->len = ctx->pubkey.len;
+    /* passing release=1 frees the context regardless of the result */
+    ret = evp_keyex_on_exchange(&ctx, 1, secret, peerkey);
+    assert(ctx == NULL);
+
+Exit:
+    if (ctx != NULL)
+        evp_keyex_on_exchange(&ctx, 1, NULL, ptls_iovec_init(NULL, 0));
+    if (ret != 0)
+        free(outpubkey->base);
+    return ret;
+}
+
+#endif
+
+/* Builds a key-exchange context from an existing private key (EC P-256/384/521 or X25519).
+ * For EC keys, EVP_PKEY_get1_EC_KEY returns a new reference that the context takes on success and that
+ * is released here on failure; for X25519 the context shares pkey, hence the EVP_PKEY_up_ref on success.
+ * Returns PTLS_ERROR_INCOMPATIBLE_KEY for unsupported key types or curves. */
+int ptls_openssl_create_key_exchange(ptls_key_exchange_context_t **ctx, EVP_PKEY *pkey)
+{
+    int ret, id;
+
+    switch (id = EVP_PKEY_id(pkey)) {
+
+    case EVP_PKEY_EC: {
+        /* obtain eckey */
+        EC_KEY *eckey = EVP_PKEY_get1_EC_KEY(pkey);
+
+        /* determine algo */
+        ptls_key_exchange_algorithm_t *algo;
+        switch (EC_GROUP_get_curve_name(EC_KEY_get0_group(eckey))) {
+        case NID_X9_62_prime256v1:
+            algo = &ptls_openssl_secp256r1;
+            break;
+#if PTLS_OPENSSL_HAVE_SECP384R1
+        case NID_secp384r1:
+            algo = &ptls_openssl_secp384r1;
+            break;
+#endif
+#if PTLS_OPENSSL_HAVE_SECP521R1
+        case NID_secp521r1:
+            algo = &ptls_openssl_secp521r1;
+            break;
+#endif
+        default:
+            EC_KEY_free(eckey);
+            return PTLS_ERROR_INCOMPATIBLE_KEY;
+        }
+
+        /* load key */
+        if ((ret = x9_62_init_key(algo, ctx, eckey)) != 0) {
+            EC_KEY_free(eckey);
+            return ret;
+        }
+
+        return 0;
+    } break;
+
+#if PTLS_OPENSSL_HAVE_X25519
+    case NID_X25519:
+        if ((ret = evp_keyex_init(&ptls_openssl_x25519, ctx, pkey)) != 0)
+            return ret;
+        EVP_PKEY_up_ref(pkey);
+        return 0;
+#endif
+
+    default:
+        return PTLS_ERROR_INCOMPATIBLE_KEY;
+    }
+}
+
+/* Signs `input` with `key` using EVP_DigestSign over digest `md`, appending the signature to outbuf
+ * (outbuf->off is advanced). RSA keys are switched to PSS padding with MGF1-SHA256 and a salt length
+ * equal to the digest length (-1). The signature length is probed with a NULL output first. */
+static int do_sign(EVP_PKEY *key, ptls_buffer_t *outbuf, ptls_iovec_t input, const EVP_MD *md)
+{
+    EVP_MD_CTX *ctx = NULL;
+    EVP_PKEY_CTX *pkey_ctx;
+    size_t siglen;
+    int ret;
+
+    if ((ctx = EVP_MD_CTX_create()) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    if (EVP_DigestSignInit(ctx, &pkey_ctx, md, NULL, key) != 1) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if (EVP_PKEY_id(key) == EVP_PKEY_RSA) {
+        if (EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, RSA_PKCS1_PSS_PADDING) != 1) {
+            ret = PTLS_ERROR_LIBRARY;
+            goto Exit;
+        }
+        if (EVP_PKEY_CTX_set_rsa_pss_saltlen(pkey_ctx, -1) != 1) {
+            ret = PTLS_ERROR_LIBRARY;
+            goto Exit;
+        }
+        if (EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, EVP_sha256()) != 1) {
+            ret = PTLS_ERROR_LIBRARY;
+            goto Exit;
+        }
+    }
+    if (EVP_DigestSignUpdate(ctx, input.base, input.len) != 1) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    /* probe the signature length, reserve space, then sign into the buffer */
+    if (EVP_DigestSignFinal(ctx, NULL, &siglen) != 1) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    if ((ret = ptls_buffer_reserve(outbuf, siglen)) != 0)
+        goto Exit;
+    if (EVP_DigestSignFinal(ctx, outbuf->base + outbuf->off, &siglen) != 1) {
+        ret = PTLS_ERROR_LIBRARY;
+        goto Exit;
+    }
+    outbuf->off += siglen;
+
+    ret = 0;
+Exit:
+    if (ctx != NULL)
+        EVP_MD_CTX_destroy(ctx);
+    return ret;
+}
+
+/* Non-AEAD cipher context backed by an EVP cipher (used for ECB/CTR/ChaCha20/Blowfish below). */
+struct cipher_context_t {
+    ptls_cipher_context_t super;
+    EVP_CIPHER_CTX *evp;
+};
+
+/* Releases the EVP cipher context. */
+static void cipher_dispose(ptls_cipher_context_t *_ctx)
+{
+    struct cipher_context_t *ctx = (struct cipher_context_t *)_ctx;
+    EVP_CIPHER_CTX_free(ctx->evp);
+}
+
+/* (Re)initializes the cipher with a new IV; the key set in cipher_setup_crypto is retained
+ * (NULL cipher/key arguments). */
+static void cipher_do_init(ptls_cipher_context_t *_ctx, const void *iv)
+{
+    struct cipher_context_t *ctx = (struct cipher_context_t *)_ctx;
+    int ret;
+    ret = EVP_EncryptInit_ex(ctx->evp, NULL, NULL, NULL, iv);
+    assert(ret);
+}
+
+/* Common setup for the non-AEAD ciphers: installs the callbacks, creates the EVP context, and keys it in
+ * the requested direction. On failure the EVP context is released and PTLS_ERROR_LIBRARY returned. */
+static int cipher_setup_crypto(ptls_cipher_context_t *_ctx, int is_enc, const void *key, const EVP_CIPHER *cipher,
+                               void (*do_transform)(ptls_cipher_context_t *, void *, const void *, size_t))
+{
+    struct cipher_context_t *ctx = (struct cipher_context_t *)_ctx;
+
+    ctx->super.do_dispose = cipher_dispose;
+    ctx->super.do_init = cipher_do_init;
+    ctx->super.do_transform = do_transform;
+
+    if ((ctx->evp = EVP_CIPHER_CTX_new()) == NULL)
+        return PTLS_ERROR_NO_MEMORY;
+
+    if (is_enc) {
+        if (!EVP_EncryptInit_ex(ctx->evp, cipher, NULL, key, NULL))
+            goto Error;
+    } else {
+        if (!EVP_DecryptInit_ex(ctx->evp, cipher, NULL, key, NULL))
+            goto Error;
+        EVP_CIPHER_CTX_set_padding(ctx->evp, 0); /* required to disable one block buffering in ECB mode */
+    }
+
+    return 0;
+Error:
+    EVP_CIPHER_CTX_free(ctx->evp);
+    return PTLS_ERROR_LIBRARY;
+}
+
+/* Encrypts _len bytes in one EVP_EncryptUpdate call; the assert on the output length checks that the
+ * cipher consumed everything without buffering. */
+static void cipher_encrypt(ptls_cipher_context_t *_ctx, void *output, const void *input, size_t _len)
+{
+    struct cipher_context_t *ctx = (struct cipher_context_t *)_ctx;
+    int len = (int)_len, ret = EVP_EncryptUpdate(ctx->evp, output, &len, input, len);
+    assert(ret);
+    assert(len == (int)_len);
+}
+
+/* Decryption counterpart of cipher_encrypt; padding is disabled at setup so the output length always
+ * equals the input length. */
+static void cipher_decrypt(ptls_cipher_context_t *_ctx, void *output, const void *input, size_t _len)
+{
+    struct cipher_context_t *ctx = (struct cipher_context_t *)_ctx;
+    int len = (int)_len, ret = EVP_DecryptUpdate(ctx->evp, output, &len, input, len);
+    assert(ret);
+    assert(len == (int)_len);
+}
+
+/* The ECB wrappers select the transform matching the requested direction. The CTR wrappers always
+ * configure the encrypt direction and ignore is_enc: counter mode is a symmetric stream construction. */
+static int aes128ecb_setup_crypto(ptls_cipher_context_t *ctx, int is_enc, const void *key)
+{
+    return cipher_setup_crypto(ctx, is_enc, key, EVP_aes_128_ecb(), is_enc ? cipher_encrypt : cipher_decrypt);
+}
+
+static int aes256ecb_setup_crypto(ptls_cipher_context_t *ctx, int is_enc, const void *key)
+{
+    return cipher_setup_crypto(ctx, is_enc, key, EVP_aes_256_ecb(), is_enc ? cipher_encrypt : cipher_decrypt);
+}
+
+static int aes128ctr_setup_crypto(ptls_cipher_context_t *ctx, int is_enc, const void *key)
+{
+    return cipher_setup_crypto(ctx, 1, key, EVP_aes_128_ctr(), cipher_encrypt);
+}
+
+static int aes256ctr_setup_crypto(ptls_cipher_context_t *ctx, int is_enc, const void *key)
+{
+    return cipher_setup_crypto(ctx, 1, key, EVP_aes_256_ctr(), cipher_encrypt);
+}
+
+#if PTLS_OPENSSL_HAVE_CHACHA20_POLY1305
+
+/* ChaCha20 is a stream cipher: always configured in the encrypt direction, is_enc is ignored. */
+static int chacha20_setup_crypto(ptls_cipher_context_t *ctx, int is_enc, const void *key)
+{
+    return cipher_setup_crypto(ctx, 1, key, EVP_chacha20(), cipher_encrypt);
+}
+
+#endif
+
+#if PTLS_OPENSSL_HAVE_BF
+
+/* Blowfish in ECB mode; direction-dependent like the AES-ECB wrappers above. */
+static int bfecb_setup_crypto(ptls_cipher_context_t *ctx, int is_enc, const void *key)
+{
+    return cipher_setup_crypto(ctx, is_enc, key, EVP_bf_ecb(), is_enc ? cipher_encrypt : cipher_decrypt);
+}
+}
+
+#endif
+
+/* AEAD context backed by an EVP cipher; the authentication tag is handled through the
+ * EVP_CTRL_GCM_GET_TAG / EVP_CTRL_GCM_SET_TAG controls (see the functions below). */
+struct aead_crypto_context_t {
+    ptls_aead_context_t super;
+    EVP_CIPHER_CTX *evp_ctx;
+};
+
+/* Releases the EVP context, if one was created. */
+static void aead_dispose_crypto(ptls_aead_context_t *_ctx)
+{
+    struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *)_ctx;
+
+    if (ctx->evp_ctx != NULL)
+        EVP_CIPHER_CTX_free(ctx->evp_ctx);
+}
+
+/* Begins an AEAD encryption: loads the per-record IV (key was set at setup), then feeds the AAD
+ * (an EVP update with a NULL output buffer). */
+static void aead_do_encrypt_init(ptls_aead_context_t *_ctx, const void *iv, const void *aad, size_t aadlen)
+{
+    struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *)_ctx;
+    int ret;
+
+    /* FIXME for performance, preserve the expanded key instead of the raw key */
+    ret = EVP_EncryptInit_ex(ctx->evp_ctx, NULL, NULL, NULL, iv);
+    assert(ret);
+
+    if (aadlen != 0) {
+        int blocklen;
+        ret = EVP_EncryptUpdate(ctx->evp_ctx, NULL, &blocklen, aad, (int)aadlen);
+        assert(ret);
+    }
+}
+
+/* Feeds plaintext into the ongoing AEAD encryption; returns the number of ciphertext bytes produced. */
+static size_t aead_do_encrypt_update(ptls_aead_context_t *_ctx, void *output, const void *input, size_t inlen)
+{
+    struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *)_ctx;
+    int blocklen, ret;
+
+    ret = EVP_EncryptUpdate(ctx->evp_ctx, output, &blocklen, input, (int)inlen);
+    assert(ret);
+
+    return blocklen;
+}
+
+/* Finalizes the AEAD encryption and appends the authentication tag (EVP_CTRL_GCM_GET_TAG) to the
+ * output; returns the number of bytes written, tag included. */
+static size_t aead_do_encrypt_final(ptls_aead_context_t *_ctx, void *_output)
+{
+    struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *)_ctx;
+    uint8_t *output = _output;
+    size_t off = 0, tag_size = ctx->super.algo->tag_size;
+    int blocklen, ret;
+
+    ret = EVP_EncryptFinal_ex(ctx->evp_ctx, output + off, &blocklen);
+    assert(ret);
+    off += blocklen;
+    ret = EVP_CIPHER_CTX_ctrl(ctx->evp_ctx, EVP_CTRL_GCM_GET_TAG, (int)tag_size, output + off);
+    assert(ret);
+    off += tag_size;
+
+    return off;
+}
+
/* One-shot AEAD decryption. The last `tag_size` bytes of `input` are the
 * authentication tag; the rest is ciphertext. Writes the plaintext to
 * `_output` and returns its length, or SIZE_MAX when the input is too short
 * or the tag fails to verify. */
static size_t aead_do_decrypt(ptls_aead_context_t *_ctx, void *_output, const void *input, size_t inlen, const void *iv,
                              const void *aad, size_t aadlen)
{
    struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *)_ctx;
    uint8_t *output = _output;
    size_t off = 0, tag_size = ctx->super.algo->tag_size;
    int blocklen, ret;

    /* input must at least contain the tag */
    if (inlen < tag_size)
        return SIZE_MAX;

    ret = EVP_DecryptInit_ex(ctx->evp_ctx, NULL, NULL, NULL, iv);
    assert(ret);
    if (aadlen != 0) {
        /* AAD is fed with a NULL output pointer so it is only authenticated */
        ret = EVP_DecryptUpdate(ctx->evp_ctx, NULL, &blocklen, aad, (int)aadlen);
        assert(ret);
    }
    ret = EVP_DecryptUpdate(ctx->evp_ctx, output + off, &blocklen, input, (int)(inlen - tag_size));
    assert(ret);
    off += blocklen;
    /* install the expected tag, then let DecryptFinal verify it */
    if (!EVP_CIPHER_CTX_ctrl(ctx->evp_ctx, EVP_CTRL_GCM_SET_TAG, (int)tag_size, (void *)((uint8_t *)input + inlen - tag_size)))
        return SIZE_MAX;
    if (!EVP_DecryptFinal_ex(ctx->evp_ctx, output + off, &blocklen))
        return SIZE_MAX;
    off += blocklen;

    return off;
}
+
+static int aead_setup_crypto(ptls_aead_context_t *_ctx, int is_enc, const void *key, const EVP_CIPHER *cipher)
+{
+ struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *)_ctx;
+ int ret;
+
+ ctx->super.dispose_crypto = aead_dispose_crypto;
+ if (is_enc) {
+ ctx->super.do_encrypt_init = aead_do_encrypt_init;
+ ctx->super.do_encrypt_update = aead_do_encrypt_update;
+ ctx->super.do_encrypt_final = aead_do_encrypt_final;
+ ctx->super.do_decrypt = NULL;
+ } else {
+ ctx->super.do_encrypt_init = NULL;
+ ctx->super.do_encrypt_update = NULL;
+ ctx->super.do_encrypt_final = NULL;
+ ctx->super.do_decrypt = aead_do_decrypt;
+ }
+ ctx->evp_ctx = NULL;
+
+ if ((ctx->evp_ctx = EVP_CIPHER_CTX_new()) == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Error;
+ }
+ if (is_enc) {
+ if (!EVP_EncryptInit_ex(ctx->evp_ctx, cipher, NULL, key, NULL)) {
+ ret = PTLS_ERROR_LIBRARY;
+ goto Error;
+ }
+ } else {
+ if (!EVP_DecryptInit_ex(ctx->evp_ctx, cipher, NULL, key, NULL)) {
+ ret = PTLS_ERROR_LIBRARY;
+ goto Error;
+ }
+ }
+ if (!EVP_CIPHER_CTX_ctrl(ctx->evp_ctx, EVP_CTRL_GCM_SET_IVLEN, (int)ctx->super.algo->iv_size, NULL)) {
+ ret = PTLS_ERROR_LIBRARY;
+ goto Error;
+ }
+
+ return 0;
+
+Error:
+ aead_dispose_crypto(&ctx->super);
+ return ret;
+}
+
+static int aead_aes128gcm_setup_crypto(ptls_aead_context_t *ctx, int is_enc, const void *key)
+{
+ return aead_setup_crypto(ctx, is_enc, key, EVP_aes_128_gcm());
+}
+
+static int aead_aes256gcm_setup_crypto(ptls_aead_context_t *ctx, int is_enc, const void *key)
+{
+ return aead_setup_crypto(ctx, is_enc, key, EVP_aes_256_gcm());
+}
+
+#if PTLS_OPENSSL_HAVE_CHACHA20_POLY1305
+static int aead_chacha20poly1305_setup_crypto(ptls_aead_context_t *ctx, int is_enc, const void *key)
+{
+ return aead_setup_crypto(ctx, is_enc, key, EVP_chacha20_poly1305());
+}
+#endif
+
/* Bind OpenSSL's SHA-2 implementations to picotls hash contexts. The _final
 * wrapper macros swap the argument order to what ptls_define_hash expects. */
#define _sha256_final(ctx, md) SHA256_Final((md), (ctx))
ptls_define_hash(sha256, SHA256_CTX, SHA256_Init, SHA256_Update, _sha256_final);

/* SHA-384 shares the SHA-512 context structure. */
#define _sha384_final(ctx, md) SHA384_Final((md), (ctx))
ptls_define_hash(sha384, SHA512_CTX, SHA384_Init, SHA384_Update, _sha384_final);
+
+static int sign_certificate(ptls_sign_certificate_t *_self, ptls_t *tls, uint16_t *selected_algorithm, ptls_buffer_t *outbuf,
+ ptls_iovec_t input, const uint16_t *algorithms, size_t num_algorithms)
+{
+ ptls_openssl_sign_certificate_t *self = (ptls_openssl_sign_certificate_t *)_self;
+ const struct st_ptls_openssl_signature_scheme_t *scheme;
+
+ /* select the algorithm */
+ for (scheme = self->schemes; scheme->scheme_id != UINT16_MAX; ++scheme) {
+ size_t i;
+ for (i = 0; i != num_algorithms; ++i)
+ if (algorithms[i] == scheme->scheme_id)
+ goto Found;
+ }
+ return PTLS_ALERT_HANDSHAKE_FAILURE;
+
+Found:
+ *selected_algorithm = scheme->scheme_id;
+ return do_sign(self->key, outbuf, input, scheme->scheme_md);
+}
+
+static X509 *to_x509(ptls_iovec_t vec)
+{
+ const uint8_t *p = vec.base;
+ return d2i_X509(NULL, &p, (long)vec.len);
+}
+
/* Verifies the peer's CertificateVerify signature using the public key that
 * verify_cert() extracted (passed as `verify_ctx`). Ownership of the key is
 * transferred: it is freed on every path, including errors. When data.base is
 * NULL the call is a cleanup-only invocation and reports success (ret starts
 * at 0). NOTE(review): the digest is fixed to SHA-256 — assumes the selected
 * signature scheme is SHA-256-based; confirm against the negotiated schemes. */
static int verify_sign(void *verify_ctx, ptls_iovec_t data, ptls_iovec_t signature)
{
    EVP_PKEY *key = verify_ctx;
    EVP_MD_CTX *ctx = NULL;
    EVP_PKEY_CTX *pkey_ctx = NULL;
    int ret = 0;

    if (data.base == NULL)
        goto Exit;

    if ((ctx = EVP_MD_CTX_create()) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }
    if (EVP_DigestVerifyInit(ctx, &pkey_ctx, EVP_sha256(), NULL, key) != 1) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    /* RSA keys use RSA-PSS with MGF1-SHA256, per TLS 1.3 */
    if (EVP_PKEY_id(key) == EVP_PKEY_RSA) {
        if (EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, RSA_PKCS1_PSS_PADDING) != 1) {
            ret = PTLS_ERROR_LIBRARY;
            goto Exit;
        }
        if (EVP_PKEY_CTX_set_rsa_pss_saltlen(pkey_ctx, -1) != 1) {
            ret = PTLS_ERROR_LIBRARY;
            goto Exit;
        }
        if (EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, EVP_sha256()) != 1) {
            ret = PTLS_ERROR_LIBRARY;
            goto Exit;
        }
    }
    if (EVP_DigestVerifyUpdate(ctx, data.base, data.len) != 1) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    /* an invalid signature maps to the decrypt_error alert */
    if (EVP_DigestVerifyFinal(ctx, signature.base, signature.len) != 1) {
        ret = PTLS_ALERT_DECRYPT_ERROR;
        goto Exit;
    }
    ret = 0;

Exit:
    if (ctx != NULL)
        EVP_MD_CTX_destroy(ctx);
    EVP_PKEY_free(key); /* ownership was transferred to this function */
    return ret;
}
+
+int ptls_openssl_init_sign_certificate(ptls_openssl_sign_certificate_t *self, EVP_PKEY *key)
+{
+ *self = (ptls_openssl_sign_certificate_t){{sign_certificate}};
+ size_t scheme_index = 0;
+
+#define PUSH_SCHEME(id, md) \
+ self->schemes[scheme_index++] = (struct st_ptls_openssl_signature_scheme_t) \
+ { \
+ id, md \
+ }
+
+ switch (EVP_PKEY_id(key)) {
+ case EVP_PKEY_RSA:
+ PUSH_SCHEME(PTLS_SIGNATURE_RSA_PSS_RSAE_SHA256, EVP_sha256());
+ PUSH_SCHEME(PTLS_SIGNATURE_RSA_PSS_RSAE_SHA384, EVP_sha384());
+ PUSH_SCHEME(PTLS_SIGNATURE_RSA_PSS_RSAE_SHA512, EVP_sha512());
+ break;
+ case EVP_PKEY_EC: {
+ EC_KEY *eckey = EVP_PKEY_get1_EC_KEY(key);
+ switch (EC_GROUP_get_curve_name(EC_KEY_get0_group(eckey))) {
+ case NID_X9_62_prime256v1:
+ PUSH_SCHEME(PTLS_SIGNATURE_ECDSA_SECP256R1_SHA256, EVP_sha256());
+ break;
+#if defined(NID_secp384r1) && !OPENSSL_NO_SHA384
+ case NID_secp384r1:
+ PUSH_SCHEME(PTLS_SIGNATURE_ECDSA_SECP384R1_SHA384, EVP_sha384());
+ break;
+#endif
+#if defined(NID_secp384r1) && !OPENSSL_NO_SHA512
+ case NID_secp521r1:
+ PUSH_SCHEME(PTLS_SIGNATURE_ECDSA_SECP521R1_SHA512, EVP_sha512());
+ break;
+#endif
+ default:
+ EC_KEY_free(eckey);
+ return PTLS_ERROR_INCOMPATIBLE_KEY;
+ }
+ EC_KEY_free(eckey);
+ } break;
+ default:
+ return PTLS_ERROR_INCOMPATIBLE_KEY;
+ }
+ PUSH_SCHEME(UINT16_MAX, NULL);
+ assert(scheme_index <= sizeof(self->schemes) / sizeof(self->schemes[0]));
+
+#undef PUSH_SCHEME
+
+ EVP_PKEY_up_ref(key);
+ self->key = key;
+
+ return 0;
+}
+
+void ptls_openssl_dispose_sign_certificate(ptls_openssl_sign_certificate_t *self)
+{
+ EVP_PKEY_free(self->key);
+}
+
+static int serialize_cert(X509 *cert, ptls_iovec_t *dst)
+{
+ int len = i2d_X509(cert, NULL);
+ assert(len > 0);
+
+ if ((dst->base = malloc(len)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ unsigned char *p = dst->base;
+ dst->len = i2d_X509(cert, &p);
+ assert(len == dst->len);
+
+ return 0;
+}
+
+int ptls_openssl_load_certificates(ptls_context_t *ctx, X509 *cert, STACK_OF(X509) * chain)
+{
+ ptls_iovec_t *list = NULL;
+ size_t slot = 0, count = (cert != NULL) + (chain != NULL ? sk_X509_num(chain) : 0);
+ int ret;
+
+ assert(ctx->certificates.list == NULL);
+
+ if ((list = malloc(sizeof(*list) * count)) == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ if (cert != NULL) {
+ if ((ret = serialize_cert(cert, list + slot++)) != 0)
+ goto Exit;
+ }
+ if (chain != NULL) {
+ int i;
+ for (i = 0; i != sk_X509_num(chain); ++i) {
+ if ((ret = serialize_cert(sk_X509_value(chain, i), list + slot++)) != 0)
+ goto Exit;
+ }
+ }
+
+ assert(slot == count);
+
+ ctx->certificates.list = list;
+ ctx->certificates.count = count;
+ ret = 0;
+
+Exit:
+ if (ret != 0 && list != NULL) {
+ size_t i;
+ for (i = 0; i != slot; ++i)
+ free(list[i].base);
+ free(list);
+ }
+ return ret;
+}
+
/* Verifies the leaf `cert` (plus intermediates in `chain`) against the trust
 * anchors in `store`, then checks that the leaf matches `server_name`
 * (hostname or IP literal). OpenSSL X509 verification errors are mapped onto
 * the corresponding TLS alert / PTLS error codes. Returns 0 on success. */
static int verify_cert_chain(X509_STORE *store, X509 *cert, STACK_OF(X509) * chain, int is_server, const char *server_name)
{
    X509_STORE_CTX *verify_ctx;
    int ret;

    assert(server_name != NULL && "ptls_set_server_name MUST be called");

    /* verify certificate chain */
    if ((verify_ctx = X509_STORE_CTX_new()) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }
    if (X509_STORE_CTX_init(verify_ctx, store, cert, chain) != 1) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    /* the purpose check depends on which side the *peer* is acting as */
    X509_STORE_CTX_set_purpose(verify_ctx, is_server ? X509_PURPOSE_SSL_SERVER : X509_PURPOSE_SSL_CLIENT);
    if (X509_verify_cert(verify_ctx) != 1) {
        /* map the detailed X509 error onto a TLS alert */
        int x509_err = X509_STORE_CTX_get_error(verify_ctx);
        switch (x509_err) {
        case X509_V_ERR_OUT_OF_MEM:
            ret = PTLS_ERROR_NO_MEMORY;
            break;
        case X509_V_ERR_CERT_REVOKED:
            ret = PTLS_ALERT_CERTIFICATE_REVOKED;
            break;
        case X509_V_ERR_CERT_NOT_YET_VALID:
        case X509_V_ERR_CERT_HAS_EXPIRED:
            ret = PTLS_ALERT_CERTIFICATE_EXPIRED;
            break;
        case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
        case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY:
        case X509_V_ERR_CERT_UNTRUSTED:
        case X509_V_ERR_CERT_REJECTED:
            ret = PTLS_ALERT_UNKNOWN_CA;
            break;
        case X509_V_ERR_INVALID_CA:
            ret = PTLS_ALERT_BAD_CERTIFICATE;
            break;
        default:
            ret = PTLS_ALERT_CERTIFICATE_UNKNOWN;
            break;
        }
        goto Exit;
    }

#ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
    /* verify CN */
    /* NOTE(review): the assert above already requires server_name != NULL;
     * this re-test is a redundant guard kept for safety. */
    if (server_name != NULL) {
        if (ptls_server_name_is_ipaddr(server_name)) {
            ret = X509_check_ip_asc(cert, server_name, 0);
        } else {
            ret = X509_check_host(cert, server_name, strlen(server_name), X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS, NULL);
        }
        if (ret != 1) {
            if (ret == 0) { /* failed match */
                ret = PTLS_ALERT_BAD_CERTIFICATE;
            } else {
                ret = PTLS_ERROR_LIBRARY;
            }
            goto Exit;
        }
    }
#else
#warning "hostname validation is disabled; OpenSSL >= 1.0.2 or LibreSSL >= 2.5.0 is required"
#endif

    ret = 0;

Exit:
    if (verify_ctx != NULL)
        X509_STORE_CTX_free(verify_ctx);
    return ret;
}
+
+static int verify_cert(ptls_verify_certificate_t *_self, ptls_t *tls, int (**verifier)(void *, ptls_iovec_t, ptls_iovec_t),
+ void **verify_data, ptls_iovec_t *certs, size_t num_certs)
+{
+ ptls_openssl_verify_certificate_t *self = (ptls_openssl_verify_certificate_t *)_self;
+ X509 *cert = NULL;
+ STACK_OF(X509) *chain = sk_X509_new_null();
+ size_t i;
+ int ret = 0;
+
+ assert(num_certs != 0);
+
+ /* convert certificates to OpenSSL representation */
+ if ((cert = to_x509(certs[0])) == NULL) {
+ ret = PTLS_ALERT_BAD_CERTIFICATE;
+ goto Exit;
+ }
+ for (i = 1; i != num_certs; ++i) {
+ X509 *interm = to_x509(certs[i]);
+ if (interm == NULL) {
+ ret = PTLS_ALERT_BAD_CERTIFICATE;
+ goto Exit;
+ }
+ sk_X509_push(chain, interm);
+ }
+
+ /* verify the chain */
+ if ((ret = verify_cert_chain(self->cert_store, cert, chain, ptls_is_server(tls), ptls_get_server_name(tls))) != 0)
+ goto Exit;
+
+ /* extract public key for verifying the TLS handshake signature */
+ if ((*verify_data = X509_get_pubkey(cert)) == NULL) {
+ ret = PTLS_ALERT_BAD_CERTIFICATE;
+ goto Exit;
+ }
+ *verifier = verify_sign;
+
+Exit:
+ if (chain != NULL)
+ sk_X509_pop_free(chain, X509_free);
+ if (cert != NULL)
+ X509_free(cert);
+ return ret;
+}
+
+static void cleanup_cipher_ctx(EVP_CIPHER_CTX *ctx)
+{
+ if (!EVP_CIPHER_CTX_reset(ctx)) {
+ fprintf(stderr, "EVP_CIPHER_CTX_reset() failed\n");
+ abort();
+ }
+}
+
+int ptls_openssl_init_verify_certificate(ptls_openssl_verify_certificate_t *self, X509_STORE *store)
+{
+ *self = (ptls_openssl_verify_certificate_t){{verify_cert}};
+
+ if (store != NULL) {
+ X509_STORE_up_ref(store);
+ self->cert_store = store;
+ } else {
+ /* use default store */
+ if ((self->cert_store = ptls_openssl_create_default_certificate_store()) == NULL)
+ return -1;
+ }
+
+ return 0;
+}
+
+void ptls_openssl_dispose_verify_certificate(ptls_openssl_verify_certificate_t *self)
+{
+ X509_STORE_free(self->cert_store);
+ free(self);
+}
+
/* Builds an X509_STORE wired to the system default certificate file and
 * hash-dir lookups. Returns NULL on allocation failure. The return values of
 * X509_LOOKUP_load_file/add_dir are intentionally ignored — missing default
 * paths are treated as non-fatal (NOTE(review): presumably deliberate
 * best-effort loading; confirm). */
X509_STORE *ptls_openssl_create_default_certificate_store(void)
{
    X509_STORE *store;
    X509_LOOKUP *lookup;

    if ((store = X509_STORE_new()) == NULL)
        goto Error;
    if ((lookup = X509_STORE_add_lookup(store, X509_LOOKUP_file())) == NULL)
        goto Error;
    /* NULL file name selects the compiled-in default certificate file */
    X509_LOOKUP_load_file(lookup, NULL, X509_FILETYPE_DEFAULT);
    if ((lookup = X509_STORE_add_lookup(store, X509_LOOKUP_hash_dir())) == NULL)
        goto Error;
    /* NULL dir selects the compiled-in default certificate directory */
    X509_LOOKUP_add_dir(lookup, NULL, X509_FILETYPE_DEFAULT);

    return store;
Error:
    if (store != NULL)
        X509_STORE_free(store);
    return NULL;
}
+
+#define TICKET_LABEL_SIZE 16
+#define TICKET_IV_SIZE EVP_MAX_IV_LENGTH
+
/* Encrypts a session ticket into `buf`. The callback `cb` fills the 16-byte
 * key name and the IV, and keys both the cipher and HMAC contexts (enc=1).
 * Output layout appended to buf: key-name | IV | ciphertext | HMAC, where the
 * HMAC covers everything before it. Returns 0 or a PTLS_ERROR_* code. */
int ptls_openssl_encrypt_ticket(ptls_buffer_t *buf, ptls_iovec_t src,
                                int (*cb)(unsigned char *key_name, unsigned char *iv, EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc))
{
    EVP_CIPHER_CTX *cctx = NULL;
    HMAC_CTX *hctx = NULL;
    uint8_t *dst;
    int clen, ret;

    if ((cctx = EVP_CIPHER_CTX_new()) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }
    if ((hctx = HMAC_CTX_new()) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }

    /* reserve worst-case space: label + IV + padded ciphertext + digest */
    if ((ret = ptls_buffer_reserve(buf, TICKET_LABEL_SIZE + TICKET_IV_SIZE + src.len + EVP_MAX_BLOCK_LENGTH + EVP_MAX_MD_SIZE)) !=
        0)
        goto Exit;
    dst = buf->base + buf->off;

    /* fill label and iv, as well as obtaining the keys */
    if (!(*cb)(dst, dst + TICKET_LABEL_SIZE, cctx, hctx, 1)) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    dst += TICKET_LABEL_SIZE + TICKET_IV_SIZE;

    /* encrypt */
    if (!EVP_EncryptUpdate(cctx, dst, &clen, src.base, (int)src.len)) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    dst += clen;
    if (!EVP_EncryptFinal_ex(cctx, dst, &clen)) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    dst += clen;

    /* append hmac (over label, IV and ciphertext) */
    if (!HMAC_Update(hctx, buf->base + buf->off, dst - (buf->base + buf->off)) || !HMAC_Final(hctx, dst, NULL)) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    dst += HMAC_size(hctx);

    assert(dst <= buf->base + buf->capacity);
    /* commit exactly the bytes written */
    buf->off += dst - (buf->base + buf->off);
    ret = 0;

Exit:
    if (cctx != NULL)
        cleanup_cipher_ctx(cctx);
    if (hctx != NULL)
        HMAC_CTX_free(hctx);
    return ret;
}
+
/* Decrypts and authenticates a ticket produced by ptls_openssl_encrypt_ticket.
 * The callback (enc=0) keys the contexts from the key name and IV at the head
 * of `src`; the trailing HMAC is then verified (via ptls_mem_equal) before the
 * payload is decrypted into `buf`. Returns 0, PTLS_ALERT_DECODE_ERROR on a
 * truncated ticket, PTLS_ALERT_HANDSHAKE_FAILURE on MAC mismatch, or a
 * PTLS_ERROR_* code. */
int ptls_openssl_decrypt_ticket(ptls_buffer_t *buf, ptls_iovec_t src,
                                int (*cb)(unsigned char *key_name, unsigned char *iv, EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc))
{
    EVP_CIPHER_CTX *cctx = NULL;
    HMAC_CTX *hctx = NULL;
    int clen, ret;

    if ((cctx = EVP_CIPHER_CTX_new()) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }
    if ((hctx = HMAC_CTX_new()) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }

    /* obtain cipher and hash context.
     * Note: no need to handle renew, since in picotls we always send a new ticket to minimize the chance of ticket reuse */
    if (src.len < TICKET_LABEL_SIZE + TICKET_IV_SIZE) {
        ret = PTLS_ALERT_DECODE_ERROR;
        goto Exit;
    }
    if (!(*cb)(src.base, src.base + TICKET_LABEL_SIZE, cctx, hctx, 0)) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }

    /* check hmac, and exclude label, iv, hmac */
    size_t hmac_size = HMAC_size(hctx);
    if (src.len < TICKET_LABEL_SIZE + TICKET_IV_SIZE + hmac_size) {
        ret = PTLS_ALERT_DECODE_ERROR;
        goto Exit;
    }
    src.len -= hmac_size; /* src now ends where the HMAC begins */
    uint8_t hmac[EVP_MAX_MD_SIZE];
    if (!HMAC_Update(hctx, src.base, src.len) || !HMAC_Final(hctx, hmac, NULL)) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    if (!ptls_mem_equal(src.base + src.len, hmac, hmac_size)) {
        ret = PTLS_ALERT_HANDSHAKE_FAILURE;
        goto Exit;
    }
    /* skip the label and IV; src is now just the ciphertext */
    src.base += TICKET_LABEL_SIZE + TICKET_IV_SIZE;
    src.len -= TICKET_LABEL_SIZE + TICKET_IV_SIZE;

    /* decrypt */
    if ((ret = ptls_buffer_reserve(buf, src.len)) != 0)
        goto Exit;
    if (!EVP_DecryptUpdate(cctx, buf->base + buf->off, &clen, src.base, (int)src.len)) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    buf->off += clen;
    if (!EVP_DecryptFinal_ex(cctx, buf->base + buf->off, &clen)) {
        ret = PTLS_ERROR_LIBRARY;
        goto Exit;
    }
    buf->off += clen;

    ret = 0;

Exit:
    if (cctx != NULL)
        cleanup_cipher_ctx(cctx);
    if (hctx != NULL)
        HMAC_CTX_free(hctx);
    return ret;
}
+
/* Exported key-exchange algorithm descriptors; availability of the optional
 * groups depends on the OpenSSL build. */
ptls_key_exchange_algorithm_t ptls_openssl_secp256r1 = {PTLS_GROUP_SECP256R1, x9_62_create_key_exchange, secp_key_exchange,
                                                        NID_X9_62_prime256v1};
#if PTLS_OPENSSL_HAVE_SECP384R1
ptls_key_exchange_algorithm_t ptls_openssl_secp384r1 = {PTLS_GROUP_SECP384R1, x9_62_create_key_exchange, secp_key_exchange,
                                                        NID_secp384r1};
#endif
#if PTLS_OPENSSL_HAVE_SECP521R1
ptls_key_exchange_algorithm_t ptls_openssl_secp521r1 = {PTLS_GROUP_SECP521R1, x9_62_create_key_exchange, secp_key_exchange,
                                                        NID_secp521r1};
#endif
#if PTLS_OPENSSL_HAVE_X25519
ptls_key_exchange_algorithm_t ptls_openssl_x25519 = {PTLS_GROUP_X25519, evp_keyex_create, evp_keyex_exchange, NID_X25519};
#endif
/* default key-exchange list: secp256r1 only, NULL-terminated */
ptls_key_exchange_algorithm_t *ptls_openssl_key_exchanges[] = {&ptls_openssl_secp256r1, NULL};
/* AES cipher / AEAD / hash / cipher-suite descriptors. The ECB variants are
 * used for header protection (no IV); the CTR variants are stream transforms
 * (block size 1). */
ptls_cipher_algorithm_t ptls_openssl_aes128ecb = {
    "AES128-ECB", PTLS_AES128_KEY_SIZE, PTLS_AES_BLOCK_SIZE, 0 /* iv size */, sizeof(struct cipher_context_t),
    aes128ecb_setup_crypto};
ptls_cipher_algorithm_t ptls_openssl_aes128ctr = {
    "AES128-CTR", PTLS_AES128_KEY_SIZE, 1, PTLS_AES_IV_SIZE, sizeof(struct cipher_context_t), aes128ctr_setup_crypto};
ptls_aead_algorithm_t ptls_openssl_aes128gcm = {"AES128-GCM",
                                                &ptls_openssl_aes128ctr,
                                                &ptls_openssl_aes128ecb,
                                                PTLS_AES128_KEY_SIZE,
                                                PTLS_AESGCM_IV_SIZE,
                                                PTLS_AESGCM_TAG_SIZE,
                                                sizeof(struct aead_crypto_context_t),
                                                aead_aes128gcm_setup_crypto};
ptls_cipher_algorithm_t ptls_openssl_aes256ecb = {
    "AES256-ECB", PTLS_AES256_KEY_SIZE, PTLS_AES_BLOCK_SIZE, 0 /* iv size */, sizeof(struct cipher_context_t),
    aes256ecb_setup_crypto};
ptls_cipher_algorithm_t ptls_openssl_aes256ctr = {
    "AES256-CTR", PTLS_AES256_KEY_SIZE, 1 /* block size */, PTLS_AES_IV_SIZE, sizeof(struct cipher_context_t),
    aes256ctr_setup_crypto};
ptls_aead_algorithm_t ptls_openssl_aes256gcm = {"AES256-GCM",
                                                &ptls_openssl_aes256ctr,
                                                &ptls_openssl_aes256ecb,
                                                PTLS_AES256_KEY_SIZE,
                                                PTLS_AESGCM_IV_SIZE,
                                                PTLS_AESGCM_TAG_SIZE,
                                                sizeof(struct aead_crypto_context_t),
                                                aead_aes256gcm_setup_crypto};
ptls_hash_algorithm_t ptls_openssl_sha256 = {PTLS_SHA256_BLOCK_SIZE, PTLS_SHA256_DIGEST_SIZE, sha256_create,
                                             PTLS_ZERO_DIGEST_SHA256};
ptls_hash_algorithm_t ptls_openssl_sha384 = {PTLS_SHA384_BLOCK_SIZE, PTLS_SHA384_DIGEST_SIZE, sha384_create,
                                             PTLS_ZERO_DIGEST_SHA384};
ptls_cipher_suite_t ptls_openssl_aes128gcmsha256 = {PTLS_CIPHER_SUITE_AES_128_GCM_SHA256, &ptls_openssl_aes128gcm,
                                                    &ptls_openssl_sha256};
ptls_cipher_suite_t ptls_openssl_aes256gcmsha384 = {PTLS_CIPHER_SUITE_AES_256_GCM_SHA384, &ptls_openssl_aes256gcm,
                                                    &ptls_openssl_sha384};
#if PTLS_OPENSSL_HAVE_CHACHA20_POLY1305
/* ChaCha20-Poly1305 descriptors (only when the OpenSSL build provides it).
 * No ECB-style header-protection cipher exists for ChaCha20, hence NULL. */
ptls_cipher_algorithm_t ptls_openssl_chacha20 = {
    "CHACHA20", PTLS_CHACHA20_KEY_SIZE, 1 /* block size */, PTLS_CHACHA20_IV_SIZE, sizeof(struct cipher_context_t),
    chacha20_setup_crypto};
ptls_aead_algorithm_t ptls_openssl_chacha20poly1305 = {"CHACHA20-POLY1305",
                                                       &ptls_openssl_chacha20,
                                                       NULL,
                                                       PTLS_CHACHA20_KEY_SIZE,
                                                       PTLS_CHACHA20POLY1305_IV_SIZE,
                                                       PTLS_CHACHA20POLY1305_TAG_SIZE,
                                                       sizeof(struct aead_crypto_context_t),
                                                       aead_chacha20poly1305_setup_crypto};
ptls_cipher_suite_t ptls_openssl_chacha20poly1305sha256 = {PTLS_CIPHER_SUITE_CHACHA20_POLY1305_SHA256,
                                                           &ptls_openssl_chacha20poly1305, &ptls_openssl_sha256};
#endif
/* NULL-terminated default cipher-suite list, in preference order */
ptls_cipher_suite_t *ptls_openssl_cipher_suites[] = {&ptls_openssl_aes256gcmsha384, &ptls_openssl_aes128gcmsha256,
#if PTLS_OPENSSL_HAVE_CHACHA20_POLY1305
                                                     &ptls_openssl_chacha20poly1305sha256,
#endif
                                                     NULL};
+
#if PTLS_OPENSSL_HAVE_BF
/* Blowfish-ECB descriptor (legacy; only when the OpenSSL build provides it). */
ptls_cipher_algorithm_t ptls_openssl_bfecb = {"BF-ECB", PTLS_BLOWFISH_KEY_SIZE, PTLS_BLOWFISH_BLOCK_SIZE,
                                              0 /* iv size */, sizeof(struct cipher_context_t), bfecb_setup_crypto};
#endif
--- /dev/null
+/*
+* Copyright (c) 2016 Christian Huitema <huitema@huitema.net>
+*
+* Permission to use, copy, modify, and distribute this software for any
+* purpose with or without fee is hereby granted, provided that the above
+* copyright notice and this permission notice appear in all copies.
+*
+* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Manage Base64 encoding.
+ */
+#ifdef _WINDOWS
+#include "wincompat.h"
+#else
+#include <sys/time.h>
+#endif
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "picotls.h"
+#include "picotls/pembase64.h"
+
/* Standard base64 alphabet (RFC 4648): indices 0-63 map to their encoding
 * characters. */
static char ptls_base64_alphabet[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
                                      'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
                                      'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
                                      'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'};
+
/* Reverse mapping: ASCII code point -> 6-bit value; -1 marks characters that
 * are not part of the base64 alphabet. */
static signed char ptls_base64_values[] = {
    /* 0x00 to 0x0F */
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    /* 0x10 to 0x1F */
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    /* 0x20 to 0x2F. '+' at 2B, '/' at 2F */
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63,
    /* 0x30 to 0x3F -- digits 0 to 9 at 0x30 to 0x39*/
    52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1,
    /* 0x40 to 0x4F -- chars 'A' to 'O' at 0x41 to 0x4F */
    -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    /* 0x50 to 0x5F -- chars 'P' to 'Z' at 0x50 to 0x5A */
    15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
    /* 0x60 to 0x6F -- chars 'a' to 'o' at 0x61 to 0x6F */
    -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
    /* 0x70 to 0x7F -- chars 'p' to 'z' at 0x70 to 0x7A */
    41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1};
+
+static void ptls_base64_cell(const uint8_t *data, char *text)
+{
+ int n[4];
+
+ n[0] = data[0] >> 2;
+ n[1] = ((data[0] & 3) << 4) | (data[1] >> 4);
+ n[2] = ((data[1] & 15) << 2) | (data[2] >> 6);
+ n[3] = data[2] & 63;
+
+ for (int i = 0; i < 4; i++) {
+ text[i] = ptls_base64_alphabet[n[i]];
+ }
+}
+
/* Number of base64 characters needed to encode `data_length` bytes: four per
 * 3-byte group, rounded up (excludes the terminating NUL). */
size_t ptls_base64_howlong(size_t data_length)
{
    size_t groups = (data_length + 2) / 3;
    return groups * 4;
}
+
+int ptls_base64_encode(const uint8_t *data, size_t data_len, char *ptls_base64_text)
+{
+ int l = 0;
+ int lt = 0;
+
+ while ((data_len - l) >= 3) {
+ ptls_base64_cell(data + l, ptls_base64_text + lt);
+ l += 3;
+ lt += 4;
+ }
+
+ switch (data_len - l) {
+ case 0:
+ break;
+ case 1:
+ ptls_base64_text[lt++] = ptls_base64_alphabet[data[l] >> 2];
+ ptls_base64_text[lt++] = ptls_base64_alphabet[(data[l] & 3) << 4];
+ ptls_base64_text[lt++] = '=';
+ ptls_base64_text[lt++] = '=';
+ break;
+ case 2:
+ ptls_base64_text[lt++] = ptls_base64_alphabet[data[l] >> 2];
+ ptls_base64_text[lt++] = ptls_base64_alphabet[((data[l] & 3) << 4) | (data[l + 1] >> 4)];
+ ptls_base64_text[lt++] = ptls_base64_alphabet[((data[l + 1] & 15) << 2)];
+ ptls_base64_text[lt++] = '=';
+ break;
+ default:
+ break;
+ }
+ ptls_base64_text[lt++] = 0;
+
+ return lt;
+}
+
+/*
+ * Take into input a line of text, so as to work by increments.
+ * The intermediate text of the decoding is kept in a state variable.
+ * The decoded data is accumulated in a PTLS buffer.
+ * The parsing is consistent with the lax definition in RFC 7468
+ */
+
+void ptls_base64_decode_init(ptls_base64_decode_state_t *state)
+{
+ state->nbc = 0;
+ state->nbo = 3;
+ state->v = 0;
+ state->status = PTLS_BASE64_DECODE_IN_PROGRESS;
+}
+
/* Incrementally decodes one line of base64 text into `buf`, carrying decoder
 * state across calls (lax parsing per RFC 7468). Four input characters are
 * accumulated into state->v, then emitted as state->nbo octets; '=' padding
 * reduces nbo and marks the stream as complete. Returns 0 on success or
 * PTLS_ERROR_INCORRECT_BASE64; state->status records progress/completion. */
int ptls_base64_decode(const char *text, ptls_base64_decode_state_t *state, ptls_buffer_t *buf)
{
    int ret = 0;
    uint8_t decoded[3];
    size_t text_index = 0;
    int c;
    signed char vc;

    /* skip initial blanks */
    while (text[text_index] != 0) {
        c = text[text_index];

        if (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
            text_index++;
        } else {
            break;
        }
    }

    while (text[text_index] != 0 && ret == 0 && state->status == PTLS_BASE64_DECODE_IN_PROGRESS) {
        c = text[text_index++];

        /* map the character to its 6-bit value; -1 for non-alphabet chars */
        vc = 0 < c && c < 0x7f ? ptls_base64_values[c] : -1;
        if (vc == -1) {
            /* '=' padding: "xx==" yields 1 octet, "xxx=" yields 2 */
            if (state->nbc == 2 && c == '=' && text[text_index] == '=') {
                state->nbc = 4;
                text_index++;
                state->nbo = 1;
                state->v <<= 12;
            } else if (state->nbc == 3 && c == '=') {
                state->nbc = 4;
                state->nbo = 2;
                state->v <<= 6;
            } else {
                /* Skip final blanks */
                for (--text_index; text[text_index] != 0; ++text_index) {
                    c = text[text_index];
                    if (!(c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == 0x0B || c == 0x0C))
                        break;
                }

                /* Should now be at end of buffer */
                if (text[text_index] == 0) {
                    break;
                } else {
                    /* Not at end of buffer, signal a decoding error */
                    state->nbo = 0;
                    state->status = PTLS_BASE64_DECODE_FAILED;
                    ret = PTLS_ERROR_INCORRECT_BASE64;
                }
            }
        } else {
            /* accumulate 6 more bits */
            state->nbc++;
            state->v <<= 6;
            state->v |= vc;
        }

        if (ret == 0 && state->nbc == 4) {
            /* Convert to up to 3 octets */
            for (int j = 0; j < state->nbo; j++) {
                decoded[j] = (uint8_t)(state->v >> (8 * (2 - j)));
            }

            ret = ptls_buffer__do_pushv(buf, decoded, state->nbo);

            if (ret == 0) {
                /* test for fin or continuation */
                if (state->nbo < 3) {
                    /* Check that there are only trainling blanks on this line */
                    /* NOTE(review): this loop consumes the rest of the line
                     * without breaking on a non-blank character, so trailing
                     * garbage after padding reaches the DONE branch below —
                     * confirm whether this leniency is intended. */
                    while (text[text_index] != 0) {
                        c = text[text_index++];

                        if (c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == 0x0B || c == 0x0C) {
                            continue;
                        }
                    }
                    if (text[text_index] == 0) {
                        state->status = PTLS_BASE64_DECODE_DONE;
                    } else {
                        state->status = PTLS_BASE64_DECODE_FAILED;
                        ret = PTLS_ERROR_INCORRECT_BASE64;
                    }
                    break;
                } else {
                    /* full group emitted; reset for the next one */
                    state->v = 0;
                    state->nbo = 3;
                    state->nbc = 0;
                }
            }
        }
    }
    return ret;
}
+
+/*
+ * Reading a PEM file, to get an object:
+ *
+ * - Find first object, get the object name.
+ * - If object label is what the application expects, parse, else skip to end.
+ *
+ * The following labels are defined in RFC 7468:
+ *
+ * Sec. Label ASN.1 Type Reference Module
+ * ----+----------------------+-----------------------+---------+----------
+ * 5 CERTIFICATE Certificate [RFC5280] id-pkix1-e
+ * 6 X509 CRL CertificateList [RFC5280] id-pkix1-e
+ * 7 CERTIFICATE REQUEST CertificationRequest [RFC2986] id-pkcs10
+ * 8 PKCS7 ContentInfo [RFC2315] id-pkcs7*
+ * 9 CMS ContentInfo [RFC5652] id-cms2004
+ * 10 PRIVATE KEY PrivateKeyInfo ::= [RFC5208] id-pkcs8
+ * OneAsymmetricKey [RFC5958] id-aKPV1
+ * 11 ENCRYPTED PRIVATE KEY EncryptedPrivateKeyInfo [RFC5958] id-aKPV1
+ * 12 ATTRIBUTE CERTIFICATE AttributeCertificate [RFC5755] id-acv2
+ * 13 PUBLIC KEY SubjectPublicKeyInfo [RFC5280] id-pkix1-e
+ */
+
/* Matches a PEM separator line of the form "-----<BEGIN|END> <label>-----".
 * Returns 0 on a match, non-zero otherwise. */
static int ptls_compare_separator_line(const char *line, const char *begin_or_end, const char *label)
{
    size_t pos = 5;
    int ret = strncmp(line, "-----", 5);

    if (ret == 0) {
        size_t keyword_len = strlen(begin_or_end);
        ret = strncmp(line + pos, begin_or_end, keyword_len);
        pos += keyword_len;
    }

    if (ret == 0) {
        ret = line[pos] - ' '; /* exactly one space between keyword and label */
        pos++;
    }

    if (ret == 0) {
        size_t label_len = strlen(label);
        ret = strncmp(line + pos, label, label_len);
        pos += label_len;
    }

    if (ret == 0)
        ret = strncmp(line + pos, "-----", 5);

    return ret;
}
+
/* Reads the next "-----BEGIN <label>-----" ... "-----END <label>-----" object
 * from F, base64-decoding the body into `buf`. Returns 0 on success,
 * PTLS_ERROR_PEM_LABEL_NOT_FOUND when no matching BEGIN line remains, or
 * PTLS_ERROR_INCORRECT_BASE64 on a malformed body.
 * NOTE(review): if EOF is hit after BEGIN but before an END line, the return
 * value is that of the last decoded line (typically 0) even though the object
 * is truncated — confirm whether callers rely on this. */
static int ptls_get_pem_object(FILE *F, const char *label, ptls_buffer_t *buf)
{
    int ret = PTLS_ERROR_PEM_LABEL_NOT_FOUND;
    char line[256];
    ptls_base64_decode_state_t state;

    /* Get the label on a line by itself */
    while (fgets(line, 256, F)) {
        if (ptls_compare_separator_line(line, "BEGIN", label) == 0) {
            ret = 0;
            ptls_base64_decode_init(&state);
            break;
        }
    }
    /* Get the data in the buffer */
    while (ret == 0 && fgets(line, 256, F)) {
        if (ptls_compare_separator_line(line, "END", label) == 0) {
            /* complete object, or END right after a group boundary */
            if (state.status == PTLS_BASE64_DECODE_DONE || (state.status == PTLS_BASE64_DECODE_IN_PROGRESS && state.nbc == 0)) {
                ret = 0;
            } else {
                ret = PTLS_ERROR_INCORRECT_BASE64;
            }
            break;
        } else {
            ret = ptls_base64_decode(line, &state, buf);
        }
    }

    return ret;
}
+
/* Loads up to `list_max` PEM objects carrying `label` from `pem_fname`.
 * Each entry of `list` receives a malloc'ed buffer owned by the caller;
 * *nb_objects is set to the number of objects read. Returns 0 when at least
 * one object was read (running out of further objects is not an error),
 * -1 when the file cannot be opened, or a PTLS_ERROR_* code. */
int ptls_load_pem_objects(char const *pem_fname, const char *label, ptls_iovec_t *list, size_t list_max, size_t *nb_objects)
{
    FILE *F;
    int ret = 0;
    size_t count = 0;
#ifdef _WINDOWS
    errno_t err = fopen_s(&F, pem_fname, "r");
    if (err != 0) {
        ret = -1;
    }
#else
    F = fopen(pem_fname, "r");
    if (F == NULL) {
        ret = -1;
    }
#endif

    *nb_objects = 0;

    if (ret == 0) {
        while (count < list_max) {
            ptls_buffer_t buf;

            ptls_buffer_init(&buf, "", 0);

            ret = ptls_get_pem_object(F, label, &buf);

            if (ret == 0) {
                if (buf.off > 0 && buf.is_allocated) {
                    /* transfer ownership of the decoded bytes to the list */
                    list[count].base = buf.base;
                    list[count].len = buf.off;
                    count++;
                } else {
                    ptls_buffer_dispose(&buf);
                }
            } else {
                ptls_buffer_dispose(&buf);
                break;
            }
        }
    }

    /* not finding another object after reading at least one is success */
    if (ret == PTLS_ERROR_PEM_LABEL_NOT_FOUND && count > 0) {
        ret = 0;
    }

    *nb_objects = count;

    if (F != NULL) {
        fclose(F);
    }

    return ret;
}
+
+#define PTLS_MAX_CERTS_IN_CONTEXT 16
+
+int ptls_load_certificates(ptls_context_t *ctx, char const *cert_pem_file)
+{
+ int ret = 0;
+
+ ctx->certificates.list = (ptls_iovec_t *)malloc(PTLS_MAX_CERTS_IN_CONTEXT * sizeof(ptls_iovec_t));
+
+ if (ctx->certificates.list == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ } else {
+ ret = ptls_load_pem_objects(cert_pem_file, "CERTIFICATE", ctx->certificates.list, PTLS_MAX_CERTS_IN_CONTEXT,
+ &ctx->certificates.count);
+ }
+
+ return ret;
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 DeNA Co., Ltd., Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef _WINDOWS
+#include "wincompat.h"
+#else
+#include <arpa/inet.h>
+#include <sys/time.h>
+#endif
+#include "picotls.h"
+#if PICOTLS_USE_DTRACE
+#include "picotls-probes.h"
+#endif
+
+#define PTLS_MAX_PLAINTEXT_RECORD_SIZE 16384
+#define PTLS_MAX_ENCRYPTED_RECORD_SIZE (16384 + 256)
+
+#define PTLS_RECORD_VERSION_MAJOR 3
+#define PTLS_RECORD_VERSION_MINOR 3
+
+#define PTLS_CONTENT_TYPE_CHANGE_CIPHER_SPEC 20
+#define PTLS_CONTENT_TYPE_ALERT 21
+#define PTLS_CONTENT_TYPE_HANDSHAKE 22
+#define PTLS_CONTENT_TYPE_APPDATA 23
+
+#define PTLS_PSK_KE_MODE_PSK 0
+#define PTLS_PSK_KE_MODE_PSK_DHE 1
+
+#define PTLS_HANDSHAKE_HEADER_SIZE 4
+
+#define PTLS_EXTENSION_TYPE_SERVER_NAME 0
+#define PTLS_EXTENSION_TYPE_STATUS_REQUEST 5
+#define PTLS_EXTENSION_TYPE_SUPPORTED_GROUPS 10
+#define PTLS_EXTENSION_TYPE_SIGNATURE_ALGORITHMS 13
+#define PTLS_EXTENSION_TYPE_ALPN 16
+#define PTLS_EXTENSION_TYPE_COMPRESS_CERTIFICATE 27
+#define PTLS_EXTENSION_TYPE_PRE_SHARED_KEY 41
+#define PTLS_EXTENSION_TYPE_EARLY_DATA 42
+#define PTLS_EXTENSION_TYPE_SUPPORTED_VERSIONS 43
+#define PTLS_EXTENSION_TYPE_COOKIE 44
+#define PTLS_EXTENSION_TYPE_PSK_KEY_EXCHANGE_MODES 45
+#define PTLS_EXTENSION_TYPE_KEY_SHARE 51
+#define PTLS_EXTENSION_TYPE_ENCRYPTED_SERVER_NAME 0xffce
+
+#define PTLS_PROTOCOL_VERSION_TLS13_FINAL 0x0304
+#define PTLS_PROTOCOL_VERSION_TLS13_DRAFT26 0x7f1a
+#define PTLS_PROTOCOL_VERSION_TLS13_DRAFT27 0x7f1b
+#define PTLS_PROTOCOL_VERSION_TLS13_DRAFT28 0x7f1c
+
+#define PTLS_SERVER_NAME_TYPE_HOSTNAME 0
+
+#define PTLS_SERVER_CERTIFICATE_VERIFY_CONTEXT_STRING "TLS 1.3, server CertificateVerify"
+#define PTLS_CLIENT_CERTIFICATE_VERIFY_CONTEXT_STRING "TLS 1.3, client CertificateVerify"
+#define PTLS_MAX_CERTIFICATE_VERIFY_SIGNDATA_SIZE \
+ (64 + sizeof(PTLS_SERVER_CERTIFICATE_VERIFY_CONTEXT_STRING) + PTLS_MAX_DIGEST_SIZE * 2)
+
+#define PTLS_EARLY_DATA_MAX_DELAY 10000 /* max. RTT (in msec) to permit early data */
+
+#ifndef PTLS_MAX_EARLY_DATA_SKIP_SIZE
+#define PTLS_MAX_EARLY_DATA_SKIP_SIZE 65536
+#endif
+#if defined(PTLS_DEBUG) && PTLS_DEBUG
+#define PTLS_DEBUGF(...) fprintf(stderr, __VA_ARGS__)
+#else
+#define PTLS_DEBUGF(...)
+#endif
+
+#ifndef PTLS_MEMORY_DEBUG
+#define PTLS_MEMORY_DEBUG 0
+#endif
+
+#if PICOTLS_USE_DTRACE
+#define PTLS_SHOULD_PROBE(LABEL, tls) (PTLS_UNLIKELY(PICOTLS_##LABEL##_ENABLED()) && !(tls)->skip_tracing)
+#define PTLS_PROBE0(LABEL, tls) \
+ do { \
+ ptls_t *_tls = (tls); \
+ if (PTLS_SHOULD_PROBE(LABEL, _tls)) \
+ PICOTLS_##LABEL(_tls); \
+ } while (0)
+#define PTLS_PROBE(LABEL, tls, ...) \
+ do { \
+ ptls_t *_tls = (tls); \
+ if (PTLS_SHOULD_PROBE(LABEL, _tls)) \
+ PICOTLS_##LABEL(_tls, __VA_ARGS__); \
+ } while (0)
+#else
+#define PTLS_PROBE0(LABEL, tls)
+#define PTLS_PROBE(LABEL, tls, ...)
+#endif
+
+/**
+ * list of supported versions in the preferred order
+ */
+static const uint16_t supported_versions[] = {PTLS_PROTOCOL_VERSION_TLS13_FINAL, PTLS_PROTOCOL_VERSION_TLS13_DRAFT28,
+ PTLS_PROTOCOL_VERSION_TLS13_DRAFT27, PTLS_PROTOCOL_VERSION_TLS13_DRAFT26};
+
+static const uint8_t hello_retry_random[PTLS_HELLO_RANDOM_SIZE] = {0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11, 0xBE, 0x1D, 0x8C,
+ 0x02, 0x1E, 0x65, 0xB8, 0x91, 0xC2, 0xA2, 0x11, 0x16, 0x7A, 0xBB,
+ 0x8C, 0x5E, 0x07, 0x9E, 0x09, 0xE2, 0xC8, 0xA8, 0x33, 0x9C};
+
+/* Per-direction record-protection state. `secret` and `epoch` are always
+ * maintained; `aead`/`seq` are only used when picotls runs its own record
+ * layer (see the comment below and setup_traffic_protection()). */
+struct st_ptls_traffic_protection_t {
+ uint8_t secret[PTLS_MAX_DIGEST_SIZE];
+ size_t epoch;
+ /* the following fields are not used if the key_change callback is set */
+ ptls_aead_context_t *aead;
+ uint64_t seq;
+};
+
+/* Message emitter that frames handshake messages into TLS records;
+ * `rec_start` remembers where the current record header begins in the
+ * output buffer so commit_record_message() can patch/encrypt it. */
+struct st_ptls_record_message_emitter_t {
+ ptls_message_emitter_t super;
+ size_t rec_start;
+};
+
+/* Fixed-capacity list of signature-scheme identifiers (wire values). */
+struct st_ptls_signature_algorithms_t {
+ uint16_t list[16]; /* expand? */
+ size_t count;
+};
+
+/* State captured from a received CertificateRequest message. */
+struct st_ptls_certificate_request_t {
+ /**
+ * context.base becomes non-NULL when a CertificateRequest is pending for processing
+ */
+ ptls_iovec_t context;
+ struct st_ptls_signature_algorithms_t signature_algorithms;
+};
+
+/* The TLS connection object (opaque `ptls_t` in the public API): handshake
+ * state machine, transcript hashes, record-protection contexts and the
+ * negotiated parameters for one connection. */
+struct st_ptls_t {
+ /**
+ * the context
+ */
+ ptls_context_t *ctx;
+ /**
+ * the state
+ */
+ enum en_ptls_state_t {
+ PTLS_STATE_CLIENT_HANDSHAKE_START,
+ PTLS_STATE_CLIENT_EXPECT_SERVER_HELLO,
+ PTLS_STATE_CLIENT_EXPECT_SECOND_SERVER_HELLO,
+ PTLS_STATE_CLIENT_EXPECT_ENCRYPTED_EXTENSIONS,
+ PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_REQUEST_OR_CERTIFICATE,
+ PTLS_STATE_CLIENT_EXPECT_CERTIFICATE,
+ PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_VERIFY,
+ PTLS_STATE_CLIENT_EXPECT_FINISHED,
+ PTLS_STATE_SERVER_EXPECT_CLIENT_HELLO,
+ PTLS_STATE_SERVER_EXPECT_SECOND_CLIENT_HELLO,
+ PTLS_STATE_SERVER_EXPECT_CERTIFICATE,
+ PTLS_STATE_SERVER_EXPECT_CERTIFICATE_VERIFY,
+ /* ptls_send can be called if the state is below here */
+ PTLS_STATE_SERVER_EXPECT_END_OF_EARLY_DATA,
+ PTLS_STATE_SERVER_EXPECT_FINISHED,
+ PTLS_STATE_POST_HANDSHAKE_MIN,
+ PTLS_STATE_CLIENT_POST_HANDSHAKE = PTLS_STATE_POST_HANDSHAKE_MIN,
+ PTLS_STATE_SERVER_POST_HANDSHAKE
+ } state;
+ /**
+ * receive buffers
+ */
+ struct {
+ ptls_buffer_t rec;
+ ptls_buffer_t mess;
+ } recvbuf;
+ /**
+ * key schedule
+ */
+ ptls_key_schedule_t *key_schedule;
+ /**
+ * values used for record protection
+ */
+ struct {
+ struct st_ptls_traffic_protection_t dec;
+ struct st_ptls_traffic_protection_t enc;
+ } traffic_protection;
+ /**
+ * server-name passed using SNI
+ */
+ char *server_name;
+ /**
+ * result of ALPN
+ */
+ char *negotiated_protocol;
+ /**
+ * selected key-exchange
+ */
+ ptls_key_exchange_algorithm_t *key_share;
+ /**
+ * selected cipher-suite
+ */
+ ptls_cipher_suite_t *cipher_suite;
+ /**
+ * clienthello.random
+ */
+ uint8_t client_random[PTLS_HELLO_RANDOM_SIZE];
+ /**
+ * esni
+ */
+ ptls_esni_secret_t *esni;
+ /**
+ * exporter master secret (either 0rtt or 1rtt)
+ */
+ struct {
+ uint8_t *early;
+ uint8_t *one_rtt;
+ } exporter_master_secret;
+ /* flags */
+ unsigned is_server : 1;
+ unsigned is_psk_handshake : 1;
+ unsigned send_change_cipher_spec : 1;
+ unsigned needs_key_update : 1;
+ unsigned key_update_send_request : 1;
+ unsigned skip_tracing : 1;
+ /**
+ * misc. (role-specific state; which member of the union is valid is
+ * determined by the `is_server` flag)
+ */
+ union {
+ struct {
+ uint8_t legacy_session_id[32];
+ ptls_key_exchange_context_t *key_share_ctx;
+ unsigned offered_psk : 1;
+ /**
+ * if 1-RTT write key is active
+ */
+ unsigned using_early_data : 1;
+ struct st_ptls_certificate_request_t certificate_request;
+ } client;
+ struct {
+ uint8_t pending_traffic_secret[PTLS_MAX_DIGEST_SIZE];
+ uint32_t early_data_skipped_bytes; /* if not UINT32_MAX, the server is skipping early data */
+ } server;
+ };
+ /**
+ * certificate verify
+ * will be used by the client and the server (if require_client_authentication is set).
+ */
+ struct {
+ int (*cb)(void *verify_ctx, ptls_iovec_t data, ptls_iovec_t signature);
+ void *verify_ctx;
+ } certificate_verify;
+ /**
+ * handshake traffic secret to be commisioned (an array of `uint8_t [PTLS_MAX_DIGEST_SIZE]` or NULL)
+ */
+ uint8_t *pending_handshake_secret;
+ /**
+ * user data
+ */
+ void *data_ptr;
+};
+
+/* A parsed TLS record header plus a pointer into the receive buffer where
+ * the record body (`fragment`) lives; no ownership of the bytes. */
+struct st_ptls_record_t {
+ uint8_t type;
+ uint16_t version;
+ size_t length;
+ const uint8_t *fragment;
+};
+
+/* One PSK identity offered in ClientHello.pre_shared_key. */
+struct st_ptls_client_hello_psk_t {
+ ptls_iovec_t identity;
+ uint32_t obfuscated_ticket_age;
+ ptls_iovec_t binder;
+};
+
+#define MAX_UNKNOWN_EXTENSIONS 16
+#define MAX_CLIENT_CIPHERS 32
+
+/* Fully decoded ClientHello. All iovecs point into the received message;
+ * nothing here is heap-allocated. */
+struct st_ptls_client_hello_t {
+ const uint8_t *random_bytes;
+ ptls_iovec_t legacy_session_id;
+ struct {
+ const uint8_t *ids;
+ size_t count;
+ } compression_methods;
+ uint16_t selected_version;
+ ptls_iovec_t cipher_suites;
+ ptls_iovec_t negotiated_groups;
+ ptls_iovec_t key_shares;
+ struct st_ptls_signature_algorithms_t signature_algorithms;
+ ptls_iovec_t server_name;
+ struct {
+ ptls_cipher_suite_t *cipher; /* selected cipher-suite, or NULL if esni extension is not used */
+ ptls_key_exchange_algorithm_t *key_share;
+ ptls_iovec_t peer_key;
+ const uint8_t *record_digest;
+ ptls_iovec_t encrypted_sni;
+ } esni;
+ struct {
+ ptls_iovec_t list[16];
+ size_t count;
+ } alpn;
+ struct {
+ uint16_t list[16];
+ size_t count;
+ } cert_compression_algos;
+ struct {
+ uint16_t list[MAX_CLIENT_CIPHERS];
+ size_t count;
+ } client_ciphers;
+ struct {
+ ptls_iovec_t all;
+ ptls_iovec_t tbs;
+ ptls_iovec_t ch1_hash;
+ ptls_iovec_t signature;
+ unsigned sent_key_share : 1;
+ } cookie;
+ struct {
+ const uint8_t *hash_end;
+ struct {
+ struct st_ptls_client_hello_psk_t list[4];
+ size_t count;
+ } identities;
+ unsigned ke_modes;
+ int early_data_indication;
+ } psk;
+ ptls_raw_extension_t unknown_extensions[MAX_UNKNOWN_EXTENSIONS + 1];
+ unsigned status_request : 1;
+};
+
+/* Decoded ServerHello; the union holds either the server's key share (normal
+ * handshake) or the retry parameters (HelloRetryRequest, per is_retry_request). */
+struct st_ptls_server_hello_t {
+ uint8_t random_[PTLS_HELLO_RANDOM_SIZE];
+ ptls_iovec_t legacy_session_id;
+ int is_retry_request;
+ union {
+ ptls_iovec_t peerkey;
+ struct {
+ uint16_t selected_group;
+ ptls_iovec_t cookie;
+ } retry_request;
+ };
+};
+
+/* TLS 1.3 key schedule: current secret plus one running transcript hash per
+ * candidate hash algorithm (trailing flexible-style array, allocated with
+ * offsetof in key_schedule_new()). */
+struct st_ptls_key_schedule_t {
+ unsigned generation; /* early secret (1), hanshake secret (2), master secret (3) */
+ const char *hkdf_label_prefix;
+ uint8_t secret[PTLS_MAX_DIGEST_SIZE];
+ size_t num_hashes;
+ struct {
+ ptls_hash_algorithm_t *algo;
+ ptls_hash_context_t *ctx;
+ } hashes[1];
+};
+
+/* Maps an extension id to its decoder callback. */
+struct st_ptls_extension_decoder_t {
+ uint16_t type;
+ int (*cb)(ptls_t *tls, void *arg, const uint8_t *src, const uint8_t *const end);
+};
+
+/* Bitset used to reject duplicate / disallowed extensions while decoding. */
+struct st_ptls_extension_bitmap_t {
+ uint8_t bits[8]; /* only ids below 64 is tracked */
+};
+
+static const uint8_t zeroes_of_max_digest_size[PTLS_MAX_DIGEST_SIZE] = {0};
+
+static int hkdf_expand_label(ptls_hash_algorithm_t *algo, void *output, size_t outlen, ptls_iovec_t secret, const char *label,
+ ptls_iovec_t hash_value, const char *label_prefix);
+static ptls_aead_context_t *new_aead(ptls_aead_algorithm_t *aead, ptls_hash_algorithm_t *hash, int is_enc, const void *secret,
+ ptls_iovec_t hash_value, const char *label_prefix);
+
+/* Returns non-zero iff `v` is one of the protocol versions listed in
+ * `supported_versions` (final TLS 1.3 or one of the supported drafts). */
+static int is_supported_version(uint16_t v)
+{
+ size_t i;
+ for (i = 0; i != sizeof(supported_versions) / sizeof(supported_versions[0]); ++i)
+ if (supported_versions[i] == v)
+ return 1;
+ return 0;
+}
+
+/* Tests bit `id` of the extension bitmap; ids >= 64 are never tracked and
+ * always report "not set". */
+static inline int extension_bitmap_is_set(struct st_ptls_extension_bitmap_t *bitmap, uint16_t id)
+{
+ if (id < sizeof(bitmap->bits) * 8)
+ return (bitmap->bits[id / 8] & (1 << (id % 8))) != 0;
+ return 0;
+}
+
+/* Sets bit `id` of the extension bitmap; ids >= 64 are silently ignored. */
+static inline void extension_bitmap_set(struct st_ptls_extension_bitmap_t *bitmap, uint16_t id)
+{
+ if (id < sizeof(bitmap->bits) * 8)
+ bitmap->bits[id / 8] |= 1 << (id % 8);
+}
+
+/* Pre-sets the bitmap so that every extension NOT allowed in handshake
+ * message `hstype` already appears "seen"; the decoder in
+ * decode_open_extensions() then rejects such extensions (and duplicates)
+ * with an illegal_parameter alert. EXT() marks an extension as allowed
+ * only when one of its ALLOW() clauses matches `hstype`. */
+static inline void init_extension_bitmap(struct st_ptls_extension_bitmap_t *bitmap, uint8_t hstype)
+{
+ *bitmap = (struct st_ptls_extension_bitmap_t){{0}};
+
+#define EXT(extid, proc) \
+ do { \
+ int _found = 0; \
+ do { \
+ proc \
+ } while (0); \
+ if (!_found) \
+ extension_bitmap_set(bitmap, PTLS_EXTENSION_TYPE_##extid); \
+ } while (0)
+#define ALLOW(allowed_hstype) _found = _found || hstype == PTLS_HANDSHAKE_TYPE_##allowed_hstype
+
+ /* Implements the table found in section 4.2 of draft-19; "If an implementation receives an extension which it recognizes and
+ * which is not specified for the message in which it appears it MUST abort the handshake with an “illegal_parameter” alert."
+ */
+ EXT(SERVER_NAME, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(ENCRYPTED_EXTENSIONS);
+ });
+ EXT(STATUS_REQUEST, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(CERTIFICATE);
+ ALLOW(CERTIFICATE_REQUEST);
+ });
+ EXT(SUPPORTED_GROUPS, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(ENCRYPTED_EXTENSIONS);
+ });
+ EXT(SIGNATURE_ALGORITHMS, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(CERTIFICATE_REQUEST);
+ });
+ EXT(ALPN, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(ENCRYPTED_EXTENSIONS);
+ });
+ EXT(KEY_SHARE, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(SERVER_HELLO);
+ });
+ EXT(PRE_SHARED_KEY, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(SERVER_HELLO);
+ });
+ EXT(PSK_KEY_EXCHANGE_MODES, { ALLOW(CLIENT_HELLO); });
+ EXT(EARLY_DATA, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(ENCRYPTED_EXTENSIONS);
+ ALLOW(NEW_SESSION_TICKET);
+ });
+ EXT(COOKIE, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(SERVER_HELLO);
+ });
+ EXT(SUPPORTED_VERSIONS, {
+ ALLOW(CLIENT_HELLO);
+ ALLOW(SERVER_HELLO);
+ });
+
+#undef ALLOW
+#undef EXT
+}
+
+/* Big-endian (network order) load helpers reading from unaligned byte
+ * pointers; each is guarded so a platform-provided macro takes precedence. */
+#ifndef ntoh16
+static uint16_t ntoh16(const uint8_t *src)
+{
+ return (uint16_t)src[0] << 8 | src[1];
+}
+#endif
+
+#ifndef ntoh24
+static uint32_t ntoh24(const uint8_t *src)
+{
+ return (uint32_t)src[0] << 16 | (uint32_t)src[1] << 8 | src[2];
+}
+#endif
+
+#ifndef ntoh32
+static uint32_t ntoh32(const uint8_t *src)
+{
+ return (uint32_t)src[0] << 24 | (uint32_t)src[1] << 16 | (uint32_t)src[2] << 8 | src[3];
+}
+#endif
+
+#ifndef ntoh64
+static uint64_t ntoh64(const uint8_t *src)
+{
+ return (uint64_t)src[0] << 56 | (uint64_t)src[1] << 48 | (uint64_t)src[2] << 40 | (uint64_t)src[3] << 32 |
+ (uint64_t)src[4] << 24 | (uint64_t)src[5] << 16 | (uint64_t)src[6] << 8 | src[7];
+}
+#endif
+
+/* Wipes the used portion of the buffer (secrets may live here) and frees the
+ * backing store iff it was heap-allocated by ptls_buffer_reserve(). */
+void ptls_buffer__release_memory(ptls_buffer_t *buf)
+{
+ ptls_clear_memory(buf->base, buf->off);
+ if (buf->is_allocated)
+ free(buf->base);
+}
+
+/* Ensures at least `delta` bytes of headroom past buf->off, growing the
+ * buffer by doubling (min. 1024 bytes). On growth the old contents are
+ * copied, then wiped and released. With PTLS_MEMORY_DEBUG every call
+ * reallocates, to surface stale-pointer bugs. Returns 0 or
+ * PTLS_ERROR_NO_MEMORY; a buffer with base == NULL cannot be grown. */
+int ptls_buffer_reserve(ptls_buffer_t *buf, size_t delta)
+{
+ if (buf->base == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+
+ if (PTLS_MEMORY_DEBUG || buf->capacity < buf->off + delta) {
+ uint8_t *newp;
+ size_t new_capacity = buf->capacity;
+ if (new_capacity < 1024)
+ new_capacity = 1024;
+ while (new_capacity < buf->off + delta) {
+ new_capacity *= 2;
+ }
+ if ((newp = malloc(new_capacity)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ memcpy(newp, buf->base, buf->off);
+ ptls_buffer__release_memory(buf);
+ buf->base = newp;
+ buf->capacity = new_capacity;
+ buf->is_allocated = 1;
+ }
+
+ return 0;
+}
+
+/* Appends `len` bytes from `src`, growing the buffer as needed.
+ * Returns 0 on success or the error from ptls_buffer_reserve(). */
+int ptls_buffer__do_pushv(ptls_buffer_t *buf, const void *src, size_t len)
+{
+ int ret;
+
+ if (len == 0)
+ return 0;
+ if ((ret = ptls_buffer_reserve(buf, len)) != 0)
+ return ret;
+ memcpy(buf->base + buf->off, src, len);
+ buf->off += len;
+ return 0;
+}
+
+/* Back-patches the QUIC varint length prefix of a just-written block of
+ * `body_size` bytes. The caller reserved exactly 1 byte for the length; if
+ * the varint encoding needs more, the body is shifted right to make room
+ * before the length is written in front of it. */
+int ptls_buffer__adjust_quic_blocksize(ptls_buffer_t *buf, size_t body_size)
+{
+ uint8_t sizebuf[PTLS_ENCODE_QUICINT_CAPACITY];
+ size_t sizelen = ptls_encode_quicint(sizebuf, body_size) - sizebuf;
+
+ /* adjust amount of space before body_size to `sizelen` bytes */
+ if (sizelen != 1) {
+ int ret;
+ if ((ret = ptls_buffer_reserve(buf, sizelen - 1)) != 0)
+ return ret;
+ memmove(buf->base + buf->off - body_size - 1 + sizelen, buf->base + buf->off - body_size, body_size);
+ buf->off += sizelen - 1;
+ }
+
+ /* write the size */
+ memcpy(buf->base + buf->off - body_size - sizelen, sizebuf, sizelen);
+
+ return 0;
+}
+
+/* ASN.1 length back-patching is intentionally not provided in this build;
+ * reaching it is a programming error and aborts the process. */
+int ptls_buffer__adjust_asn1_blocksize(ptls_buffer_t *buf, size_t body_size)
+{
+ fprintf(stderr, "unimplemented\n");
+ abort();
+}
+
+/* Appends an ASN.1 DER INTEGER (tag 0x02) holding the unsigned big-endian
+ * number `bignum`/`size`: leading zero octets are stripped, a 0x00 pad octet
+ * is prepended when the MSB is set (to keep the value non-negative), and an
+ * all-zero input is encoded as the single octet 0x00. */
+int ptls_buffer_push_asn1_ubigint(ptls_buffer_t *buf, const void *bignum, size_t size)
+{
+ const uint8_t *p = bignum, *const end = p + size;
+ int ret;
+
+ /* skip zeroes */
+ for (; end - p >= 1; ++p)
+ if (*p != 0)
+ break;
+
+ /* emit */
+ ptls_buffer_push(buf, 2);
+ ptls_buffer_push_asn1_block(buf, {
+ if (*p >= 0x80)
+ ptls_buffer_push(buf, 0);
+ if (p != end) {
+ ptls_buffer_pushv(buf, p, end - p);
+ } else {
+ ptls_buffer_pushv(buf, "", 1);
+ }
+ });
+ ret = 0;
+
+Exit:
+ return ret;
+}
+
+#if PTLS_FUZZ_HANDSHAKE
+
+/* Fuzzing build: crypto is bypassed. "Encryption" copies plaintext, appends
+ * the content-type byte and accounts for a fake 16-byte tag, so record
+ * framing stays wire-compatible while remaining deterministic. */
+static size_t aead_encrypt(struct st_ptls_traffic_protection_t *ctx, void *output, const void *input, size_t inlen,
+ uint8_t content_type)
+{
+ memcpy(output, input, inlen);
+ memcpy(output + inlen, &content_type, 1);
+ return inlen + 1 + 16;
+}
+
+/* Fuzzing build: "decryption" strips the fake 16-byte tag without any
+ * authenticity check. */
+static int aead_decrypt(struct st_ptls_traffic_protection_t *ctx, void *output, size_t *outlen, const void *input, size_t inlen)
+{
+ if (inlen < 16) {
+ return PTLS_ALERT_BAD_RECORD_MAC;
+ }
+ memcpy(output, input, inlen - 16);
+ *outlen = inlen - 16; /* removing the 16 bytes of tag */
+ return 0;
+}
+
+#else
+
+/* Builds the 5-byte additional authenticated data for a TLS 1.3 record:
+ * outer type 23 (application_data), legacy version 0x0303, and the length
+ * of the protected record body. */
+static void build_aad(uint8_t aad[5], size_t reclen)
+{
+ aad[0] = PTLS_CONTENT_TYPE_APPDATA;
+ aad[1] = PTLS_RECORD_VERSION_MAJOR;
+ aad[2] = PTLS_RECORD_VERSION_MINOR;
+ aad[3] = (uint8_t)(reclen >> 8);
+ aad[4] = (uint8_t)reclen;
+}
+
+/* AEAD-seals one record body: plaintext followed by the inner content-type
+ * byte, tagged, using (and post-incrementing) ctx->seq as the nonce counter.
+ * Returns the number of bytes written to `output` (inlen + 1 + tag). */
+static size_t aead_encrypt(struct st_ptls_traffic_protection_t *ctx, void *output, const void *input, size_t inlen,
+ uint8_t content_type)
+{
+ uint8_t aad[5];
+ size_t off = 0;
+
+ build_aad(aad, inlen + 1 + ctx->aead->algo->tag_size);
+ ptls_aead_encrypt_init(ctx->aead, ctx->seq++, aad, sizeof(aad));
+ off += ptls_aead_encrypt_update(ctx->aead, ((uint8_t *)output) + off, input, inlen);
+ off += ptls_aead_encrypt_update(ctx->aead, ((uint8_t *)output) + off, &content_type, 1);
+ off += ptls_aead_encrypt_final(ctx->aead, ((uint8_t *)output) + off);
+
+ return off;
+}
+
+/* AEAD-opens one record body. ctx->seq is advanced only on success, so a
+ * forged record does not desynchronize the nonce sequence. Returns 0 or
+ * PTLS_ALERT_BAD_RECORD_MAC. */
+static int aead_decrypt(struct st_ptls_traffic_protection_t *ctx, void *output, size_t *outlen, const void *input, size_t inlen)
+{
+ uint8_t aad[5];
+
+ build_aad(aad, inlen);
+ if ((*outlen = ptls_aead_decrypt(ctx->aead, output, input, inlen, ctx->seq, aad, sizeof(aad))) == SIZE_MAX)
+ return PTLS_ALERT_BAD_RECORD_MAC;
+ ++ctx->seq;
+ return 0;
+}
+
+#endif /* #if PTLS_FUZZ_HANDSHAKE */
+
+/* Emits a record header (type + legacy version) and wraps `block`'s output
+ * in a 2-byte length-prefixed block. */
+#define buffer_push_record(buf, type, block) \
+ do { \
+ ptls_buffer_push((buf), (type), PTLS_RECORD_VERSION_MAJOR, PTLS_RECORD_VERSION_MINOR); \
+ ptls_buffer_push_block((buf), 2, block); \
+ } while (0)
+
+/* Splits `src`/`len` into plaintext chunks of at most
+ * PTLS_MAX_PLAINTEXT_RECORD_SIZE and emits each as an encrypted
+ * application_data record carrying inner content-type `type`. */
+static int buffer_push_encrypted_records(ptls_buffer_t *buf, uint8_t type, const uint8_t *src, size_t len,
+ struct st_ptls_traffic_protection_t *enc)
+{
+ int ret = 0;
+
+ while (len != 0) {
+ size_t chunk_size = len;
+ if (chunk_size > PTLS_MAX_PLAINTEXT_RECORD_SIZE)
+ chunk_size = PTLS_MAX_PLAINTEXT_RECORD_SIZE;
+ buffer_push_record(buf, PTLS_CONTENT_TYPE_APPDATA, {
+ /* reserve room for ciphertext, inner content-type byte and tag */
+ if ((ret = ptls_buffer_reserve(buf, chunk_size + enc->aead->algo->tag_size + 1)) != 0)
+ goto Exit;
+ buf->off += aead_encrypt(enc, buf->base + buf->off, src, chunk_size, type);
+ });
+ src += chunk_size;
+ len -= chunk_size;
+ }
+
+Exit:
+ return ret;
+}
+
+/* Encrypts, in place where possible, the plaintext record that was written
+ * starting at `rec_start` (5-byte header + body). Bodies that fit in one
+ * record are sealed in place (AEAD input and output overlap at the same
+ * offset) and the header is patched to the ciphertext type/length; larger
+ * bodies are copied to a temporary buffer, the plaintext is wiped, and the
+ * data is re-emitted as multiple encrypted records. */
+static int buffer_encrypt_record(ptls_buffer_t *buf, size_t rec_start, struct st_ptls_traffic_protection_t *enc)
+{
+ size_t bodylen = buf->off - rec_start - 5;
+ uint8_t *tmpbuf, type = buf->base[rec_start];
+ int ret;
+
+ /* fast path: do in-place encryption if only one record needs to be emitted */
+ if (bodylen <= PTLS_MAX_PLAINTEXT_RECORD_SIZE) {
+ size_t overhead = 1 + enc->aead->algo->tag_size;
+ if ((ret = ptls_buffer_reserve(buf, overhead)) != 0)
+ return ret;
+ size_t encrypted_len = aead_encrypt(enc, buf->base + rec_start + 5, buf->base + rec_start + 5, bodylen, type);
+ assert(encrypted_len == bodylen + overhead);
+ buf->off += overhead;
+ buf->base[rec_start] = PTLS_CONTENT_TYPE_APPDATA;
+ buf->base[rec_start + 3] = (encrypted_len >> 8) & 0xff;
+ buf->base[rec_start + 4] = encrypted_len & 0xff;
+ return 0;
+ }
+
+ /* move plaintext to temporary buffer */
+ if ((tmpbuf = malloc(bodylen)) == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ memcpy(tmpbuf, buf->base + rec_start + 5, bodylen);
+ ptls_clear_memory(buf->base + rec_start, bodylen + 5);
+ buf->off = rec_start;
+
+ /* push encrypted records */
+ ret = buffer_push_encrypted_records(buf, type, tmpbuf, bodylen, enc);
+
+Exit:
+ /* tmpbuf holds plaintext (possibly key material) — wipe before freeing */
+ if (tmpbuf != NULL) {
+ ptls_clear_memory(tmpbuf, bodylen);
+ free(tmpbuf);
+ }
+ return ret;
+}
+
+/* Message-emitter callback: starts a handshake record by writing a 5-byte
+ * header with a zero length placeholder, remembering its offset so
+ * commit_record_message() can finish it. */
+static int begin_record_message(ptls_message_emitter_t *_self)
+{
+ struct st_ptls_record_message_emitter_t *self = (void *)_self;
+ int ret;
+
+ self->rec_start = self->super.buf->off;
+ ptls_buffer_push(self->super.buf, PTLS_CONTENT_TYPE_HANDSHAKE, PTLS_RECORD_VERSION_MAJOR, PTLS_RECORD_VERSION_MINOR, 0, 0);
+ ret = 0;
+Exit:
+ return ret;
+}
+
+/* Message-emitter callback: finalizes the record begun by
+ * begin_record_message() — encrypting it when a write key is active,
+ * otherwise just back-patching the plaintext length. */
+static int commit_record_message(ptls_message_emitter_t *_self)
+{
+ struct st_ptls_record_message_emitter_t *self = (void *)_self;
+ int ret;
+
+ if (self->super.enc->aead != NULL) {
+ ret = buffer_encrypt_record(self->super.buf, self->rec_start, self->super.enc);
+ } else {
+ /* TODO allow CH,SH,HRR above 16KB */
+ size_t sz = self->super.buf->off - self->rec_start - 5;
+ assert(sz <= PTLS_MAX_PLAINTEXT_RECORD_SIZE);
+ self->super.buf->base[self->rec_start + 3] = (uint8_t)(sz >> 8);
+ self->super.buf->base[self->rec_start + 4] = (uint8_t)(sz);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* Emits one extension: 2-byte type followed by a 2-byte length-prefixed body.
+ * NOTE(review): upstream quirk — the trailing `;` after while(0) makes this
+ * macro expand to two statements; kept as-is to stay in sync with upstream. */
+#define buffer_push_extension(buf, type, block) \
+ do { \
+ ptls_buffer_push16((buf), (type)); \
+ ptls_buffer_push_block((buf), 2, block); \
+ } while (0);
+
+/* Iterates over an extensions vector in handshake message `hstype`: for each
+ * extension, stores its id in *(exttype), rejects duplicates and extensions
+ * not permitted in this message (via the bitmap), then runs `block` on the
+ * extension body. Leaves the enclosing length block open. */
+#define decode_open_extensions(src, end, hstype, exttype, block) \
+ do { \
+ struct st_ptls_extension_bitmap_t bitmap; \
+ init_extension_bitmap(&bitmap, (hstype)); \
+ ptls_decode_open_block((src), end, 2, { \
+ while ((src) != end) { \
+ if ((ret = ptls_decode16((exttype), &(src), end)) != 0) \
+ goto Exit; \
+ if (extension_bitmap_is_set(&bitmap, *(exttype)) != 0) { \
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER; \
+ goto Exit; \
+ } \
+ extension_bitmap_set(&bitmap, *(exttype)); \
+ ptls_decode_open_block((src), end, 2, block); \
+ } \
+ }); \
+ } while (0)
+
+/* Same as decode_open_extensions() but also asserts that the extensions
+ * vector consumed the input exactly to `end`. */
+#define decode_extensions(src, end, hstype, exttype, block) \
+ do { \
+ decode_open_extensions((src), end, hstype, exttype, block); \
+ ptls_decode_assert_block_close((src), end); \
+ } while (0)
+
+/* Bounds-checked big-endian readers: each reads a fixed-width integer from
+ * *src, advances *src past it, and returns 0, or PTLS_ALERT_DECODE_ERROR
+ * when fewer bytes remain before `end`. */
+int ptls_decode16(uint16_t *value, const uint8_t **src, const uint8_t *end)
+{
+ if (end - *src < 2)
+ return PTLS_ALERT_DECODE_ERROR;
+ *value = ntoh16(*src);
+ *src += 2;
+ return 0;
+}
+
+int ptls_decode24(uint32_t *value, const uint8_t **src, const uint8_t *end)
+{
+ if (end - *src < 3)
+ return PTLS_ALERT_DECODE_ERROR;
+ *value = ((uint32_t)(*src)[0] << 16) | ((uint32_t)(*src)[1] << 8) | (*src)[2];
+ *src += 3;
+ return 0;
+}
+
+int ptls_decode32(uint32_t *value, const uint8_t **src, const uint8_t *end)
+{
+ if (end - *src < 4)
+ return PTLS_ALERT_DECODE_ERROR;
+ *value = ntoh32(*src);
+ *src += 4;
+ return 0;
+}
+
+int ptls_decode64(uint64_t *value, const uint8_t **src, const uint8_t *end)
+{
+ if (end - *src < 8)
+ return PTLS_ALERT_DECODE_ERROR;
+ *value = ntoh64(*src);
+ *src += 8;
+ return 0;
+}
+
+/* Decodes a QUIC variable-length integer: the top two bits of the first byte
+ * select a 1/2/4/8-byte encoding. Advances *src past the value and returns
+ * it, or UINT64_MAX when the input is truncated (UINT64_MAX is not encodable
+ * as a varint, so it is unambiguous as an error value). */
+uint64_t ptls_decode_quicint(const uint8_t **src, const uint8_t *end)
+{
+ if (PTLS_UNLIKELY(*src == end))
+ return UINT64_MAX;
+
+ uint8_t b = *(*src)++;
+
+ if (PTLS_LIKELY(b <= 0x3f))
+ return b;
+
+ uint64_t v = b & 0x3f;
+ unsigned bytes_left = (1 << (b >> 6)) - 1;
+ if (PTLS_UNLIKELY((size_t)(end - *src) < bytes_left))
+ return UINT64_MAX;
+ do {
+ v = (v << 8) | *(*src)++;
+ } while (--bytes_left != 0);
+ return v;
+}
+
+/* Reports a newly derived secret to the dtrace probe and, when configured,
+ * to the log_event callback (hex-encoded, SSLKEYLOGFILE-style label). */
+static void log_secret(ptls_t *tls, const char *type, ptls_iovec_t secret)
+{
+ char hexbuf[PTLS_MAX_DIGEST_SIZE * 2 + 1];
+
+ PTLS_PROBE(NEW_SECRET, tls, type, ptls_hexdump(hexbuf, secret.base, secret.len));
+
+ if (tls->ctx->log_event != NULL)
+ tls->ctx->log_event->cb(tls->ctx->log_event, tls, type, "%s", ptls_hexdump(hexbuf, secret.base, secret.len));
+}
+
+/* Destroys a key schedule: wipes the current secret, releases every
+ * transcript-hash context, then frees the structure itself. */
+static void key_schedule_free(ptls_key_schedule_t *sched)
+{
+ size_t i;
+ ptls_clear_memory(sched->secret, sizeof(sched->secret));
+ for (i = 0; i != sched->num_hashes; ++i)
+ sched->hashes[i].ctx->final(sched->hashes[i].ctx, NULL, PTLS_HASH_FINAL_MODE_FREE);
+ free(sched);
+}
+
+/* Allocates a key schedule with one transcript-hash context per distinct
+ * hash algorithm among `preferred` and the NULL-terminated `offered` list
+ * (the FOREACH_HASH macro enumerates each distinct hash exactly once,
+ * preferred first). Returns NULL on allocation failure. */
+static ptls_key_schedule_t *key_schedule_new(ptls_cipher_suite_t *preferred, ptls_cipher_suite_t **offered,
+ const char *hkdf_label_prefix)
+{
+#define FOREACH_HASH(block) \
+ do { \
+ ptls_cipher_suite_t *cs; \
+ if ((cs = preferred) != NULL) { \
+ block \
+ } \
+ if (offered != NULL) { \
+ size_t i, j; \
+ for (i = 0; (cs = offered[i]) != NULL; ++i) { \
+ if (preferred == NULL || cs->hash != preferred->hash) { \
+ for (j = 0; j != i; ++j) \
+ if (cs->hash == offered[j]->hash) \
+ break; \
+ if (j == i) { \
+ block \
+ } \
+ } \
+ } \
+ } \
+ } while (0)
+
+ ptls_key_schedule_t *sched;
+
+ if (hkdf_label_prefix == NULL)
+ hkdf_label_prefix = PTLS_HKDF_EXPAND_LABEL_PREFIX;
+
+ { /* allocate (trailing hashes[] array sized to the distinct-hash count) */
+ size_t num_hashes = 0;
+ FOREACH_HASH({ ++num_hashes; });
+ if ((sched = malloc(offsetof(ptls_key_schedule_t, hashes) + sizeof(sched->hashes[0]) * num_hashes)) == NULL)
+ return NULL;
+ *sched = (ptls_key_schedule_t){0, hkdf_label_prefix};
+ }
+
+ /* setup the hash algos and contexts */
+ FOREACH_HASH({
+ sched->hashes[sched->num_hashes].algo = cs->hash;
+ if ((sched->hashes[sched->num_hashes].ctx = cs->hash->create()) == NULL)
+ goto Fail;
+ ++sched->num_hashes;
+ });
+
+ return sched;
+Fail:
+ key_schedule_free(sched);
+ return NULL;
+
+#undef FOREACH_HASH
+}
+
+/* Advances the key schedule one generation (TLS 1.3 "Extract" step): mixes
+ * `ikm` (or all-zeros of digest size when ikm.base is NULL) into the current
+ * secret. For generations past the first, the current secret is first run
+ * through Derive-Secret("derived", empty-hash) as RFC 8446 §7.1 requires. */
+static int key_schedule_extract(ptls_key_schedule_t *sched, ptls_iovec_t ikm)
+{
+ int ret;
+
+ if (ikm.base == NULL)
+ ikm = ptls_iovec_init(zeroes_of_max_digest_size, sched->hashes[0].algo->digest_size);
+
+ if (sched->generation != 0 &&
+ (ret = hkdf_expand_label(sched->hashes[0].algo, sched->secret, sched->hashes[0].algo->digest_size,
+ ptls_iovec_init(sched->secret, sched->hashes[0].algo->digest_size), "derived",
+ ptls_iovec_init(sched->hashes[0].algo->empty_digest, sched->hashes[0].algo->digest_size),
+ sched->hkdf_label_prefix)) != 0)
+ return ret;
+
+ ++sched->generation;
+ ret = ptls_hkdf_extract(sched->hashes[0].algo, sched->secret,
+ ptls_iovec_init(sched->secret, sched->hashes[0].algo->digest_size), ikm);
+ PTLS_DEBUGF("%s: %u, %02x%02x\n", __FUNCTION__, sched->generation, (int)sched->secret[0], (int)sched->secret[1]);
+ return ret;
+}
+
+/* Once the cipher-suite is known, collapses the schedule to the single hash
+ * used by `cs`: frees all other hash contexts and moves the selected one to
+ * slot 0. If the selected hash was not already in slot 0 (or `reset` was
+ * requested), the early secret is recomputed from zero IKM since the
+ * previous extract used a different hash. Must be called at generation 1.
+ * NOTE(review): assumes cs->hash is present in sched->hashes — verify at
+ * call sites; an absent hash would leave found_slot == SIZE_MAX. */
+static int key_schedule_select_one(ptls_key_schedule_t *sched, ptls_cipher_suite_t *cs, int reset)
+{
+ size_t found_slot = SIZE_MAX, i;
+ int ret;
+
+ assert(sched->generation == 1);
+
+ /* find the one, while freeing others */
+ for (i = 0; i != sched->num_hashes; ++i) {
+ if (sched->hashes[i].algo == cs->hash) {
+ assert(found_slot == SIZE_MAX);
+ found_slot = i;
+ } else {
+ sched->hashes[i].ctx->final(sched->hashes[i].ctx, NULL, PTLS_HASH_FINAL_MODE_FREE);
+ }
+ }
+ if (found_slot != 0) {
+ sched->hashes[0] = sched->hashes[found_slot];
+ reset = 1;
+ }
+ sched->num_hashes = 1;
+
+ /* recalculate the hash if a different hash as been selected than the one we used for calculating the early secrets */
+ if (reset) {
+ --sched->generation;
+ memset(sched->secret, 0, sizeof(sched->secret));
+ if ((ret = key_schedule_extract(sched, ptls_iovec_init(NULL, 0))) != 0)
+ goto Exit;
+ }
+
+ ret = 0;
+Exit:
+ return ret;
+}
+
+/* Feeds `msg` into every transcript-hash context maintained by the schedule. */
+void ptls__key_schedule_update_hash(ptls_key_schedule_t *sched, const uint8_t *msg, size_t msglen)
+{
+ size_t i;
+
+ PTLS_DEBUGF("%s:%zu\n", __FUNCTION__, msglen);
+ for (i = 0; i != sched->num_hashes; ++i)
+ sched->hashes[i].ctx->update(sched->hashes[i].ctx, msg, msglen);
+}
+
+/* Feeds the 4-byte synthetic message_hash handshake header into the
+ * transcript (used when replacing CH1 with its hash after HelloRetryRequest). */
+static void key_schedule_update_ch1hash_prefix(ptls_key_schedule_t *sched)
+{
+ uint8_t prefix[4] = {PTLS_HANDSHAKE_TYPE_MESSAGE_HASH, 0, 0, (uint8_t)sched->hashes[0].algo->digest_size};
+ ptls__key_schedule_update_hash(sched, prefix, sizeof(prefix));
+}
+
+/* Extracts the hash of ClientHello1 and resets the hash context for reuse. */
+static void key_schedule_extract_ch1hash(ptls_key_schedule_t *sched, uint8_t *hash)
+{
+ sched->hashes[0].ctx->final(sched->hashes[0].ctx, hash, PTLS_HASH_FINAL_MODE_RESET);
+}
+
+/* HelloRetryRequest transcript rewrite (RFC 8446 §4.4.1): replaces the CH1
+ * transcript with message_hash(CH1-digest). */
+static void key_schedule_transform_post_ch1hash(ptls_key_schedule_t *sched)
+{
+ uint8_t ch1hash[PTLS_MAX_DIGEST_SIZE];
+
+ key_schedule_extract_ch1hash(sched, ch1hash);
+
+ key_schedule_update_ch1hash_prefix(sched);
+ ptls__key_schedule_update_hash(sched, ch1hash, sched->hashes[0].algo->digest_size);
+}
+
+/* Derive-Secret with an explicitly supplied transcript-hash value. */
+static int derive_secret_with_hash(ptls_key_schedule_t *sched, void *secret, const char *label, const uint8_t *hash)
+{
+ int ret = hkdf_expand_label(sched->hashes[0].algo, secret, sched->hashes[0].algo->digest_size,
+ ptls_iovec_init(sched->secret, sched->hashes[0].algo->digest_size), label,
+ ptls_iovec_init(hash, sched->hashes[0].algo->digest_size), sched->hkdf_label_prefix);
+ PTLS_DEBUGF("%s: (label=%s, hash=%02x%02x) => %02x%02x\n", __FUNCTION__, label, hash[0], hash[1], ((uint8_t *)secret)[0],
+ ((uint8_t *)secret)[1]);
+ return ret;
+}
+
+/* Derive-Secret over the current transcript: snapshots the running hash
+ * (without disturbing it), derives, then wipes the stack copy of the hash. */
+static int derive_secret(ptls_key_schedule_t *sched, void *secret, const char *label)
+{
+ uint8_t hash_value[PTLS_MAX_DIGEST_SIZE];
+
+ sched->hashes[0].ctx->final(sched->hashes[0].ctx, hash_value, PTLS_HASH_FINAL_MODE_SNAPSHOT);
+ int ret = derive_secret_with_hash(sched, secret, label, hash_value);
+ ptls_clear_memory(hash_value, sizeof(hash_value));
+ return ret;
+}
+
+/* Derive-Secret over the empty transcript (hash of zero-length input). */
+static int derive_secret_with_empty_digest(ptls_key_schedule_t *sched, void *secret, const char *label)
+{
+ return derive_secret_with_hash(sched, secret, label, sched->hashes[0].algo->empty_digest);
+}
+
+/* Derives and stores the (early) exporter master secret when the application
+ * enabled exporters (ctx->use_exporter); no-op otherwise. Allocates the
+ * per-connection slot, derives "e exp master" / "exp master" from the
+ * current transcript, and logs it. Returns 0 or a PTLS_ERROR_* code. */
+static int derive_exporter_secret(ptls_t *tls, int is_early)
+{
+ int ret;
+
+ if (!tls->ctx->use_exporter)
+ return 0;
+
+ uint8_t **slot = is_early ? &tls->exporter_master_secret.early : &tls->exporter_master_secret.one_rtt;
+ assert(*slot == NULL);
+ if ((*slot = malloc(tls->key_schedule->hashes[0].algo->digest_size)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+
+ if ((ret = derive_secret(tls->key_schedule, *slot, is_early ? "e exp master" : "exp master")) != 0)
+ return ret;
+
+ log_secret(tls, is_early ? "EARLY_EXPORTER_SECRET" : "EXPORTER_SECRET",
+ ptls_iovec_init(*slot, tls->key_schedule->hashes[0].algo->digest_size));
+
+ return 0;
+}
+
+/* Wipes and frees one stored exporter master secret (early or 1-RTT);
+ * no-op when none was derived. */
+static void free_exporter_master_secret(ptls_t *tls, int is_early)
+{
+ uint8_t *slot = is_early ? tls->exporter_master_secret.early : tls->exporter_master_secret.one_rtt;
+ if (slot == NULL)
+ return;
+ assert(tls->key_schedule != NULL);
+ ptls_clear_memory(slot, tls->key_schedule->hashes[0].algo->digest_size);
+ free(slot);
+}
+
+/* Computes the per-ticket PSK: resumption_master_secret ("res master" over
+ * the transcript) expanded with the ticket nonce (RFC 8446 §4.6.1).
+ * `secret` must hold at least the digest size; it is wiped on failure. */
+static int derive_resumption_secret(ptls_key_schedule_t *sched, uint8_t *secret, ptls_iovec_t nonce)
+{
+ int ret;
+
+ if ((ret = derive_secret(sched, secret, "res master")) != 0)
+ goto Exit;
+ if ((ret = hkdf_expand_label(sched->hashes[0].algo, secret, sched->hashes[0].algo->digest_size,
+ ptls_iovec_init(secret, sched->hashes[0].algo->digest_size), "resumption", nonce,
+ sched->hkdf_label_prefix)) != 0)
+ goto Exit;
+
+Exit:
+ if (ret != 0)
+ ptls_clear_memory(secret, sched->hashes[0].algo->digest_size);
+ return ret;
+}
+
+/* Parses a NewSessionTicket message body ([src,end)): lifetime, age_add,
+ * nonce, the (non-empty) ticket, and the early_data extension's
+ * max_early_data_size (0 when absent). `nonce`/`ticket` point into the
+ * input. Unknown extensions are passed to ctx->on_extension and skipped.
+ * Returns 0 or a PTLS_ALERT_* decode error. */
+static int decode_new_session_ticket(ptls_t *tls, uint32_t *lifetime, uint32_t *age_add, ptls_iovec_t *nonce, ptls_iovec_t *ticket,
+ uint32_t *max_early_data_size, const uint8_t *src, const uint8_t *const end)
+{
+ uint16_t exttype;
+ int ret;
+
+ if ((ret = ptls_decode32(lifetime, &src, end)) != 0)
+ goto Exit;
+ if ((ret = ptls_decode32(age_add, &src, end)) != 0)
+ goto Exit;
+ ptls_decode_open_block(src, end, 1, {
+ *nonce = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+ ptls_decode_open_block(src, end, 2, {
+ /* an empty ticket is a protocol violation */
+ if (src == end) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ *ticket = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+
+ *max_early_data_size = 0;
+ decode_extensions(src, end, PTLS_HANDSHAKE_TYPE_NEW_SESSION_TICKET, &exttype, {
+ if (tls->ctx->on_extension != NULL &&
+ (ret = tls->ctx->on_extension->cb(tls->ctx->on_extension, tls, PTLS_HANDSHAKE_TYPE_NEW_SESSION_TICKET, exttype,
+ ptls_iovec_init(src, end - src)) != 0))
+ goto Exit;
+ switch (exttype) {
+ case PTLS_EXTENSION_TYPE_EARLY_DATA:
+ if ((ret = ptls_decode32(max_early_data_size, &src, end)) != 0)
+ goto Exit;
+ break;
+ default:
+ src = end;
+ break;
+ }
+ });
+
+ ret = 0;
+Exit:
+ return ret;
+}
+
+/* Parses a session ticket previously serialized by this library (obtained-at
+ * timestamp, key-exchange id, cipher-suite id, the embedded NewSessionTicket,
+ * and the PSK secret), resolves the ids against the algorithms currently
+ * configured in ctx, and computes obfuscated_ticket_age. Tickets older than
+ * 7 days, from the future, or referencing unconfigured algorithms are
+ * rejected with PTLS_ERROR_LIBRARY (resumption is then simply not offered). */
+static int decode_stored_session_ticket(ptls_t *tls, ptls_key_exchange_algorithm_t **key_share, ptls_cipher_suite_t **cs,
+ ptls_iovec_t *secret, uint32_t *obfuscated_ticket_age, ptls_iovec_t *ticket,
+ uint32_t *max_early_data_size, const uint8_t *src, const uint8_t *const end)
+{
+ uint16_t kxid, csid;
+ uint32_t lifetime, age_add;
+ uint64_t obtained_at, now;
+ ptls_iovec_t nonce;
+ int ret;
+
+ /* decode */
+ if ((ret = ptls_decode64(&obtained_at, &src, end)) != 0)
+ goto Exit;
+ if ((ret = ptls_decode16(&kxid, &src, end)) != 0)
+ goto Exit;
+ if ((ret = ptls_decode16(&csid, &src, end)) != 0)
+ goto Exit;
+ ptls_decode_open_block(src, end, 3, {
+ if ((ret = decode_new_session_ticket(tls, &lifetime, &age_add, &nonce, ticket, max_early_data_size, src, end)) != 0)
+ goto Exit;
+ src = end;
+ });
+ ptls_decode_block(src, end, 2, {
+ *secret = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+
+ { /* determine the key-exchange */
+ ptls_key_exchange_algorithm_t **cand;
+ for (cand = tls->ctx->key_exchanges; *cand != NULL; ++cand)
+ if ((*cand)->id == kxid)
+ break;
+ if (*cand == NULL) {
+ ret = PTLS_ERROR_LIBRARY;
+ goto Exit;
+ }
+ *key_share = *cand;
+ }
+
+ { /* determine the cipher-suite */
+ ptls_cipher_suite_t **cand;
+ for (cand = tls->ctx->cipher_suites; *cand != NULL; ++cand)
+ if ((*cand)->id == csid)
+ break;
+ if (*cand == NULL) {
+ ret = PTLS_ERROR_LIBRARY;
+ goto Exit;
+ }
+ *cs = *cand;
+ }
+
+ /* calculate obfuscated_ticket_age (ticket must be from the past and at most 7 days old) */
+ now = tls->ctx->get_time->cb(tls->ctx->get_time);
+ if (!(obtained_at <= now && now - obtained_at < 7 * 86400 * 1000)) {
+ ret = PTLS_ERROR_LIBRARY;
+ goto Exit;
+ }
+ *obfuscated_ticket_age = (uint32_t)(now - obtained_at) + age_add;
+
+ ret = 0;
+Exit:
+ return ret;
+}
+
+static int get_traffic_key(ptls_hash_algorithm_t *algo, void *key, size_t key_size, int is_iv, const void *secret,
+ ptls_iovec_t hash_value, const char *label_prefix)
+{
+ return ptls_hkdf_expand_label(algo, key, key_size, ptls_iovec_init(secret, algo->digest_size), is_iv ? "iv" : "key", hash_value,
+ label_prefix);
+}
+
static int setup_traffic_protection(ptls_t *tls, int is_enc, const char *secret_label, size_t epoch, int skip_notify)
{
    /* Installs the traffic keys for one direction (enc when is_enc, else dec) at `epoch`.
     * When `secret_label` is non-NULL the secret is first derived from the key schedule;
     * otherwise the secret already stored in the direction's context is reused. */
    static const char *log_labels[2][4] = {
        {NULL, "CLIENT_EARLY_TRAFFIC_SECRET", "CLIENT_HANDSHAKE_TRAFFIC_SECRET", "CLIENT_TRAFFIC_SECRET_0"},
        {NULL, NULL, "SERVER_HANDSHAKE_TRAFFIC_SECRET", "SERVER_TRAFFIC_SECRET_0"}};
    struct st_ptls_traffic_protection_t *ctx = is_enc ? &tls->traffic_protection.enc : &tls->traffic_protection.dec;

    if (secret_label != NULL) {
        int ret;
        if ((ret = derive_secret(tls->key_schedule, ctx->secret, secret_label)) != 0)
            return ret;
    }

    ctx->epoch = epoch;

    /* special path for applications having their own record layer */
    if (tls->ctx->update_traffic_key != NULL) {
        if (skip_notify)
            return 0;
        return tls->ctx->update_traffic_key->cb(tls->ctx->update_traffic_key, tls, is_enc, epoch, ctx->secret);
    }

    /* replace the AEAD context; the record sequence number restarts at zero for the new epoch */
    if (ctx->aead != NULL)
        ptls_aead_free(ctx->aead);
    if ((ctx->aead = ptls_aead_new(tls->cipher_suite->aead, tls->cipher_suite->hash, is_enc, ctx->secret,
                                   tls->ctx->hkdf_label_prefix__obsolete)) == NULL)
        return PTLS_ERROR_NO_MEMORY; /* TODO obtain error from ptls_aead_new */
    ctx->seq = 0;

    /* index [ptls_is_server == is_enc] selects the client/server row of log_labels */
    log_secret(tls, log_labels[ptls_is_server(tls) == is_enc][epoch],
               ptls_iovec_init(ctx->secret, tls->key_schedule->hashes[0].algo->digest_size));
    PTLS_DEBUGF("[%s] %02x%02x,%02x%02x\n", log_labels[ptls_is_server(tls)][epoch], (unsigned)ctx->secret[0],
                (unsigned)ctx->secret[1], (unsigned)ctx->aead->static_iv[0], (unsigned)ctx->aead->static_iv[1]);

    return 0;
}
+
static int commission_handshake_secret(ptls_t *tls)
{
    /* Promotes the deferred handshake traffic secret (kept aside while 0-RTT keys were in
     * use) into the active traffic-protection state, then wipes and frees the pending copy. */
    int is_enc = !ptls_is_server(tls);

    assert(tls->pending_handshake_secret != NULL);
    memcpy((is_enc ? &tls->traffic_protection.enc : &tls->traffic_protection.dec)->secret, tls->pending_handshake_secret,
           PTLS_MAX_DIGEST_SIZE);
    /* wipe the secret before releasing the buffer */
    ptls_clear_memory(tls->pending_handshake_secret, PTLS_MAX_DIGEST_SIZE);
    free(tls->pending_handshake_secret);
    tls->pending_handshake_secret = NULL;

    /* epoch 2 (handshake); secret_label == NULL reuses the secret copied above, skip_notify set */
    return setup_traffic_protection(tls, is_enc, NULL, 2, 1);
}
+
static inline void log_client_random(ptls_t *tls)
{
    /* Emits the client random as a hex string through the PTLS_PROBE tracing macro.
     * The hex buffer is alloca'ed and lives only for the duration of the probe call. */
    PTLS_PROBE(CLIENT_RANDOM, tls,
               ptls_hexdump(alloca(sizeof(tls->client_random) * 2 + 1), tls->client_random, sizeof(tls->client_random)));
}
+
#define SESSION_IDENTIFIER_MAGIC "ptls0001" /* the number should be changed upon incompatible format change */
#define SESSION_IDENTIFIER_MAGIC_SIZE (sizeof(SESSION_IDENTIFIER_MAGIC) - 1)

/* Serializes into `buf` everything needed to resume the session later: format magic,
 * issue date, resumption secret, key-exchange / cipher-suite ids, ticket_age_add,
 * server name and the negotiated ALPN protocol. The result is what gets encrypted
 * into a NewSessionTicket (see send_session_ticket). */
static int encode_session_identifier(ptls_context_t *ctx, ptls_buffer_t *buf, uint32_t ticket_age_add, ptls_iovec_t ticket_nonce,
                                     ptls_key_schedule_t *sched, const char *server_name, uint16_t key_exchange_id, uint16_t csid,
                                     const char *negotiated_protocol)
{
    int ret = 0;

    ptls_buffer_push_block(buf, 2, {
        /* format id */
        ptls_buffer_pushv(buf, SESSION_IDENTIFIER_MAGIC, SESSION_IDENTIFIER_MAGIC_SIZE);
        /* date */
        ptls_buffer_push64(buf, ctx->get_time->cb(ctx->get_time));
        /* resumption master secret */
        ptls_buffer_push_block(buf, 2, {
            if ((ret = ptls_buffer_reserve(buf, sched->hashes[0].algo->digest_size)) != 0)
                goto Exit;
            /* derive directly into the reserved space of the buffer */
            if ((ret = derive_resumption_secret(sched, buf->base + buf->off, ticket_nonce)) != 0)
                goto Exit;
            buf->off += sched->hashes[0].algo->digest_size;
        });
        /* key-exchange */
        ptls_buffer_push16(buf, key_exchange_id);
        /* cipher-suite */
        ptls_buffer_push16(buf, csid);
        /* ticket_age_add */
        ptls_buffer_push32(buf, ticket_age_add);
        /* server-name */
        ptls_buffer_push_block(buf, 2, {
            if (server_name != NULL)
                ptls_buffer_pushv(buf, server_name, strlen(server_name));
        });
        /* alpn */
        ptls_buffer_push_block(buf, 1, {
            if (negotiated_protocol != NULL)
                ptls_buffer_pushv(buf, negotiated_protocol, strlen(negotiated_protocol));
        });
    });

Exit:
    return ret;
}
+
/* Deserializes the state produced by encode_session_identifier(). The output iovecs
 * point into the input buffer (no copies are made), so they remain valid only as long
 * as the memory behind `src` does. */
int decode_session_identifier(uint64_t *issued_at, ptls_iovec_t *psk, uint32_t *ticket_age_add, ptls_iovec_t *server_name,
                              uint16_t *key_exchange_id, uint16_t *csid, ptls_iovec_t *negotiated_protocol, const uint8_t *src,
                              const uint8_t *const end)
{
    int ret = 0;

    ptls_decode_block(src, end, 2, {
        /* reject tickets created by an incompatible (or foreign) format */
        if (end - src < SESSION_IDENTIFIER_MAGIC_SIZE ||
            memcmp(src, SESSION_IDENTIFIER_MAGIC, SESSION_IDENTIFIER_MAGIC_SIZE) != 0) {
            ret = PTLS_ALERT_DECODE_ERROR;
            goto Exit;
        }
        src += SESSION_IDENTIFIER_MAGIC_SIZE;
        if ((ret = ptls_decode64(issued_at, &src, end)) != 0)
            goto Exit;
        /* resumption master secret */
        ptls_decode_open_block(src, end, 2, {
            *psk = ptls_iovec_init(src, end - src);
            src = end;
        });
        if ((ret = ptls_decode16(key_exchange_id, &src, end)) != 0)
            goto Exit;
        if ((ret = ptls_decode16(csid, &src, end)) != 0)
            goto Exit;
        if ((ret = ptls_decode32(ticket_age_add, &src, end)) != 0)
            goto Exit;
        /* server-name (may be empty) */
        ptls_decode_open_block(src, end, 2, {
            *server_name = ptls_iovec_init(src, end - src);
            src = end;
        });
        /* negotiated ALPN protocol (may be empty) */
        ptls_decode_open_block(src, end, 1, {
            *negotiated_protocol = ptls_iovec_init(src, end - src);
            src = end;
        });
    });

Exit:
    return ret;
}
+
+static size_t build_certificate_verify_signdata(uint8_t *data, ptls_key_schedule_t *sched, const char *context_string)
+{
+ size_t datalen = 0;
+
+ memset(data + datalen, 32, 64);
+ datalen += 64;
+ memcpy(data + datalen, context_string, strlen(context_string) + 1);
+ datalen += strlen(context_string) + 1;
+ sched->hashes[0].ctx->final(sched->hashes[0].ctx, data + datalen, PTLS_HASH_FINAL_MODE_SNAPSHOT);
+ datalen += sched->hashes[0].algo->digest_size;
+ assert(datalen <= PTLS_MAX_CERTIFICATE_VERIFY_SIGNDATA_SIZE);
+
+ return datalen;
+}
+
static int calc_verify_data(void *output, ptls_key_schedule_t *sched, const void *secret)
{
    /* Computes Finished verify_data: an HMAC over a snapshot of the current transcript
     * hash, keyed with the "finished" key expanded from `secret`. `output` receives
     * digest_size bytes. */
    ptls_hash_context_t *hmac;
    uint8_t digest[PTLS_MAX_DIGEST_SIZE];
    int ret;

    /* finished_key = HKDF-Expand-Label(secret, "finished", "", digest_size) */
    if ((ret = hkdf_expand_label(sched->hashes[0].algo, digest, sched->hashes[0].algo->digest_size,
                                 ptls_iovec_init(secret, sched->hashes[0].algo->digest_size), "finished", ptls_iovec_init(NULL, 0),
                                 sched->hkdf_label_prefix)) != 0)
        return ret;
    if ((hmac = ptls_hmac_create(sched->hashes[0].algo, digest, sched->hashes[0].algo->digest_size)) == NULL) {
        ptls_clear_memory(digest, sizeof(digest));
        return PTLS_ERROR_NO_MEMORY;
    }

    /* SNAPSHOT keeps the running transcript-hash context intact; digest is reused to
     * hold the transcript hash from here on */
    sched->hashes[0].ctx->final(sched->hashes[0].ctx, digest, PTLS_HASH_FINAL_MODE_SNAPSHOT);
    PTLS_DEBUGF("%s: %02x%02x,%02x%02x\n", __FUNCTION__, ((uint8_t *)secret)[0], ((uint8_t *)secret)[1], digest[0], digest[1]);
    hmac->update(hmac, digest, sched->hashes[0].algo->digest_size);
    ptls_clear_memory(digest, sizeof(digest)); /* wipe key material from the stack */
    hmac->final(hmac, output, PTLS_HASH_FINAL_MODE_FREE);

    return 0;
}
+
static int verify_finished(ptls_t *tls, ptls_iovec_t message)
{
    /* Verifies the peer's Finished message against verify_data computed from our
     * transcript and the peer's (decryption-side) traffic secret. */
    uint8_t verify_data[PTLS_MAX_DIGEST_SIZE];
    int ret;

    /* message must be exactly handshake header + one digest */
    if (PTLS_HANDSHAKE_HEADER_SIZE + tls->key_schedule->hashes[0].algo->digest_size != message.len) {
        ret = PTLS_ALERT_DECODE_ERROR;
        goto Exit;
    }

    if ((ret = calc_verify_data(verify_data, tls->key_schedule, tls->traffic_protection.dec.secret)) != 0)
        goto Exit;
    /* NOTE(review): ptls_mem_equal is presumably a constant-time comparison — confirm against picotls.h */
    if (!ptls_mem_equal(message.base + PTLS_HANDSHAKE_HEADER_SIZE, verify_data, tls->key_schedule->hashes[0].algo->digest_size)) {
        ret = PTLS_ALERT_HANDSHAKE_FAILURE;
        goto Exit;
    }

Exit:
    ptls_clear_memory(verify_data, sizeof(verify_data)); /* wipe derived MAC from the stack */
    return ret;
}
+
static int send_finished(ptls_t *tls, ptls_message_emitter_t *emitter)
{
    /* Emits our Finished message: verify_data computed over the current transcript with
     * our (encryption-side) traffic secret, written directly into the emitter's buffer. */
    int ret;

    ptls_push_message(emitter, tls->key_schedule, PTLS_HANDSHAKE_TYPE_FINISHED, {
        if ((ret = ptls_buffer_reserve(emitter->buf, tls->key_schedule->hashes[0].algo->digest_size)) != 0)
            goto Exit;
        if ((ret = calc_verify_data(emitter->buf->base + emitter->buf->off, tls->key_schedule,
                                    tls->traffic_protection.enc.secret)) != 0)
            goto Exit;
        emitter->buf->off += tls->key_schedule->hashes[0].algo->digest_size;
    });

Exit:
    return ret;
}
+
static int send_session_ticket(ptls_t *tls, ptls_message_emitter_t *emitter)
{
    /* Builds and sends a NewSessionTicket. The resumption secret must cover the client's
     * not-yet-received EndOfEarlyData/Finished messages, so those are hashed speculatively
     * (bytes written, hashed, then rolled back) and the original transcript-hash context
     * is restored before returning. */
    ptls_hash_context_t *msghash_backup = tls->key_schedule->hashes[0].ctx->clone_(tls->key_schedule->hashes[0].ctx);
    ptls_buffer_t session_id;
    char session_id_smallbuf[128];
    uint32_t ticket_age_add;
    int ret = 0;

    assert(tls->ctx->ticket_lifetime != 0);
    assert(tls->ctx->encrypt_ticket != NULL);

    { /* calculate verify-data that will be sent by the client */
        size_t orig_off = emitter->buf->off;
        if (tls->pending_handshake_secret != NULL && !tls->ctx->omit_end_of_early_data) {
            assert(tls->state == PTLS_STATE_SERVER_EXPECT_END_OF_EARLY_DATA);
            /* hash EndOfEarlyData, then roll the buffer back: only the hash update matters */
            ptls_buffer_push_message_body(emitter->buf, tls->key_schedule, PTLS_HANDSHAKE_TYPE_END_OF_EARLY_DATA, {});
            emitter->buf->off = orig_off;
        }
        ptls_buffer_push_message_body(emitter->buf, tls->key_schedule, PTLS_HANDSHAKE_TYPE_FINISHED, {
            if ((ret = ptls_buffer_reserve(emitter->buf, tls->key_schedule->hashes[0].algo->digest_size)) != 0)
                goto Exit;
            if ((ret = calc_verify_data(emitter->buf->base + emitter->buf->off, tls->key_schedule,
                                        tls->pending_handshake_secret != NULL ? tls->pending_handshake_secret
                                                                              : tls->traffic_protection.dec.secret)) != 0)
                goto Exit;
            emitter->buf->off += tls->key_schedule->hashes[0].algo->digest_size;
        });
        emitter->buf->off = orig_off;
    }

    tls->ctx->random_bytes(&ticket_age_add, sizeof(ticket_age_add));

    /* build the raw nsk */
    ptls_buffer_init(&session_id, session_id_smallbuf, sizeof(session_id_smallbuf));
    ret = encode_session_identifier(tls->ctx, &session_id, ticket_age_add, ptls_iovec_init(NULL, 0), tls->key_schedule,
                                    tls->server_name, tls->key_share->id, tls->cipher_suite->id, tls->negotiated_protocol);
    if (ret != 0)
        goto Exit;

    /* encrypt and send */
    ptls_push_message(emitter, tls->key_schedule, PTLS_HANDSHAKE_TYPE_NEW_SESSION_TICKET, {
        ptls_buffer_push32(emitter->buf, tls->ctx->ticket_lifetime);
        ptls_buffer_push32(emitter->buf, ticket_age_add);
        ptls_buffer_push_block(emitter->buf, 1, {}); /* empty ticket_nonce */
        ptls_buffer_push_block(emitter->buf, 2, {
            if ((ret = tls->ctx->encrypt_ticket->cb(tls->ctx->encrypt_ticket, tls, 1, emitter->buf,
                                                    ptls_iovec_init(session_id.base, session_id.off))) != 0)
                goto Exit;
        });
        /* extensions: advertise early-data limit when 0-RTT is enabled */
        ptls_buffer_push_block(emitter->buf, 2, {
            if (tls->ctx->max_early_data_size != 0)
                buffer_push_extension(emitter->buf, PTLS_EXTENSION_TYPE_EARLY_DATA,
                                      { ptls_buffer_push32(emitter->buf, tls->ctx->max_early_data_size); });
        });
    });

Exit:
    ptls_buffer_dispose(&session_id);

    /* restore handshake state */
    tls->key_schedule->hashes[0].ctx->final(tls->key_schedule->hashes[0].ctx, NULL, PTLS_HASH_FINAL_MODE_FREE);
    tls->key_schedule->hashes[0].ctx = msghash_backup;

    return ret;
}
+
static int push_change_cipher_spec(ptls_t *tls, ptls_buffer_t *sendbuf)
{
    /* Emits a compatibility ChangeCipherSpec record (single byte 0x01), at most once:
     * the send_change_cipher_spec flag is cleared after the first emission. */
    int ret = 0;

    if (!tls->send_change_cipher_spec)
        goto Exit;
    buffer_push_record(sendbuf, PTLS_CONTENT_TYPE_CHANGE_CIPHER_SPEC, { ptls_buffer_push(sendbuf, 1); });
    tls->send_change_cipher_spec = 0; /* one-shot */
Exit:
    return ret;
}
+
+static int push_additional_extensions(ptls_handshake_properties_t *properties, ptls_buffer_t *sendbuf)
+{
+ int ret;
+
+ if (properties != NULL && properties->additional_extensions != NULL) {
+ ptls_raw_extension_t *ext;
+ for (ext = properties->additional_extensions; ext->type != UINT16_MAX; ++ext) {
+ buffer_push_extension(sendbuf, ext->type, { ptls_buffer_pushv(sendbuf, ext->data.base, ext->data.len); });
+ }
+ }
+ ret = 0;
+Exit:
+ return ret;
+}
+
static int push_signature_algorithms(ptls_buffer_t *sendbuf)
{
    /* Emits the hard-coded list of signature schemes we offer, in preference order. */
    int ret;

    ptls_buffer_push_block(sendbuf, 2, {
        ptls_buffer_push16(sendbuf, PTLS_SIGNATURE_RSA_PSS_RSAE_SHA256);
        ptls_buffer_push16(sendbuf, PTLS_SIGNATURE_ECDSA_SECP256R1_SHA256);
        ptls_buffer_push16(sendbuf, PTLS_SIGNATURE_RSA_PKCS1_SHA256);
        ptls_buffer_push16(sendbuf, PTLS_SIGNATURE_RSA_PKCS1_SHA1);
    });

    ret = 0;
Exit: /* reached via the push macros on buffer failure */
    return ret;
}
+
static int decode_signature_algorithms(struct st_ptls_signature_algorithms_t *sa, const uint8_t **src, const uint8_t *end)
{
    /* Parses a signature_algorithms list into `sa`. Entries beyond the capacity of
     * sa->list are read but silently dropped. The do-while always reads at least one
     * entry, so an empty list fails to decode. */
    int ret;

    ptls_decode_block(*src, end, 2, {
        do {
            uint16_t id;
            if ((ret = ptls_decode16(&id, src, end)) != 0)
                goto Exit;
            if (sa->count < sizeof(sa->list) / sizeof(sa->list[0]))
                sa->list[sa->count++] = id;
        } while (*src != end);
    });

    ret = 0;
Exit:
    return ret;
}
+
+static ptls_hash_context_t *create_sha256_context(ptls_context_t *ctx)
+{
+ ptls_cipher_suite_t **cs;
+
+ for (cs = ctx->cipher_suites; *cs != NULL; ++cs) {
+ switch ((*cs)->id) {
+ case PTLS_CIPHER_SUITE_AES_128_GCM_SHA256:
+ case PTLS_CIPHER_SUITE_CHACHA20_POLY1305_SHA256:
+ return (*cs)->hash->create();
+ }
+ }
+
+ return NULL;
+}
+
+static int select_cipher(ptls_cipher_suite_t **selected, ptls_cipher_suite_t **candidates, const uint8_t *src,
+ const uint8_t *const end)
+{
+ int ret;
+
+ while (src != end) {
+ uint16_t id;
+ if ((ret = ptls_decode16(&id, &src, end)) != 0)
+ goto Exit;
+ ptls_cipher_suite_t **c = candidates;
+ for (; *c != NULL; ++c) {
+ if ((*c)->id == id) {
+ *selected = *c;
+ return 0;
+ }
+ }
+ }
+
+ ret = PTLS_ALERT_HANDSHAKE_FAILURE;
+
+Exit:
+ return ret;
+}
+
static int push_key_share_entry(ptls_buffer_t *buf, uint16_t group, ptls_iovec_t pubkey)
{
    /* Emits one KeyShareEntry: the named-group id followed by the 2-byte length-prefixed
     * public key. */
    int ret;

    ptls_buffer_push16(buf, group);
    ptls_buffer_push_block(buf, 2, { ptls_buffer_pushv(buf, pubkey.base, pubkey.len); });
    ret = 0;
Exit: /* reached via the push macros on buffer failure */
    return ret;
}
+
static int decode_key_share_entry(uint16_t *group, ptls_iovec_t *key_exchange, const uint8_t **src, const uint8_t *const end)
{
    /* Parses one KeyShareEntry. `key_exchange` points into the input buffer (no copy). */
    int ret;

    if ((ret = ptls_decode16(group, src, end)) != 0)
        goto Exit;
    ptls_decode_open_block(*src, end, 2, {
        *key_exchange = ptls_iovec_init(*src, end - *src);
        *src = end;
    });

Exit:
    return ret;
}
+
static int select_key_share(ptls_key_exchange_algorithm_t **selected, ptls_iovec_t *peer_key,
                            ptls_key_exchange_algorithm_t **candidates, const uint8_t **src, const uint8_t *const end,
                            int expect_one)
{
    /* Scans the peer's KeyShareEntry list and selects the first entry whose group appears
     * in `candidates` (earlier entries win; *selected is never overwritten once set).
     * With `expect_one`, the list must be non-empty, only the first entry is examined,
     * and it must be acceptable. */
    int ret;

    *selected = NULL;

    if (expect_one && *src == end) {
        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
        goto Exit;
    }

    while (*src != end) {
        uint16_t group;
        ptls_iovec_t key;
        if ((ret = decode_key_share_entry(&group, &key, src, end)) != 0)
            goto Exit;
        ptls_key_exchange_algorithm_t **c = candidates;
        for (; *c != NULL; ++c) {
            if (*selected == NULL && (*c)->id == group) {
                *selected = *c;
                *peer_key = key;
            }
        }
        if (expect_one) {
            ret = *selected != NULL ? 0 : PTLS_ALERT_ILLEGAL_PARAMETER;
            goto Exit;
        }
    }

    ret = 0;

Exit:
    return ret;
}
+
static int emit_server_name_extension(ptls_buffer_t *buf, const char *server_name)
{
    /* Emits the ServerNameList body of an SNI extension with a single host_name entry. */
    int ret;

    ptls_buffer_push_block(buf, 2, {
        ptls_buffer_push(buf, PTLS_SERVER_NAME_TYPE_HOSTNAME);
        ptls_buffer_push_block(buf, 2, { ptls_buffer_pushv(buf, server_name, strlen(server_name)); });
    });

    ret = 0;
Exit: /* reached via the push macros on buffer failure */
    return ret;
}
+
+static int parse_esni_keys(ptls_context_t *ctx, uint16_t *esni_version, ptls_key_exchange_algorithm_t **selected_key_share,
+ ptls_cipher_suite_t **selected_cipher, ptls_iovec_t *peer_key, uint16_t *padded_length,
+ char **published_sni, ptls_iovec_t input)
+{
+ const uint8_t *src = input.base, *const end = input.base + input.len;
+ uint16_t version;
+ uint64_t not_before, not_after, now;
+ int ret = 0;
+
+ /* version */
+ if ((ret = ptls_decode16(&version, &src, end)) != 0)
+ goto Exit;
+ if (version != PTLS_ESNI_VERSION_DRAFT03) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+
+ { /* verify checksum */
+ ptls_hash_context_t *hctx;
+ uint8_t digest[PTLS_SHA256_DIGEST_SIZE];
+ if (end - src < 4) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ if ((hctx = create_sha256_context(ctx)) == NULL) {
+ ret = PTLS_ERROR_LIBRARY;
+ goto Exit;
+ }
+ hctx->update(hctx, input.base, src - input.base);
+ hctx->update(hctx, "\0\0\0\0", 4);
+ hctx->update(hctx, src + 4, end - (src + 4));
+ hctx->final(hctx, digest, PTLS_HASH_FINAL_MODE_FREE);
+ if (memcmp(src, digest, 4) != 0) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ src += 4;
+ }
+ *esni_version = version;
+ /* published sni */
+ ptls_decode_open_block(src, end, 2, {
+ size_t len = end - src;
+ *published_sni = malloc(len + 1);
+ if (*published_sni == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ if (len > 0) {
+ memcpy(*published_sni, src, len);
+ }
+ (*published_sni)[len] = 0;
+ src = end;
+ });
+ /* key-shares */
+ ptls_decode_open_block(src, end, 2, {
+ if ((ret = select_key_share(selected_key_share, peer_key, ctx->key_exchanges, &src, end, 0)) != 0)
+ goto Exit;
+ });
+ /* cipher-suite */
+ ptls_decode_open_block(src, end, 2, {
+ if ((ret = select_cipher(selected_cipher, ctx->cipher_suites, src, end)) != 0)
+ goto Exit;
+ src = end;
+ });
+ /* padded-length */
+ if ((ret = ptls_decode16(padded_length, &src, end)) != 0)
+ goto Exit;
+ if (padded_length == 0)
+ goto Exit;
+ /* not-before, not_after */
+ if ((ret = ptls_decode64(¬_before, &src, end)) != 0 || (ret = ptls_decode64(¬_after, &src, end)) != 0)
+ goto Exit;
+ /* extensions */
+ ptls_decode_block(src, end, 2, {
+ while (src != end) {
+ uint16_t id;
+ if ((ret = ptls_decode16(&id, &src, end)) != 0)
+ goto Exit;
+ ptls_decode_open_block(src, end, 2, { src = end; });
+ }
+ });
+
+ /* check validity period */
+ now = ctx->get_time->cb(ctx->get_time);
+ if (!(not_before * 1000 <= now && now <= not_after * 1000)) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+
+ ret = 0;
+Exit:
+ return ret;
+}
+
static int create_esni_aead(ptls_aead_context_t **aead_ctx, int is_enc, ptls_cipher_suite_t *cipher, ptls_iovec_t ecdh_secret,
                            const uint8_t *esni_contents_hash)
{
    /* Creates the AEAD used to (de)crypt the ESNI payload: HKDF-Extract the ECDH secret
     * with an empty salt, then key the AEAD with it, bound to hash(ESNIContents) under
     * the "tls13 esni " label prefix. */
    uint8_t aead_secret[PTLS_MAX_DIGEST_SIZE];
    int ret;

    if ((ret = ptls_hkdf_extract(cipher->hash, aead_secret, ptls_iovec_init(NULL, 0), ecdh_secret)) != 0)
        goto Exit;
    if ((*aead_ctx = new_aead(cipher->aead, cipher->hash, is_enc, aead_secret,
                              ptls_iovec_init(esni_contents_hash, cipher->hash->digest_size), "tls13 esni ")) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }

    ret = 0;
Exit:
    ptls_clear_memory(aead_secret, sizeof(aead_secret)); /* wipe intermediate key material */
    return ret;
}
+
static int build_esni_contents_hash(ptls_hash_algorithm_t *hash, uint8_t *digest, const uint8_t *record_digest, uint16_t group,
                                    ptls_iovec_t pubkey, const uint8_t *client_random)
{
    /* Computes hash(ESNIContents), i.e. hash over (length-prefixed record_digest ||
     * KeyShareEntry || client_random). `digest` must provide hash->digest_size bytes. */
    ptls_buffer_t buf;
    uint8_t smallbuf[256];
    int ret;

    /* build ESNIContents */
    ptls_buffer_init(&buf, smallbuf, sizeof(smallbuf));
    ptls_buffer_push_block(&buf, 2, { ptls_buffer_pushv(&buf, record_digest, hash->digest_size); });
    if ((ret = push_key_share_entry(&buf, group, pubkey)) != 0)
        goto Exit;
    ptls_buffer_pushv(&buf, client_random, PTLS_HELLO_RANDOM_SIZE);

    /* calculate digest */
    if ((ret = ptls_calc_hash(hash, digest, buf.base, buf.off)) != 0)
        goto Exit;

    ret = 0;
Exit:
    ptls_buffer_dispose(&buf);
    return ret;
}
+
+static void free_esni_secret(ptls_esni_secret_t **esni, int is_server)
+{
+ assert(*esni != NULL);
+ if ((*esni)->secret.base != NULL) {
+ ptls_clear_memory((*esni)->secret.base, (*esni)->secret.len);
+ free((*esni)->secret.base);
+ }
+ if (!is_server)
+ free((*esni)->client.pubkey.base);
+ ptls_clear_memory((*esni), sizeof(**esni));
+ free(*esni);
+ *esni = NULL;
+}
+
static int client_setup_esni(ptls_context_t *ctx, ptls_esni_secret_t **esni, ptls_iovec_t esni_keys, char **published_sni,
                             const uint8_t *client_random)
{
    /* Prepares client-side ESNI state: parses ESNIKeys, derives the ECDH secret and
     * hash(ESNIContents). A malformed ESNIKeys is NOT an error: *esni is left NULL and 0
     * is returned, so the handshake silently proceeds without ESNI. */
    ptls_iovec_t peer_key;
    int ret;

    if ((*esni = malloc(sizeof(**esni))) == NULL)
        return PTLS_ERROR_NO_MEMORY;
    memset(*esni, 0, sizeof(**esni));

    /* parse ESNI_Keys (and return success while keeping *esni NULL) */
    if (parse_esni_keys(ctx, &(*esni)->version, &(*esni)->client.key_share, &(*esni)->client.cipher, &peer_key,
                        &(*esni)->client.padded_length, published_sni, esni_keys) != 0) {
        free(*esni);
        *esni = NULL;
        return 0;
    }

    ctx->random_bytes((*esni)->nonce, sizeof((*esni)->nonce));

    /* calc record digest */
    if ((ret = ptls_calc_hash((*esni)->client.cipher->hash, (*esni)->client.record_digest, esni_keys.base, esni_keys.len)) != 0)
        goto Exit;
    /* derive ECDH secret */
    if ((ret = (*esni)->client.key_share->exchange((*esni)->client.key_share, &(*esni)->client.pubkey, &(*esni)->secret,
                                                   peer_key)) != 0)
        goto Exit;
    /* calc H(ESNIContents) */
    if ((ret = build_esni_contents_hash((*esni)->client.cipher->hash, (*esni)->esni_contents_hash, (*esni)->client.record_digest,
                                        (*esni)->client.key_share->id, (*esni)->client.pubkey, client_random)) != 0)
        goto Exit;

    ret = 0;
Exit:
    if (ret != 0)
        free_esni_secret(esni, 0); /* is_server == 0: also frees client.pubkey */
    return ret;
}
+
static int emit_esni_extension(ptls_esni_secret_t *esni, ptls_buffer_t *buf, ptls_iovec_t esni_keys, const char *server_name,
                               size_t key_share_ch_off, size_t key_share_ch_len)
{
    /* Emits the encrypted_server_name extension body: cipher-suite id, our key-share
     * entry, the record digest, and the AEAD-sealed (nonce || SNI extension || zero
     * padding), using the ClientHello's key_share extension bytes as the AAD. */
    ptls_aead_context_t *aead = NULL;
    int ret;

    if ((ret = create_esni_aead(&aead, 1, esni->client.cipher, esni->secret, esni->esni_contents_hash)) != 0)
        goto Exit;

    /* cipher-suite id */
    ptls_buffer_push16(buf, esni->client.cipher->id);
    /* key-share */
    if ((ret = push_key_share_entry(buf, esni->client.key_share->id, esni->client.pubkey)) != 0)
        goto Exit;
    /* record-digest */
    ptls_buffer_push_block(buf, 2, { ptls_buffer_pushv(buf, esni->client.record_digest, esni->client.cipher->hash->digest_size); });
    /* encrypted sni */
    ptls_buffer_push_block(buf, 2, {
        size_t start_off = buf->off;
        /* nonce */
        ptls_buffer_pushv(buf, esni->nonce, PTLS_ESNI_NONCE_SIZE);
        /* emit server-name extension */
        if ((ret = emit_server_name_extension(buf, server_name)) != 0)
            goto Exit;
        /* pad to the fixed length advertised in ESNIKeys, hiding the name's length */
        if (buf->off - start_off < (size_t)(esni->client.padded_length + PTLS_ESNI_NONCE_SIZE)) {
            size_t bytes_to_pad = esni->client.padded_length + PTLS_ESNI_NONCE_SIZE - (buf->off - start_off);
            if ((ret = ptls_buffer_reserve(buf, bytes_to_pad)) != 0)
                goto Exit;
            memset(buf->base + buf->off, 0, bytes_to_pad);
            buf->off += bytes_to_pad;
        }
        /* encrypt in place, appending the AEAD tag */
        if ((ret = ptls_buffer_reserve(buf, aead->algo->tag_size)) != 0)
            goto Exit;
        ptls_aead_encrypt(aead, buf->base + start_off, buf->base + start_off, buf->off - start_off, 0, buf->base + key_share_ch_off,
                          key_share_ch_len);
        buf->off += aead->algo->tag_size;
    });

    ret = 0;
Exit:
    if (aead != NULL)
        ptls_aead_free(aead);
    return ret;
}
+
/* Builds and emits the ClientHello (both the first and, when `cookie` is non-NULL after a
 * HelloRetryRequest, the second flight). Also: sets up ESNI, loads a stored session ticket
 * for resumption, derives the early-data keys when 0-RTT is attempted, and fills in the
 * PSK binder after the message bytes are known. Returns PTLS_ERROR_IN_PROGRESS on success. */
static int send_client_hello(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_handshake_properties_t *properties,
                             ptls_iovec_t *cookie)
{
    ptls_iovec_t resumption_secret = {NULL}, resumption_ticket;
    char *published_sni = NULL;
    uint32_t obfuscated_ticket_age = 0;
    size_t msghash_off;
    uint8_t binder_key[PTLS_MAX_DIGEST_SIZE];
    /* a non-NULL key_schedule means this is the retried (second) ClientHello */
    int ret, is_second_flight = tls->key_schedule != NULL,
             send_sni = tls->server_name != NULL && !ptls_server_name_is_ipaddr(tls->server_name);

    if (properties != NULL) {
        /* try to use ESNI */
        if (!is_second_flight && send_sni && properties->client.esni_keys.base != NULL) {
            /* on malformed ESNIKeys this succeeds with tls->esni left NULL (ESNI disabled) */
            if ((ret = client_setup_esni(tls->ctx, &tls->esni, properties->client.esni_keys, &published_sni, tls->client_random)) !=
                0) {
                goto Exit;
            }
            if (tls->ctx->update_esni_key != NULL) {
                if ((ret = tls->ctx->update_esni_key->cb(tls->ctx->update_esni_key, tls, tls->esni->secret,
                                                         tls->esni->client.cipher->hash, tls->esni->esni_contents_hash)) != 0)
                    goto Exit;
            }
        }
        /* setup resumption-related data. If successful, resumption_secret becomes a non-zero value. */
        if (properties->client.session_ticket.base != NULL) {
            ptls_key_exchange_algorithm_t *key_share = NULL;
            ptls_cipher_suite_t *cipher_suite = NULL;
            uint32_t max_early_data_size;
            if (decode_stored_session_ticket(tls, &key_share, &cipher_suite, &resumption_secret, &obfuscated_ticket_age,
                                             &resumption_ticket, &max_early_data_size, properties->client.session_ticket.base,
                                             properties->client.session_ticket.base + properties->client.session_ticket.len) == 0) {
                tls->client.offered_psk = 1;
                /* key-share selected by HRR should not be overridden */
                if (tls->key_share == NULL)
                    tls->key_share = key_share;
                tls->cipher_suite = cipher_suite;
                if (!is_second_flight && max_early_data_size != 0 && properties->client.max_early_data_size != NULL) {
                    tls->client.using_early_data = 1;
                    *properties->client.max_early_data_size = max_early_data_size;
                }
            } else {
                /* unusable ticket: make sure the PSK machinery below stays disabled */
                resumption_secret = ptls_iovec_init(NULL, 0);
            }
        }
        if (tls->client.using_early_data) {
            properties->client.early_data_acceptance = PTLS_EARLY_DATA_ACCEPTANCE_UNKNOWN;
        } else {
            if (properties->client.max_early_data_size != NULL)
                *properties->client.max_early_data_size = 0;
            properties->client.early_data_acceptance = PTLS_EARLY_DATA_REJECTED;
        }
    }

    /* use the default key share if still not undetermined */
    if (tls->key_share == NULL && !(properties != NULL && properties->client.negotiate_before_key_exchange))
        tls->key_share = tls->ctx->key_exchanges[0];

    if (!is_second_flight) {
        tls->key_schedule = key_schedule_new(tls->cipher_suite, tls->ctx->cipher_suites, tls->ctx->hkdf_label_prefix__obsolete);
        if ((ret = key_schedule_extract(tls->key_schedule, resumption_secret)) != 0)
            goto Exit;
    }

    /* the transcript hash is updated manually at the bottom (the PSK binder must be
     * filled in first), hence ptls_push_message is called with a NULL key schedule */
    msghash_off = emitter->buf->off + emitter->record_header_length;
    ptls_push_message(emitter, NULL, PTLS_HANDSHAKE_TYPE_CLIENT_HELLO, {
        ptls_buffer_t *sendbuf = emitter->buf;
        /* legacy_version */
        ptls_buffer_push16(sendbuf, 0x0303);
        /* random_bytes */
        ptls_buffer_pushv(sendbuf, tls->client_random, sizeof(tls->client_random));
        /* legacy_session_id */
        ptls_buffer_push_block(
            sendbuf, 1, { ptls_buffer_pushv(sendbuf, tls->client.legacy_session_id, sizeof(tls->client.legacy_session_id)); });
        /* cipher_suites */
        ptls_buffer_push_block(sendbuf, 2, {
            ptls_cipher_suite_t **cs = tls->ctx->cipher_suites;
            for (; *cs != NULL; ++cs)
                ptls_buffer_push16(sendbuf, (*cs)->id);
        });
        /* legacy_compression_methods */
        ptls_buffer_push_block(sendbuf, 1, { ptls_buffer_push(sendbuf, 0); });
        /* extensions */
        ptls_buffer_push_block(sendbuf, 2, {
            /* offsets of the key_share extension body, recorded for use as ESNI AAD */
            struct {
                size_t off;
                size_t len;
            } key_share_client_hello;
            buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_KEY_SHARE, {
                key_share_client_hello.off = sendbuf->off;
                ptls_buffer_push_block(sendbuf, 2, {
                    if (tls->key_share != NULL) {
                        if ((ret = tls->key_share->create(tls->key_share, &tls->client.key_share_ctx)) != 0)
                            goto Exit;
                        if ((ret = push_key_share_entry(sendbuf, tls->key_share->id, tls->client.key_share_ctx->pubkey)) != 0)
                            goto Exit;
                    }
                });
                key_share_client_hello.len = sendbuf->off - key_share_client_hello.off;
            });
            if (send_sni) {
                if (tls->esni != NULL) {
                    /* ESNI: send the published (cover) name in cleartext, the real name encrypted */
                    if (published_sni != NULL) {
                        buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_SERVER_NAME, {
                            if ((ret = emit_server_name_extension(sendbuf, published_sni)) != 0)
                                goto Exit;
                        });
                    }
                    buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_ENCRYPTED_SERVER_NAME, {
                        if ((ret = emit_esni_extension(tls->esni, sendbuf, properties->client.esni_keys, tls->server_name,
                                                       key_share_client_hello.off, key_share_client_hello.len)) != 0)
                            goto Exit;
                    });
                } else {
                    buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_SERVER_NAME, {
                        if ((ret = emit_server_name_extension(sendbuf, tls->server_name)) != 0)
                            goto Exit;
                    });
                }
            }
            if (properties != NULL && properties->client.negotiated_protocols.count != 0) {
                buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_ALPN, {
                    ptls_buffer_push_block(sendbuf, 2, {
                        size_t i;
                        for (i = 0; i != properties->client.negotiated_protocols.count; ++i) {
                            ptls_buffer_push_block(sendbuf, 1, {
                                ptls_iovec_t p = properties->client.negotiated_protocols.list[i];
                                ptls_buffer_pushv(sendbuf, p.base, p.len);
                            });
                        }
                    });
                });
            }
            if (tls->ctx->decompress_certificate != NULL) {
                buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_COMPRESS_CERTIFICATE, {
                    ptls_buffer_push_block(sendbuf, 1, {
                        const uint16_t *algo = tls->ctx->decompress_certificate->supported_algorithms;
                        assert(*algo != UINT16_MAX);
                        for (; *algo != UINT16_MAX; ++algo)
                            ptls_buffer_push16(sendbuf, *algo);
                    });
                });
            }
            buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_SUPPORTED_VERSIONS, {
                ptls_buffer_push_block(sendbuf, 1, {
                    size_t i;
                    for (i = 0; i != sizeof(supported_versions) / sizeof(supported_versions[0]); ++i)
                        ptls_buffer_push16(sendbuf, supported_versions[i]);
                });
            });
            buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_SIGNATURE_ALGORITHMS, {
                if ((ret = push_signature_algorithms(sendbuf)) != 0)
                    goto Exit;
            });
            buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_SUPPORTED_GROUPS, {
                ptls_key_exchange_algorithm_t **algo = tls->ctx->key_exchanges;
                ptls_buffer_push_block(sendbuf, 2, {
                    for (; *algo != NULL; ++algo)
                        ptls_buffer_push16(sendbuf, (*algo)->id);
                });
            });
            if (cookie != NULL && cookie->base != NULL) {
                /* echo the cookie received in HelloRetryRequest */
                buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_COOKIE, {
                    ptls_buffer_push_block(sendbuf, 2, { ptls_buffer_pushv(sendbuf, cookie->base, cookie->len); });
                });
            }
            if ((ret = push_additional_extensions(properties, sendbuf)) != 0)
                goto Exit;
            if (tls->ctx->save_ticket != NULL || resumption_secret.base != NULL) {
                buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_PSK_KEY_EXCHANGE_MODES, {
                    ptls_buffer_push_block(sendbuf, 1, {
                        if (!tls->ctx->require_dhe_on_psk)
                            ptls_buffer_push(sendbuf, PTLS_PSK_KE_MODE_PSK);
                        ptls_buffer_push(sendbuf, PTLS_PSK_KE_MODE_PSK_DHE);
                    });
                });
            }
            if (resumption_secret.base != NULL) {
                if (tls->client.using_early_data && !is_second_flight)
                    buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_EARLY_DATA, {});
                /* pre-shared key "MUST be the last extension in the ClientHello" (draft-17 section 4.2.6) */
                buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_PRE_SHARED_KEY, {
                    ptls_buffer_push_block(sendbuf, 2, {
                        ptls_buffer_push_block(sendbuf, 2,
                                               { ptls_buffer_pushv(sendbuf, resumption_ticket.base, resumption_ticket.len); });
                        ptls_buffer_push32(sendbuf, obfuscated_ticket_age);
                    });
                    /* allocate space for PSK binder. the space is filled at the bottom of the function */
                    ptls_buffer_push_block(sendbuf, 2, {
                        ptls_buffer_push_block(sendbuf, 1, {
                            if ((ret = ptls_buffer_reserve(sendbuf, tls->key_schedule->hashes[0].algo->digest_size)) != 0)
                                goto Exit;
                            sendbuf->off += tls->key_schedule->hashes[0].algo->digest_size;
                        });
                    });
                });
            }
        });
    });

    /* update the message hash, filling in the PSK binder HMAC if necessary */
    if (resumption_secret.base != NULL) {
        /* 3 = the two 2-/1-byte length prefixes preceding the binder bytes */
        size_t psk_binder_off = emitter->buf->off - (3 + tls->key_schedule->hashes[0].algo->digest_size);
        if ((ret = derive_secret_with_empty_digest(tls->key_schedule, binder_key, "res binder")) != 0)
            goto Exit;
        /* hash everything up to (but excluding) the binder, then HMAC it into place */
        ptls__key_schedule_update_hash(tls->key_schedule, emitter->buf->base + msghash_off, psk_binder_off - msghash_off);
        msghash_off = psk_binder_off;
        if ((ret = calc_verify_data(emitter->buf->base + psk_binder_off + 3, tls->key_schedule, binder_key)) != 0)
            goto Exit;
    }
    ptls__key_schedule_update_hash(tls->key_schedule, emitter->buf->base + msghash_off, emitter->buf->off - msghash_off);

    if (tls->client.using_early_data) {
        assert(!is_second_flight);
        /* derive and install the 0-RTT ("c e traffic") keys, epoch 1 */
        if ((ret = setup_traffic_protection(tls, 1, "c e traffic", 1, 0)) != 0)
            goto Exit;
        if ((ret = push_change_cipher_spec(tls, emitter->buf)) != 0)
            goto Exit;
    }
    if (resumption_secret.base != NULL && !is_second_flight) {
        if ((ret = derive_exporter_secret(tls, 1)) != 0)
            goto Exit;
    }
    tls->state = cookie == NULL ? PTLS_STATE_CLIENT_EXPECT_SERVER_HELLO : PTLS_STATE_CLIENT_EXPECT_SECOND_SERVER_HELLO;
    ret = PTLS_ERROR_IN_PROGRESS;

Exit:
    if (published_sni != NULL) {
        free(published_sni);
    }
    ptls_clear_memory(binder_key, sizeof(binder_key)); /* wipe the binder key from the stack */
    return ret;
}
+
+static ptls_cipher_suite_t *find_cipher_suite(ptls_context_t *ctx, uint16_t id)
+{
+ ptls_cipher_suite_t **cs;
+
+ for (cs = ctx->cipher_suites; *cs != NULL && (*cs)->id != id; ++cs)
+ ;
+ return *cs;
+}
+
+/* Parses a ServerHello (or HelloRetryRequest) body into `sh`.
+ *
+ * `src`..`end` covers the message body with the handshake header already
+ * stripped.  Side effects on success: selects tls->cipher_suite and may set
+ * tls->is_psk_handshake.  Returns 0 on success, PTLS_ALERT_DECODE_ERROR /
+ * PTLS_ALERT_ILLEGAL_PARAMETER on malformed or unacceptable input. */
+static int decode_server_hello(ptls_t *tls, struct st_ptls_server_hello_t *sh, const uint8_t *src, const uint8_t *const end)
+{
+    int ret;
+
+    *sh = (struct st_ptls_server_hello_t){{0}};
+
+    /* ignore legacy-version */
+    if (end - src < 2) {
+        ret = PTLS_ALERT_DECODE_ERROR;
+        goto Exit;
+    }
+    src += 2;
+
+    /* random; a HelloRetryRequest is recognized by this field carrying the magic HRR value */
+    if (end - src < PTLS_HELLO_RANDOM_SIZE) {
+        ret = PTLS_ALERT_DECODE_ERROR;
+        goto Exit;
+    }
+    sh->is_retry_request = memcmp(src, hello_retry_random, PTLS_HELLO_RANDOM_SIZE) == 0;
+    src += PTLS_HELLO_RANDOM_SIZE;
+
+    /* legacy_session_id (echoed by the server; at most 32 bytes) */
+    ptls_decode_open_block(src, end, 1, {
+        if (end - src > 32) {
+            ret = PTLS_ALERT_DECODE_ERROR;
+            goto Exit;
+        }
+        sh->legacy_session_id = ptls_iovec_init(src, end - src);
+        src = end;
+    });
+
+    { /* select cipher_suite; must be one that we support */
+        uint16_t csid;
+        if ((ret = ptls_decode16(&csid, &src, end)) != 0)
+            goto Exit;
+        if ((tls->cipher_suite = find_cipher_suite(tls->ctx, csid)) == NULL) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+    }
+
+    /* legacy_compression_method (must be "null" in TLS 1.3) */
+    if (src == end || *src++ != 0) {
+        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+        goto Exit;
+    }
+
+    if (sh->is_retry_request)
+        sh->retry_request.selected_group = UINT16_MAX;
+
+    /* walk the extensions; ones we do not handle are skipped */
+    uint16_t exttype, found_version = UINT16_MAX, selected_psk_identity = UINT16_MAX;
+    decode_extensions(src, end, PTLS_HANDSHAKE_TYPE_SERVER_HELLO, &exttype, {
+        if (tls->ctx->on_extension != NULL &&
+            (ret = tls->ctx->on_extension->cb(tls->ctx->on_extension, tls, PTLS_HANDSHAKE_TYPE_SERVER_HELLO, exttype,
+                                              ptls_iovec_init(src, end - src)) != 0))
+            goto Exit;
+        switch (exttype) {
+        case PTLS_EXTENSION_TYPE_SUPPORTED_VERSIONS:
+            if ((ret = ptls_decode16(&found_version, &src, end)) != 0)
+                goto Exit;
+            break;
+        case PTLS_EXTENSION_TYPE_KEY_SHARE:
+            if (sh->is_retry_request) {
+                /* HRR carries only the group the server wants us to use */
+                if ((ret = ptls_decode16(&sh->retry_request.selected_group, &src, end)) != 0)
+                    goto Exit;
+            } else {
+                uint16_t group;
+                if ((ret = decode_key_share_entry(&group, &sh->peerkey, &src, end)) != 0)
+                    goto Exit;
+                if (src != end) {
+                    ret = PTLS_ALERT_DECODE_ERROR;
+                    goto Exit;
+                }
+                /* the share must be for the group we offered */
+                if (tls->key_share == NULL || tls->key_share->id != group) {
+                    ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                    goto Exit;
+                }
+            }
+            break;
+        case PTLS_EXTENSION_TYPE_COOKIE:
+            /* cookie is only permitted in a HelloRetryRequest, and must be non-empty */
+            if (sh->is_retry_request) {
+                ptls_decode_block(src, end, 2, {
+                    if (src == end) {
+                        ret = PTLS_ALERT_DECODE_ERROR;
+                        goto Exit;
+                    }
+                    sh->retry_request.cookie = ptls_iovec_init(src, end - src);
+                    src = end;
+                });
+            } else {
+                ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                goto Exit;
+            }
+            break;
+        case PTLS_EXTENSION_TYPE_PRE_SHARED_KEY:
+            /* pre_shared_key is not permitted in a HelloRetryRequest */
+            if (sh->is_retry_request) {
+                ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                goto Exit;
+            } else {
+                if ((ret = ptls_decode16(&selected_psk_identity, &src, end)) != 0)
+                    goto Exit;
+            }
+            break;
+        default:
+            src = end;
+            break;
+        }
+    });
+
+    /* supported_versions must name a version we implement */
+    if (!is_supported_version(found_version)) {
+        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+        goto Exit;
+    }
+    if (!sh->is_retry_request) {
+        if (selected_psk_identity != UINT16_MAX) {
+            /* server may only accept a PSK that we offered, and we offer at most one (identity 0) */
+            if (!tls->client.offered_psk) {
+                ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                goto Exit;
+            }
+            if (selected_psk_identity != 0) {
+                ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                goto Exit;
+            }
+            tls->is_psk_handshake = 1;
+        }
+        /* a non-PSK handshake requires a key share to derive the secret from */
+        if (sh->peerkey.base == NULL && !tls->is_psk_handshake) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+    }
+
+    ret = 0;
+Exit:
+    return ret;
+}
+
+/* Processes a HelloRetryRequest: discards state belonging to the first flight
+ * (the pending key-share context and, if early data was in flight, its
+ * traffic-encryption key), picks the key-exchange the server asked for, then
+ * sends the second ClientHello via send_client_hello().  Returns the result
+ * of send_client_hello(), or an alert on an unacceptable HRR. */
+static int handle_hello_retry_request(ptls_t *tls, ptls_message_emitter_t *emitter, struct st_ptls_server_hello_t *sh,
+                                      ptls_iovec_t message, ptls_handshake_properties_t *properties)
+{
+    int ret;
+
+    /* drop the key-share exchange started for the first ClientHello */
+    if (tls->client.key_share_ctx != NULL) {
+        tls->client.key_share_ctx->on_exchange(&tls->client.key_share_ctx, 1, NULL, ptls_iovec_init(NULL, 0));
+        tls->client.key_share_ctx = NULL;
+    }
+    if (tls->client.using_early_data) {
+        /* release traffic encryption key so that 2nd CH goes out in cleartext, but keep the epoch at 1 since we've already
+         * called derive-secret */
+        if (tls->ctx->update_traffic_key == NULL) {
+            assert(tls->traffic_protection.enc.aead != NULL);
+            ptls_aead_free(tls->traffic_protection.enc.aead);
+            tls->traffic_protection.enc.aead = NULL;
+        }
+        tls->client.using_early_data = 0;
+    }
+
+    if (sh->retry_request.selected_group != UINT16_MAX) {
+        /* server asked for a specific group; it must be one we are configured with */
+        ptls_key_exchange_algorithm_t **cand;
+        for (cand = tls->ctx->key_exchanges; *cand != NULL; ++cand)
+            if ((*cand)->id == sh->retry_request.selected_group)
+                break;
+        if (*cand == NULL) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+        tls->key_share = *cand;
+    } else if (tls->key_share != NULL) {
+        /* retain the key-share used in first CH, if server does not specify one */
+    } else {
+        /* no group selected by either side; cannot proceed */
+        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+        goto Exit;
+    }
+
+    /* fold the first ClientHello into a message_hash, absorb the HRR, then retry */
+    key_schedule_transform_post_ch1hash(tls->key_schedule);
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+    ret = send_client_hello(tls, emitter, properties, &sh->retry_request.cookie);
+
+Exit:
+    return ret;
+}
+
+/* Client-side handler for ServerHello / HelloRetryRequest.
+ *
+ * Verifies the echoed legacy_session_id, dispatches HRRs to
+ * handle_hello_retry_request(), otherwise completes the (EC)DH exchange,
+ * extracts the handshake secret and installs handshake traffic keys.
+ * Returns PTLS_ERROR_IN_PROGRESS when the handshake should continue, or an
+ * error/alert code.  The ephemeral ECDH secret is wiped before returning. */
+static int client_handle_hello(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_iovec_t message,
+                               ptls_handshake_properties_t *properties)
+{
+    struct st_ptls_server_hello_t sh;
+    ptls_iovec_t ecdh_secret = {NULL};
+    int ret;
+
+    if ((ret = decode_server_hello(tls, &sh, message.base + PTLS_HANDSHAKE_HEADER_SIZE, message.base + message.len)) != 0)
+        goto Exit;
+    /* server must echo back the exact legacy_session_id that we sent */
+    if (!(sh.legacy_session_id.len == sizeof(tls->client.legacy_session_id) &&
+          ptls_mem_equal(sh.legacy_session_id.base, tls->client.legacy_session_id, sizeof(tls->client.legacy_session_id)))) {
+        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+        goto Exit;
+    }
+
+    if (sh.is_retry_request) {
+        if ((ret = key_schedule_select_one(tls->key_schedule, tls->cipher_suite, 0)) != 0)
+            goto Exit;
+        return handle_hello_retry_request(tls, emitter, &sh, message, properties);
+    }
+
+    /* commit to the selected hash; reset to non-PSK schedule if our PSK offer was not taken */
+    if ((ret = key_schedule_select_one(tls->key_schedule, tls->cipher_suite, tls->client.offered_psk && !tls->is_psk_handshake)) !=
+        0)
+        goto Exit;
+
+    /* run (EC)DH using the server's share */
+    if (sh.peerkey.base != NULL) {
+        if ((ret = tls->client.key_share_ctx->on_exchange(&tls->client.key_share_ctx, 1, &ecdh_secret, sh.peerkey)) != 0)
+            goto Exit;
+    }
+
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+
+    /* derive the handshake secret and install server-to-client handshake keys (epoch 2) */
+    if ((ret = key_schedule_extract(tls->key_schedule, ecdh_secret)) != 0)
+        goto Exit;
+    if ((ret = setup_traffic_protection(tls, 0, "s hs traffic", 2, 0)) != 0)
+        goto Exit;
+    if (tls->client.using_early_data) {
+        /* early data is still being sent under the 0-RTT key; stash the client
+         * handshake secret to be commissioned after EndOfEarlyData */
+        if ((tls->pending_handshake_secret = malloc(PTLS_MAX_DIGEST_SIZE)) == NULL) {
+            ret = PTLS_ERROR_NO_MEMORY;
+            goto Exit;
+        }
+        if ((ret = derive_secret(tls->key_schedule, tls->pending_handshake_secret, "c hs traffic")) != 0)
+            goto Exit;
+        if (tls->ctx->update_traffic_key != NULL &&
+            (ret = tls->ctx->update_traffic_key->cb(tls->ctx->update_traffic_key, tls, 1, 2, tls->pending_handshake_secret)) != 0)
+            goto Exit;
+    } else {
+        if ((ret = setup_traffic_protection(tls, 1, "c hs traffic", 2, 0)) != 0)
+            goto Exit;
+    }
+
+    tls->state = PTLS_STATE_CLIENT_EXPECT_ENCRYPTED_EXTENSIONS;
+    ret = PTLS_ERROR_IN_PROGRESS;
+
+Exit:
+    /* the shared secret is key material; wipe before freeing */
+    if (ecdh_secret.base != NULL) {
+        ptls_clear_memory(ecdh_secret.base, ecdh_secret.len);
+        free(ecdh_secret.base);
+    }
+    return ret;
+}
+
+/* Records an extension not handled by the core into `slots` (a UINT16_MAX-
+ * terminated array of at most MAX_UNKNOWN_EXTENSIONS entries), but only when
+ * the application's collect_extension callback claims interest in the type.
+ * A duplicate of an already-collected type is rejected with
+ * PTLS_ALERT_ILLEGAL_PARAMETER; overflow beyond the slot capacity is silently
+ * dropped.  Returns 0 otherwise. */
+static int handle_unknown_extension(ptls_t *tls, ptls_handshake_properties_t *properties, uint16_t type, const uint8_t *src,
+                                    const uint8_t *const end, ptls_raw_extension_t *slots)
+{
+    if (properties != NULL && properties->collect_extension != NULL && properties->collect_extension(tls, properties, type)) {
+        size_t i;
+        for (i = 0; slots[i].type != UINT16_MAX; ++i) {
+            assert(i < MAX_UNKNOWN_EXTENSIONS);
+            if (slots[i].type == type)
+                return PTLS_ALERT_ILLEGAL_PARAMETER;
+        }
+        if (i < MAX_UNKNOWN_EXTENSIONS) {
+            slots[i].type = type;
+            slots[i].data = ptls_iovec_init(src, end - src);
+            slots[i + 1].type = UINT16_MAX; /* keep the list terminated */
+        }
+    }
+    return 0;
+}
+
+/* Hands the extensions collected by handle_unknown_extension() to the
+ * application's collected_extensions callback, returning its result; no-op
+ * (returns 0) when no collect_extension callback is registered. */
+static int report_unknown_extensions(ptls_t *tls, ptls_handshake_properties_t *properties, ptls_raw_extension_t *slots)
+{
+    if (properties != NULL && properties->collect_extension != NULL) {
+        assert(properties->collected_extensions != NULL);
+        return properties->collected_extensions(tls, properties, slots);
+    } else {
+        return 0;
+    }
+}
+
+/* Client-side handler for the EncryptedExtensions message.
+ *
+ * Validates server_name / ESNI acceptance / ALPN / early_data extensions,
+ * forwards unrecognized ones to the application, and records whether the
+ * server accepted early data.  Returns PTLS_ERROR_IN_PROGRESS on success. */
+static int client_handle_encrypted_extensions(ptls_t *tls, ptls_iovec_t message, ptls_handshake_properties_t *properties)
+{
+    const uint8_t *src = message.base + PTLS_HANDSHAKE_HEADER_SIZE, *const end = message.base + message.len, *esni_nonce = NULL;
+    uint16_t type;
+    ptls_raw_extension_t unknown_extensions[MAX_UNKNOWN_EXTENSIONS + 1];
+    int ret, skip_early_data = 1;
+
+    unknown_extensions[0].type = UINT16_MAX;
+
+    decode_extensions(src, end, PTLS_HANDSHAKE_TYPE_ENCRYPTED_EXTENSIONS, &type, {
+        if (tls->ctx->on_extension != NULL &&
+            (ret = tls->ctx->on_extension->cb(tls->ctx->on_extension, tls, PTLS_HANDSHAKE_TYPE_ENCRYPTED_EXTENSIONS, type,
+                                              ptls_iovec_init(src, end - src)) != 0))
+            goto Exit;
+        switch (type) {
+        case PTLS_EXTENSION_TYPE_SERVER_NAME:
+            /* body must be empty, and only acceptable if we sent a (non-IP) SNI */
+            if (src != end) {
+                ret = PTLS_ALERT_DECODE_ERROR;
+                goto Exit;
+            }
+            if (!(tls->server_name != NULL && !ptls_server_name_is_ipaddr(tls->server_name))) {
+                ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                goto Exit;
+            }
+            break;
+        case PTLS_EXTENSION_TYPE_ENCRYPTED_SERVER_NAME:
+            /* NOTE(review): *src is read before checking src != end; a zero-length
+             * extension body would read one byte past the body — confirm that an
+             * empty extension cannot reach this point. */
+            if (*src == PTLS_ESNI_RESPONSE_TYPE_ACCEPT) {
+                if (end - src != PTLS_ESNI_NONCE_SIZE + 1) {
+                    ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                    goto Exit;
+                }
+                esni_nonce = src + 1;
+            } else {
+                /* TODO: provide API to parse the RETRY REQUEST response */
+                ret = PTLS_ERROR_ESNI_RETRY;
+                goto Exit;
+            }
+            break;
+        case PTLS_EXTENSION_TYPE_ALPN:
+            /* exactly one non-empty protocol name must be present */
+            ptls_decode_block(src, end, 2, {
+                ptls_decode_open_block(src, end, 1, {
+                    if (src == end) {
+                        ret = PTLS_ALERT_DECODE_ERROR;
+                        goto Exit;
+                    }
+                    if ((ret = ptls_set_negotiated_protocol(tls, (const char *)src, end - src)) != 0)
+                        goto Exit;
+                    src = end;
+                });
+                if (src != end) {
+                    ret = PTLS_ALERT_HANDSHAKE_FAILURE;
+                    goto Exit;
+                }
+            });
+            break;
+        case PTLS_EXTENSION_TYPE_EARLY_DATA:
+            /* server may only accept early data if we offered it */
+            if (!tls->client.using_early_data) {
+                ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                goto Exit;
+            }
+            skip_early_data = 0;
+            break;
+        default:
+            handle_unknown_extension(tls, properties, type, src, end, unknown_extensions);
+            break;
+        }
+        src = end;
+    });
+
+    if (tls->esni != NULL) {
+        /* we sent ESNI: the server must have echoed the nonce we encrypted */
+        if (esni_nonce == NULL || !ptls_mem_equal(esni_nonce, tls->esni->nonce, PTLS_ESNI_NONCE_SIZE)) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+        free_esni_secret(&tls->esni, 0);
+    } else {
+        /* no ESNI sent: the server must not pretend to have accepted one */
+        if (esni_nonce != NULL) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+    }
+
+    if (tls->client.using_early_data) {
+        if (skip_early_data)
+            tls->client.using_early_data = 0;
+        if (properties != NULL)
+            properties->client.early_data_acceptance = skip_early_data ? PTLS_EARLY_DATA_REJECTED : PTLS_EARLY_DATA_ACCEPTED;
+    }
+    if ((ret = report_unknown_extensions(tls, properties, unknown_extensions)) != 0)
+        goto Exit;
+
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+    /* resumed handshakes skip Certificate/CertificateVerify */
+    tls->state =
+        tls->is_psk_handshake ? PTLS_STATE_CLIENT_EXPECT_FINISHED : PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_REQUEST_OR_CERTIFICATE;
+    ret = PTLS_ERROR_IN_PROGRESS;
+
+Exit:
+    return ret;
+}
+
+/* Parses a CertificateRequest body into `cr`.
+ *
+ * Copies the certificate_request_context into a freshly malloc'ed buffer
+ * (cr->context, owned by the caller) and collects the signature_algorithms
+ * extension, which is mandatory (PTLS_ALERT_MISSING_EXTENSION when absent).
+ * Returns 0 on success or an alert / error code. */
+static int decode_certificate_request(ptls_t *tls, struct st_ptls_certificate_request_t *cr, const uint8_t *src,
+                                      const uint8_t *const end)
+{
+    int ret;
+    uint16_t exttype = 0;
+
+    /* certificate request context (copied; at most 255 bytes) */
+    ptls_decode_open_block(src, end, 1, {
+        size_t len = end - src;
+        if (len > 255) {
+            ret = PTLS_ALERT_DECODE_ERROR;
+            goto Exit;
+        }
+        /* allocate at least one byte so that a zero-length context still yields a non-NULL base */
+        if ((cr->context.base = malloc(len != 0 ? len : 1)) == NULL) {
+            ret = PTLS_ERROR_NO_MEMORY;
+            goto Exit;
+        }
+        cr->context.len = len;
+        memcpy(cr->context.base, src, len);
+        src = end;
+    });
+
+    /* decode extensions; only signature_algorithms is interpreted here */
+    decode_extensions(src, end, PTLS_HANDSHAKE_TYPE_CERTIFICATE_REQUEST, &exttype, {
+        if (tls->ctx->on_extension != NULL &&
+            (ret = tls->ctx->on_extension->cb(tls->ctx->on_extension, tls, PTLS_HANDSHAKE_TYPE_CERTIFICATE_REQUEST, exttype,
+                                              ptls_iovec_init(src, end - src)) != 0))
+            goto Exit;
+        switch (exttype) {
+        case PTLS_EXTENSION_TYPE_SIGNATURE_ALGORITHMS:
+            if ((ret = decode_signature_algorithms(&cr->signature_algorithms, &src, end)) != 0)
+                goto Exit;
+            break;
+        }
+        src = end;
+    });
+
+    if (cr->signature_algorithms.count == 0) {
+        ret = PTLS_ALERT_MISSING_EXTENSION;
+        goto Exit;
+    }
+
+    ret = 0;
+Exit:
+    return ret;
+}
+
+/* Serializes a TLS 1.3 Certificate message body into `buf`: the request
+ * context followed by the certificate_list.  `ocsp_status`, when non-empty,
+ * is attached as a status_request extension on the first (end-entity)
+ * certificate only.  Returns 0 on success; the ptls_buffer_push* macros jump
+ * to Exit with `ret` set on allocation failure. */
+int ptls_build_certificate_message(ptls_buffer_t *buf, ptls_iovec_t context, ptls_iovec_t *certificates, size_t num_certificates,
+                                   ptls_iovec_t ocsp_status)
+{
+    int ret;
+
+    ptls_buffer_push_block(buf, 1, { ptls_buffer_pushv(buf, context.base, context.len); });
+    ptls_buffer_push_block(buf, 3, {
+        size_t i;
+        for (i = 0; i != num_certificates; ++i) {
+            ptls_buffer_push_block(buf, 3, { ptls_buffer_pushv(buf, certificates[i].base, certificates[i].len); });
+            ptls_buffer_push_block(buf, 2, {
+                if (i == 0 && ocsp_status.len != 0) {
+                    buffer_push_extension(buf, PTLS_EXTENSION_TYPE_STATUS_REQUEST, {
+                        ptls_buffer_push(buf, 1); /* status_type == ocsp */
+                        ptls_buffer_push_block(buf, 3, { ptls_buffer_pushv(buf, ocsp_status.base, ocsp_status.len); });
+                    });
+                }
+            });
+        }
+    });
+
+    ret = 0;
+Exit:
+    return ret;
+}
+
+/* Default ptls_emit_certificate_t callback: emits a Certificate message built
+ * from the statically configured tls->ctx->certificates, without an OCSP
+ * status.  `push_status_request` is intentionally ignored here. */
+static int default_emit_certificate_cb(ptls_emit_certificate_t *_self, ptls_t *tls, ptls_message_emitter_t *emitter,
+                                       ptls_key_schedule_t *key_sched, ptls_iovec_t context, int push_status_request)
+{
+    int ret;
+
+    ptls_push_message(emitter, key_sched, PTLS_HANDSHAKE_TYPE_CERTIFICATE, {
+        if ((ret = ptls_build_certificate_message(emitter->buf, context, tls->ctx->certificates.list, tls->ctx->certificates.count,
+                                                  ptls_iovec_init(NULL, 0))) != 0)
+            goto Exit;
+    });
+
+    ret = 0;
+Exit:
+    return ret;
+}
+
+/* Emits the Certificate (via the configured or default emit_certificate
+ * callback) and, when a sign_certificate callback is available, the
+ * CertificateVerify message.  `context_string` selects the client/server
+ * signature context; `signature_algorithms` are the peer's advertised
+ * algorithms and must be non-empty.  Returns 0 on success. */
+static int send_certificate_and_certificate_verify(ptls_t *tls, ptls_message_emitter_t *emitter,
+                                                   struct st_ptls_signature_algorithms_t *signature_algorithms,
+                                                   ptls_iovec_t context, const char *context_string, int push_status_request)
+{
+    static ptls_emit_certificate_t default_emit_certificate = {default_emit_certificate_cb};
+    ptls_emit_certificate_t *emit_certificate =
+        tls->ctx->emit_certificate != NULL ? tls->ctx->emit_certificate : &default_emit_certificate;
+    int ret;
+
+    if (signature_algorithms->count == 0) {
+        ret = PTLS_ALERT_MISSING_EXTENSION;
+        goto Exit;
+    }
+
+    /* send Certificate (or the equivalent) */
+    if ((ret = emit_certificate->cb(emit_certificate, tls, emitter, tls->key_schedule, context, push_status_request)) != 0)
+        goto Exit;
+
+    /* build and send CertificateVerify */
+    if (tls->ctx->sign_certificate != NULL) {
+        ptls_push_message(emitter, tls->key_schedule, PTLS_HANDSHAKE_TYPE_CERTIFICATE_VERIFY, {
+            ptls_buffer_t *sendbuf = emitter->buf;
+            size_t algo_off = sendbuf->off;
+            ptls_buffer_push16(sendbuf, 0); /* algorithm id placeholder; filled in later */
+            ptls_buffer_push_block(sendbuf, 2, {
+                uint16_t algo;
+                uint8_t data[PTLS_MAX_CERTIFICATE_VERIFY_SIGNDATA_SIZE];
+                size_t datalen = build_certificate_verify_signdata(data, tls->key_schedule, context_string);
+                if ((ret = tls->ctx->sign_certificate->cb(tls->ctx->sign_certificate, tls, &algo, sendbuf,
+                                                          ptls_iovec_init(data, datalen), signature_algorithms->list,
+                                                          signature_algorithms->count)) != 0) {
+                    goto Exit;
+                }
+                /* backpatch the algorithm id chosen by the signer */
+                sendbuf->base[algo_off] = (uint8_t)(algo >> 8);
+                sendbuf->base[algo_off + 1] = (uint8_t)algo;
+            });
+        });
+    }
+
+Exit:
+    return ret;
+}
+
+/* Client-side handler for CertificateRequest: decodes it into
+ * tls->client.certificate_request (the actual response is deferred until the
+ * server's Finished has been verified) and advances the state machine.
+ * Returns PTLS_ERROR_IN_PROGRESS on success. */
+static int client_handle_certificate_request(ptls_t *tls, ptls_iovec_t message, ptls_handshake_properties_t *properties)
+{
+    const uint8_t *src = message.base + PTLS_HANDSHAKE_HEADER_SIZE, *const end = message.base + message.len;
+    int ret = 0;
+
+    if ((ret = decode_certificate_request(tls, &tls->client.certificate_request, src, end)) != 0)
+        return ret;
+
+    /* This field SHALL be zero length unless used for the post-handshake authentication exchanges (section 4.3.2) */
+    if (tls->client.certificate_request.context.len != 0)
+        return PTLS_ALERT_ILLEGAL_PARAMETER;
+
+    tls->state = PTLS_STATE_CLIENT_EXPECT_CERTIFICATE;
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+
+    return PTLS_ERROR_IN_PROGRESS;
+}
+
+/* Parses a Certificate message body (`src`..`end`) shared between client and
+ * server paths, and, when certificates are present and a verify_certificate
+ * callback is configured, invokes it to obtain the CertificateVerify checker
+ * stored into tls->certificate_verify.  Only the first 16 certificates are
+ * collected; per-certificate extensions are forwarded to on_extension but
+ * otherwise ignored.  `*got_certs` is set to whether any certificate was seen. */
+static int handle_certificate(ptls_t *tls, const uint8_t *src, const uint8_t *end, int *got_certs)
+{
+    ptls_iovec_t certs[16];
+    size_t num_certs = 0;
+    int ret = 0;
+
+    /* certificate request context must be empty during the handshake */
+    ptls_decode_open_block(src, end, 1, {
+        if (src != end) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+    });
+    /* certificate_list */
+    ptls_decode_block(src, end, 3, {
+        while (src != end) {
+            ptls_decode_open_block(src, end, 3, {
+                /* certificates beyond the capacity of `certs` are silently dropped */
+                if (num_certs < sizeof(certs) / sizeof(certs[0]))
+                    certs[num_certs++] = ptls_iovec_init(src, end - src);
+                src = end;
+            });
+            uint16_t type;
+            decode_open_extensions(src, end, PTLS_HANDSHAKE_TYPE_CERTIFICATE, &type, {
+                if (tls->ctx->on_extension != NULL &&
+                    (ret = tls->ctx->on_extension->cb(tls->ctx->on_extension, tls, PTLS_HANDSHAKE_TYPE_CERTIFICATE, type,
+                                                      ptls_iovec_init(src, end - src)) != 0))
+                    goto Exit;
+                src = end;
+            });
+        }
+    });
+
+    if (num_certs != 0 && tls->ctx->verify_certificate != NULL) {
+        if ((ret = tls->ctx->verify_certificate->cb(tls->ctx->verify_certificate, tls, &tls->certificate_verify.cb,
+                                                    &tls->certificate_verify.verify_ctx, certs, num_certs)) != 0)
+            goto Exit;
+    }
+
+    *got_certs = num_certs != 0;
+
+Exit:
+    return ret;
+}
+
+/* Client-side certificate processing: delegates to handle_certificate() and
+ * additionally requires that the server actually sent at least one
+ * certificate (an empty list from the server is illegal). */
+static int client_do_handle_certificate(ptls_t *tls, const uint8_t *src, const uint8_t *end)
+{
+    int got_certs, ret;
+
+    if ((ret = handle_certificate(tls, src, end, &got_certs)) != 0)
+        return ret;
+    if (!got_certs)
+        return PTLS_ALERT_ILLEGAL_PARAMETER;
+
+    return 0;
+}
+
+/* Client-side handler for a (plain) Certificate message: processes it, folds
+ * it into the transcript hash and advances to expecting CertificateVerify.
+ * Returns PTLS_ERROR_IN_PROGRESS on success. */
+static int client_handle_certificate(ptls_t *tls, ptls_iovec_t message)
+{
+    int ret;
+
+    if ((ret = client_do_handle_certificate(tls, message.base + PTLS_HANDSHAKE_HEADER_SIZE, message.base + message.len)) != 0)
+        return ret;
+
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+
+    tls->state = PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_VERIFY;
+    return PTLS_ERROR_IN_PROGRESS;
+}
+
+/* Client-side handler for a CompressedCertificate message (RFC 8879 style).
+ *
+ * Requires a decompress_certificate callback; decompresses into a temporary
+ * buffer (capped at 64 KiB) and then runs the normal certificate processing
+ * on the decompressed bytes.  Note that the transcript hash is updated with
+ * the compressed message as received.  Returns PTLS_ERROR_IN_PROGRESS on
+ * success. */
+static int client_handle_compressed_certificate(ptls_t *tls, ptls_iovec_t message)
+{
+    const uint8_t *src = message.base + PTLS_HANDSHAKE_HEADER_SIZE, *const end = message.base + message.len;
+    uint16_t algo;
+    uint32_t uncompressed_size;
+    uint8_t *uncompressed = NULL;
+    int ret;
+
+    if (tls->ctx->decompress_certificate == NULL) {
+        ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
+        goto Exit;
+    }
+
+    /* decode the algorithm id and the advertised uncompressed length */
+    if ((ret = ptls_decode16(&algo, &src, end)) != 0)
+        goto Exit;
+    if ((ret = ptls_decode24(&uncompressed_size, &src, end)) != 0)
+        goto Exit;
+    if (uncompressed_size > 65536) { /* TODO find a sensible number */
+        ret = PTLS_ALERT_BAD_CERTIFICATE;
+        goto Exit;
+    }
+    if ((uncompressed = malloc(uncompressed_size)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    ptls_decode_block(src, end, 3, {
+        if ((ret = tls->ctx->decompress_certificate->cb(tls->ctx->decompress_certificate, tls, algo,
+                                                        ptls_iovec_init(uncompressed, uncompressed_size),
+                                                        ptls_iovec_init(src, end - src))) != 0)
+            goto Exit;
+        src = end;
+    });
+
+    /* handle the decompressed Certificate body */
+    if ((ret = client_do_handle_certificate(tls, uncompressed, uncompressed + uncompressed_size)) != 0)
+        goto Exit;
+
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+    tls->state = PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_VERIFY;
+    ret = PTLS_ERROR_IN_PROGRESS;
+
+Exit:
+    free(uncompressed);
+    return ret;
+}
+
+/* Server-side handler for a client Certificate message.  Unlike the client
+ * path, an empty certificate list maps to PTLS_ALERT_CERTIFICATE_REQUIRED.
+ * Returns PTLS_ERROR_IN_PROGRESS on success. */
+static int server_handle_certificate(ptls_t *tls, ptls_iovec_t message)
+{
+    int got_certs, ret;
+
+    if ((ret = handle_certificate(tls, message.base + PTLS_HANDSHAKE_HEADER_SIZE, message.base + message.len, &got_certs)) != 0)
+        return ret;
+    if (!got_certs)
+        return PTLS_ALERT_CERTIFICATE_REQUIRED;
+
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+
+    tls->state = PTLS_STATE_SERVER_EXPECT_CERTIFICATE_VERIFY;
+    return PTLS_ERROR_IN_PROGRESS;
+}
+
+/* Verifies a CertificateVerify message (shared between client and server).
+ *
+ * Only rsa_pss_rsae_sha256 and ecdsa_secp256r1_sha256 signature schemes are
+ * accepted.  The signed data is rebuilt from the current transcript hash and
+ * `context_string`, then checked by the callback installed earlier by
+ * handle_certificate() (verification is skipped when no callback is set).
+ * The callback pointer is consumed (reset to NULL) either way.  On success
+ * the message is folded into the transcript hash; returns 0 or an alert. */
+static int handle_certificate_verify(ptls_t *tls, ptls_iovec_t message, const char *context_string)
+{
+    const uint8_t *src = message.base + PTLS_HANDSHAKE_HEADER_SIZE, *const end = message.base + message.len;
+    uint16_t algo;
+    ptls_iovec_t signature;
+    uint8_t signdata[PTLS_MAX_CERTIFICATE_VERIFY_SIGNDATA_SIZE];
+    size_t signdata_size;
+    int ret;
+
+    /* decode algorithm and signature */
+    if ((ret = ptls_decode16(&algo, &src, end)) != 0)
+        goto Exit;
+    ptls_decode_block(src, end, 2, {
+        signature = ptls_iovec_init(src, end - src);
+        src = end;
+    });
+
+    /* validate the signature scheme */
+    switch (algo) {
+    case PTLS_SIGNATURE_RSA_PSS_RSAE_SHA256:
+    case PTLS_SIGNATURE_ECDSA_SECP256R1_SHA256:
+        /* ok */
+        break;
+    default:
+        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+        goto Exit;
+    }
+    signdata_size = build_certificate_verify_signdata(signdata, tls->key_schedule, context_string);
+    if (tls->certificate_verify.cb != NULL) {
+        ret = tls->certificate_verify.cb(tls->certificate_verify.verify_ctx, ptls_iovec_init(signdata, signdata_size), signature);
+    } else {
+        ret = 0;
+    }
+    /* the signed data covers the transcript hash; wipe it after use */
+    ptls_clear_memory(signdata, signdata_size);
+    tls->certificate_verify.cb = NULL;
+    if (ret != 0) {
+        goto Exit;
+    }
+
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+
+Exit:
+    return ret;
+}
+
+/* Client-side CertificateVerify handler: verifies with the server context
+ * string, then advances to expecting Finished. */
+static int client_handle_certificate_verify(ptls_t *tls, ptls_iovec_t message)
+{
+    int ret = handle_certificate_verify(tls, message, PTLS_SERVER_CERTIFICATE_VERIFY_CONTEXT_STRING);
+
+    if (ret == 0) {
+        tls->state = PTLS_STATE_CLIENT_EXPECT_FINISHED;
+        ret = PTLS_ERROR_IN_PROGRESS;
+    }
+
+    return ret;
+}
+
+/* Server-side CertificateVerify handler: verifies with the client context
+ * string, then advances to expecting the client Finished. */
+static int server_handle_certificate_verify(ptls_t *tls, ptls_iovec_t message)
+{
+    int ret = handle_certificate_verify(tls, message, PTLS_CLIENT_CERTIFICATE_VERIFY_CONTEXT_STRING);
+
+    if (ret == 0) {
+        tls->state = PTLS_STATE_SERVER_EXPECT_FINISHED;
+        ret = PTLS_ERROR_IN_PROGRESS;
+    }
+
+    return ret;
+}
+
+/* Client-side handler for the server Finished message.
+ *
+ * Verifies the Finished MAC, derives the application-traffic secrets (server
+ * key installed immediately, client key after our own Finished is sent),
+ * emits EndOfEarlyData / client Certificate(+Verify) / Finished as required,
+ * then installs the client application-traffic key and enters the
+ * post-handshake state.  The staged client secret is wiped before returning. */
+static int client_handle_finished(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_iovec_t message)
+{
+    uint8_t send_secret[PTLS_MAX_DIGEST_SIZE];
+    int ret;
+
+    if ((ret = verify_finished(tls, message)) != 0)
+        goto Exit;
+    ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+
+    /* update traffic keys by using messages upto ServerFinished, but commission them after sending ClientFinished */
+    if ((ret = key_schedule_extract(tls->key_schedule, ptls_iovec_init(NULL, 0))) != 0)
+        goto Exit;
+    if ((ret = setup_traffic_protection(tls, 0, "s ap traffic", 3, 0)) != 0)
+        goto Exit;
+    if ((ret = derive_secret(tls->key_schedule, send_secret, "c ap traffic")) != 0)
+        goto Exit;
+    if ((ret = derive_exporter_secret(tls, 0)) != 0)
+        goto Exit;
+
+    /* if sending early data, emit EOED and commision the client handshake traffic secret */
+    if (tls->pending_handshake_secret != NULL) {
+        assert(tls->traffic_protection.enc.aead != NULL || tls->ctx->update_traffic_key != NULL);
+        if (tls->client.using_early_data && !tls->ctx->omit_end_of_early_data)
+            ptls_push_message(emitter, tls->key_schedule, PTLS_HANDSHAKE_TYPE_END_OF_EARLY_DATA, {});
+        tls->client.using_early_data = 0;
+        if ((ret = commission_handshake_secret(tls)) != 0)
+            goto Exit;
+    }
+
+    if (tls->client.certificate_request.context.base != NULL) {
+        /* If this is a resumed session, the server must not send the certificate request in the handshake */
+        if (tls->is_psk_handshake) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+        ret = send_certificate_and_certificate_verify(tls, emitter, &tls->client.certificate_request.signature_algorithms,
+                                                      tls->client.certificate_request.context,
+                                                      PTLS_CLIENT_CERTIFICATE_VERIFY_CONTEXT_STRING, 0);
+        free(tls->client.certificate_request.context.base);
+        tls->client.certificate_request.context = ptls_iovec_init(NULL, 0);
+        if (ret != 0)
+            goto Exit;
+    }
+
+    if ((ret = push_change_cipher_spec(tls, emitter->buf)) != 0)
+        goto Exit;
+    /* FIX: propagate a send_finished() failure; previously `ret` was assigned here but then
+     * unconditionally overwritten by the setup_traffic_protection() call below, silently
+     * discarding the error and proceeding to install the new send key. */
+    if ((ret = send_finished(tls, emitter)) != 0)
+        goto Exit;
+
+    /* commission the client application-traffic key now that Finished is out */
+    memcpy(tls->traffic_protection.enc.secret, send_secret, sizeof(send_secret));
+    if ((ret = setup_traffic_protection(tls, 1, NULL, 3, 0)) != 0)
+        goto Exit;
+
+    tls->state = PTLS_STATE_CLIENT_POST_HANDSHAKE;
+
+Exit:
+    ptls_clear_memory(send_secret, sizeof(send_secret));
+    return ret;
+}
+
+/* Client-side handler for NewSessionTicket.
+ *
+ * Validates the wire format, then — when a save_ticket callback is configured
+ * — serializes (timestamp, key-exchange id, cipher-suite id, raw ticket body,
+ * derived resumption secret) and hands the blob to the application for later
+ * resumption.  Returns 0 on success. */
+static int client_handle_new_session_ticket(ptls_t *tls, ptls_iovec_t message)
+{
+    const uint8_t *src = message.base + PTLS_HANDSHAKE_HEADER_SIZE, *const end = message.base + message.len;
+    ptls_iovec_t ticket_nonce;
+    int ret;
+
+    { /* verify the format; only ticket_nonce is kept (for deriving the resumption secret below) */
+        uint32_t ticket_lifetime, ticket_age_add, max_early_data_size;
+        ptls_iovec_t ticket;
+        if ((ret = decode_new_session_ticket(tls, &ticket_lifetime, &ticket_age_add, &ticket_nonce, &ticket, &max_early_data_size,
+                                             src, end)) != 0)
+            return ret;
+    }
+
+    /* do nothing if use of session ticket is disabled */
+    if (tls->ctx->save_ticket == NULL)
+        return 0;
+
+    /* save the extension, along with the key of myself */
+    ptls_buffer_t ticket_buf;
+    uint8_t ticket_buf_small[512]; /* stack storage; ptls_buffer_t falls back to heap if exceeded */
+    ptls_buffer_init(&ticket_buf, ticket_buf_small, sizeof(ticket_buf_small));
+    ptls_buffer_push64(&ticket_buf, tls->ctx->get_time->cb(tls->ctx->get_time));
+    ptls_buffer_push16(&ticket_buf, tls->key_share->id);
+    ptls_buffer_push16(&ticket_buf, tls->cipher_suite->id);
+    ptls_buffer_push_block(&ticket_buf, 3, { ptls_buffer_pushv(&ticket_buf, src, end - src); });
+    ptls_buffer_push_block(&ticket_buf, 2, {
+        /* append the resumption secret derived from the ticket nonce */
+        if ((ret = ptls_buffer_reserve(&ticket_buf, tls->key_schedule->hashes[0].algo->digest_size)) != 0)
+            goto Exit;
+        if ((ret = derive_resumption_secret(tls->key_schedule, ticket_buf.base + ticket_buf.off, ticket_nonce)) != 0)
+            goto Exit;
+        ticket_buf.off += tls->key_schedule->hashes[0].algo->digest_size;
+    });
+
+    if ((ret = tls->ctx->save_ticket->cb(tls->ctx->save_ticket, tls, ptls_iovec_init(ticket_buf.base, ticket_buf.off))) != 0)
+        goto Exit;
+
+    ret = 0;
+Exit:
+    ptls_buffer_dispose(&ticket_buf);
+    return ret;
+}
+
+/* Decodes the server_name extension's ServerNameList, advancing *src.
+ * Stores the last host_name entry (NUL bytes inside a name are rejected);
+ * entries of other name types are skipped.  Returns 0 on success or a
+ * PTLS_ALERT_* code. */
+static int client_hello_decode_server_name(ptls_iovec_t *name, const uint8_t **src, const uint8_t *const end)
+{
+    int ret = 0;
+
+    ptls_decode_open_block(*src, end, 2, {
+        if (*src == end) {
+            ret = PTLS_ALERT_DECODE_ERROR;
+            goto Exit;
+        }
+        do {
+            uint8_t type = *(*src)++;
+            ptls_decode_open_block(*src, end, 2, {
+                switch (type) {
+                case PTLS_SERVER_NAME_TYPE_HOSTNAME:
+                    /* embedded NUL would make the name unsafe to treat as a C string later */
+                    if (memchr(*src, '\0', end - *src) != 0) {
+                        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                        goto Exit;
+                    }
+                    *name = ptls_iovec_init(*src, end - *src);
+                    break;
+                default:
+                    break;
+                }
+                *src = end;
+            });
+        } while (*src != end);
+    });
+
+Exit:
+    return ret;
+}
+
+/* Server-side ESNI decryption for a ClientHello that carried an
+ * encrypted_server_name extension (parsed into `ch`).
+ *
+ * Steps: allocate *secret; locate the matching ESNI config by cipher-suite
+ * and record_digest; find the matching private key; compute ESNIContents
+ * hash; run the key exchange; AEAD-decrypt the SNI (AAD is the ClientHello's
+ * key_shares extension body); decode the inner ServerNameList and verify the
+ * zero padding.  On success *server_name points at a malloc'ed buffer owned
+ * by the caller and *secret holds the derived ESNI state; on failure *secret
+ * is freed and cleared.  Returns 0 or an alert / error code. */
+static int client_hello_decrypt_esni(ptls_context_t *ctx, ptls_iovec_t *server_name, ptls_esni_secret_t **secret,
+                                     struct st_ptls_client_hello_t *ch)
+{
+    ptls_esni_context_t **esni;
+    ptls_key_exchange_context_t **key_share_ctx;
+    uint8_t *decrypted = NULL;
+    ptls_aead_context_t *aead = NULL;
+    int ret;
+
+    /* allocate secret */
+    assert(*secret == NULL);
+    if ((*secret = malloc(sizeof(**secret))) == NULL)
+        return PTLS_ERROR_NO_MEMORY;
+    memset(*secret, 0, sizeof(**secret));
+
+    /* find the matching esni structure (by cipher-suite, then record_digest) */
+    for (esni = ctx->esni; *esni != NULL; ++esni) {
+        size_t i;
+        for (i = 0; (*esni)->cipher_suites[i].cipher_suite != NULL; ++i)
+            if ((*esni)->cipher_suites[i].cipher_suite->id == ch->esni.cipher->id)
+                break;
+        if ((*esni)->cipher_suites[i].cipher_suite == NULL) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+        if (memcmp((*esni)->cipher_suites[i].record_digest, ch->esni.record_digest, ch->esni.cipher->hash->digest_size) == 0) {
+            (*secret)->version = (*esni)->version;
+            break;
+        }
+    }
+    if (*esni == NULL) {
+        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+        goto Exit;
+    }
+
+    /* find the matching private key for ESNI decryption */
+    for (key_share_ctx = (*esni)->key_exchanges; *key_share_ctx != NULL; ++key_share_ctx)
+        if ((*key_share_ctx)->algo->id == ch->esni.key_share->id)
+            break;
+    if (*key_share_ctx == NULL) {
+        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+        goto Exit;
+    }
+
+    /* calculate ESNIContents */
+    if ((ret = build_esni_contents_hash(ch->esni.cipher->hash, (*secret)->esni_contents_hash, ch->esni.record_digest,
+                                        ch->esni.key_share->id, ch->esni.peer_key, ch->random_bytes)) != 0)
+        goto Exit;
+    /* derive the shared secret */
+    if ((ret = (*key_share_ctx)->on_exchange(key_share_ctx, 0, &(*secret)->secret, ch->esni.peer_key)) != 0)
+        goto Exit;
+    /* decrypt; ciphertext must be exactly padded_length + nonce + tag */
+    if (ch->esni.encrypted_sni.len - ch->esni.cipher->aead->tag_size != (*esni)->padded_length + PTLS_ESNI_NONCE_SIZE) {
+        ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+        goto Exit;
+    }
+    if ((decrypted = malloc((*esni)->padded_length + PTLS_ESNI_NONCE_SIZE)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    if ((ret = create_esni_aead(&aead, 0, ch->esni.cipher, (*secret)->secret, (*secret)->esni_contents_hash)) != 0)
+        goto Exit;
+    /* AAD is the ClientHello's key_share extension body */
+    if (ptls_aead_decrypt(aead, decrypted, ch->esni.encrypted_sni.base, ch->esni.encrypted_sni.len, 0, ch->key_shares.base,
+                          ch->key_shares.len) != (*esni)->padded_length + PTLS_ESNI_NONCE_SIZE) {
+        ret = PTLS_ALERT_DECRYPT_ERROR;
+        goto Exit;
+    }
+    ptls_aead_free(aead);
+    aead = NULL;
+
+    { /* decode sni: nonce, ServerNameList, then all-zero padding */
+        const uint8_t *src = decrypted, *const end = src + (*esni)->padded_length;
+        ptls_iovec_t found_name;
+        if (end - src < PTLS_ESNI_NONCE_SIZE) {
+            ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+            goto Exit;
+        }
+        memcpy((*secret)->nonce, src, PTLS_ESNI_NONCE_SIZE);
+        src += PTLS_ESNI_NONCE_SIZE;
+        if ((ret = client_hello_decode_server_name(&found_name, &src, end)) != 0)
+            goto Exit;
+        for (; src != end; ++src) {
+            if (*src != '\0') {
+                ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+                goto Exit;
+            }
+        }
+        /* if successful, reuse memory allocated for padded_server_name for storing the found name (freed by the caller) */
+        memmove(decrypted, found_name.base, found_name.len);
+        *server_name = ptls_iovec_init(decrypted, found_name.len);
+        decrypted = NULL;
+    }
+
+    ret = 0;
+Exit:
+    if (decrypted != NULL)
+        free(decrypted);
+    if (aead != NULL)
+        ptls_aead_free(aead);
+    if (ret != 0 && *secret != NULL)
+        free_esni_secret(secret, 1);
+    return ret;
+}
+
+/* Scans the client's supported_groups extension body (`src`..`end`) in the
+ * client's preference order and selects the first group that also appears in
+ * `candidates` (NULL-terminated).  Returns 0 with *selected set, a decode
+ * error on malformed input, or PTLS_ALERT_HANDSHAKE_FAILURE when there is no
+ * common group. */
+static int select_negotiated_group(ptls_key_exchange_algorithm_t **selected, ptls_key_exchange_algorithm_t **candidates,
+                                   const uint8_t *src, const uint8_t *const end)
+{
+    int ret;
+
+    ptls_decode_block(src, end, 2, {
+        while (src != end) {
+            uint16_t group;
+            if ((ret = ptls_decode16(&group, &src, end)) != 0)
+                goto Exit;
+            ptls_key_exchange_algorithm_t **c = candidates;
+            for (; *c != NULL; ++c) {
+                if ((*c)->id == group) {
+                    *selected = *c;
+                    return 0; /* first match in client order wins */
+                }
+            }
+        }
+    });
+
+    /* exhausted the list without a match */
+    ret = PTLS_ALERT_HANDSHAKE_FAILURE;
+
+Exit:
+    return ret;
+}
+
+static int decode_client_hello(ptls_t *tls, struct st_ptls_client_hello_t *ch, const uint8_t *src, const uint8_t *const end,
+ ptls_handshake_properties_t *properties)
+{
+ uint16_t exttype = 0;
+ int ret;
+
+ { /* check protocol version */
+ uint16_t protver;
+ if ((ret = ptls_decode16(&protver, &src, end)) != 0)
+ goto Exit;
+ if (protver != 0x0303) {
+ ret = PTLS_ALERT_HANDSHAKE_FAILURE;
+ goto Exit;
+ }
+ }
+
+ /* skip random */
+ if (end - src < PTLS_HELLO_RANDOM_SIZE) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ ch->random_bytes = src;
+ src += PTLS_HELLO_RANDOM_SIZE;
+
+ /* skip legacy_session_id */
+ ptls_decode_open_block(src, end, 1, {
+ if (end - src > 32) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ ch->legacy_session_id = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+
+ /* decode and select from ciphersuites */
+ ptls_decode_open_block(src, end, 2, {
+ ch->cipher_suites = ptls_iovec_init(src, end - src);
+ uint16_t *id = ch->client_ciphers.list;
+ do {
+ if ((ret = ptls_decode16(id, &src, end)) != 0)
+ goto Exit;
+ id++;
+ ch->client_ciphers.count++;
+ if (id >= ch->client_ciphers.list + MAX_CLIENT_CIPHERS) {
+ src = end;
+ break;
+ }
+ } while (src != end);
+ });
+
+ /* decode legacy_compression_methods */
+ ptls_decode_open_block(src, end, 1, {
+ if (src == end) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ ch->compression_methods.ids = src;
+ ch->compression_methods.count = end - src;
+ src = end;
+ });
+
+ /* decode extensions */
+ decode_extensions(src, end, PTLS_HANDSHAKE_TYPE_CLIENT_HELLO, &exttype, {
+ if (tls->ctx->on_extension != NULL &&
+ (ret = tls->ctx->on_extension->cb(tls->ctx->on_extension, tls, PTLS_HANDSHAKE_TYPE_CLIENT_HELLO, exttype,
+ ptls_iovec_init(src, end - src)) != 0))
+ goto Exit;
+ switch (exttype) {
+ case PTLS_EXTENSION_TYPE_SERVER_NAME:
+ if ((ret = client_hello_decode_server_name(&ch->server_name, &src, end)) != 0)
+ goto Exit;
+ if (src != end) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ break;
+ case PTLS_EXTENSION_TYPE_ENCRYPTED_SERVER_NAME: {
+ ptls_cipher_suite_t **cipher;
+ if (ch->esni.cipher != NULL) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ { /* cipher-suite */
+ uint16_t csid;
+ if ((ret = ptls_decode16(&csid, &src, end)) != 0)
+ goto Exit;
+ for (cipher = tls->ctx->cipher_suites; *cipher != NULL; ++cipher)
+ if ((*cipher)->id == csid)
+ break;
+ if (*cipher == NULL) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ }
+ /* key-share (including peer-key) */
+ if ((ret = select_key_share(&ch->esni.key_share, &ch->esni.peer_key, tls->ctx->key_exchanges, &src, end, 1)) != 0)
+ goto Exit;
+ ptls_decode_open_block(src, end, 2, {
+ size_t len = end - src;
+ if (len != (*cipher)->hash->digest_size) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ ch->esni.record_digest = src;
+ src += len;
+ });
+ ptls_decode_block(src, end, 2, {
+ size_t len = end - src;
+ if (len < (*cipher)->aead->tag_size) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ ch->esni.encrypted_sni = ptls_iovec_init(src, len);
+ src += len;
+ });
+ ch->esni.cipher = *cipher; /* set only after successful parsing */
+ } break;
+ case PTLS_EXTENSION_TYPE_ALPN:
+ ptls_decode_block(src, end, 2, {
+ do {
+ ptls_decode_open_block(src, end, 1, {
+ /* rfc7301 3.1: empty strings MUST NOT be included */
+ if (src == end) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ if (ch->alpn.count < sizeof(ch->alpn.list) / sizeof(ch->alpn.list[0]))
+ ch->alpn.list[ch->alpn.count++] = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+ } while (src != end);
+ });
+ break;
+ case PTLS_EXTENSION_TYPE_COMPRESS_CERTIFICATE:
+ ptls_decode_block(src, end, 1, {
+ do {
+ uint16_t id;
+ if ((ret = ptls_decode16(&id, &src, end)) != 0)
+ goto Exit;
+ if (ch->cert_compression_algos.count <
+ sizeof(ch->cert_compression_algos.list) / sizeof(ch->cert_compression_algos.list[0]))
+ ch->cert_compression_algos.list[ch->cert_compression_algos.count++] = id;
+ } while (src != end);
+ });
+ break;
+ case PTLS_EXTENSION_TYPE_SUPPORTED_GROUPS:
+ ch->negotiated_groups = ptls_iovec_init(src, end - src);
+ break;
+ case PTLS_EXTENSION_TYPE_SIGNATURE_ALGORITHMS:
+ if ((ret = decode_signature_algorithms(&ch->signature_algorithms, &src, end)) != 0)
+ goto Exit;
+ break;
+ case PTLS_EXTENSION_TYPE_KEY_SHARE:
+ ch->key_shares = ptls_iovec_init(src, end - src);
+ break;
+ case PTLS_EXTENSION_TYPE_SUPPORTED_VERSIONS:
+ ptls_decode_block(src, end, 1, {
+ size_t selected_index = sizeof(supported_versions) / sizeof(supported_versions[0]);
+ do {
+ size_t i;
+ uint16_t v;
+ if ((ret = ptls_decode16(&v, &src, end)) != 0)
+ goto Exit;
+ for (i = 0; i != selected_index; ++i) {
+ if (supported_versions[i] == v) {
+ selected_index = i;
+ break;
+ }
+ }
+ } while (src != end);
+ if (selected_index != sizeof(supported_versions) / sizeof(supported_versions[0]))
+ ch->selected_version = supported_versions[selected_index];
+ });
+ break;
+ case PTLS_EXTENSION_TYPE_COOKIE:
+ if (properties == NULL || properties->server.cookie.key == NULL) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ ch->cookie.all = ptls_iovec_init(src, end - src);
+ ptls_decode_block(src, end, 2, {
+ ch->cookie.tbs.base = (void *)src;
+ ptls_decode_open_block(src, end, 2, {
+ ptls_decode_open_block(src, end, 1, {
+ ch->cookie.ch1_hash = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+ if (src == end) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ switch (*src++) {
+ case 0:
+ assert(!ch->cookie.sent_key_share);
+ break;
+ case 1:
+ ch->cookie.sent_key_share = 1;
+ break;
+ default:
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ });
+ ch->cookie.tbs.len = src - ch->cookie.tbs.base;
+ ptls_decode_block(src, end, 1, {
+ ch->cookie.signature = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+ });
+ break;
+ case PTLS_EXTENSION_TYPE_PRE_SHARED_KEY: {
+ size_t num_identities = 0;
+ ptls_decode_open_block(src, end, 2, {
+ do {
+ struct st_ptls_client_hello_psk_t psk = {{NULL}};
+ ptls_decode_open_block(src, end, 2, {
+ psk.identity = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+ if ((ret = ptls_decode32(&psk.obfuscated_ticket_age, &src, end)) != 0)
+ goto Exit;
+ if (ch->psk.identities.count < sizeof(ch->psk.identities.list) / sizeof(ch->psk.identities.list[0]))
+ ch->psk.identities.list[ch->psk.identities.count++] = psk;
+ ++num_identities;
+ } while (src != end);
+ });
+ ch->psk.hash_end = src;
+ ptls_decode_block(src, end, 2, {
+ size_t num_binders = 0;
+ do {
+ ptls_decode_open_block(src, end, 1, {
+ if (num_binders < ch->psk.identities.count)
+ ch->psk.identities.list[num_binders].binder = ptls_iovec_init(src, end - src);
+ src = end;
+ });
+ ++num_binders;
+ } while (src != end);
+ if (num_identities != num_binders) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ });
+ } break;
+ case PTLS_EXTENSION_TYPE_PSK_KEY_EXCHANGE_MODES:
+ ptls_decode_block(src, end, 1, {
+ if (src == end) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ for (; src != end; ++src) {
+ if (*src < sizeof(ch->psk.ke_modes) * 8)
+ ch->psk.ke_modes |= 1u << *src;
+ }
+ });
+ break;
+ case PTLS_EXTENSION_TYPE_EARLY_DATA:
+ ch->psk.early_data_indication = 1;
+ break;
+ case PTLS_EXTENSION_TYPE_STATUS_REQUEST:
+ ch->status_request = 1;
+ break;
+ default:
+ handle_unknown_extension(tls, properties, exttype, src, end, ch->unknown_extensions);
+ break;
+ }
+ src = end;
+ });
+
+ /* check if client hello make sense */
+ if (is_supported_version(ch->selected_version)) {
+ if (!(ch->compression_methods.count == 1 && ch->compression_methods.ids[0] == 0)) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ /* esni */
+ if (ch->esni.cipher != NULL) {
+ if (ch->key_shares.base == NULL) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ }
+ /* pre-shared key */
+ if (ch->psk.hash_end != NULL) {
+ /* PSK must be the last extension */
+ if (exttype != PTLS_EXTENSION_TYPE_PRE_SHARED_KEY) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ } else {
+ if (ch->psk.early_data_indication) {
+ ret = PTLS_ALERT_ILLEGAL_PARAMETER;
+ goto Exit;
+ }
+ }
+ } else {
+ ret = PTLS_ALERT_PROTOCOL_VERSION;
+ goto Exit;
+ }
+
+ ret = 0;
+Exit:
+ return ret;
+}
+
+/* Tests whether byte vector `x` equals the NUL-terminated string `y`
+ * (same bytes AND same length; the trailing y[x.len] == '\0' check rejects
+ * prefix-only matches). Assumes x.base is non-NULL — callers only pass
+ * vectors decoded from the ticket, which have a valid base. */
+static int vec_is_string(ptls_iovec_t x, const char *y)
+{
+ return strncmp((const char *)x.base, y, x.len) == 0 && y[x.len] == '\0';
+}
+
+/* Attempts PSK-based resumption. Iterates over the identities offered in the
+ * ClientHello "pre_shared_key" extension, decrypting each session ticket via
+ * ctx->encrypt_ticket and validating ticket age, server-name, key-exchange,
+ * cipher-suite, negotiated protocol, and PSK/binder lengths against the current
+ * handshake state. On a match, extracts the early secret from the ticket PSK,
+ * feeds the truncated ClientHello (ch_trunc) into the transcript, and verifies
+ * the PSK binder; a mismatching binder yields PTLS_ALERT_DECRYPT_ERROR.
+ * When no identity is usable, *psk_index is set to SIZE_MAX and 0 is returned
+ * (absence of a PSK is not an error). Temporary key material (binder_key,
+ * verify_data) is wiped before returning. */
+static int try_psk_handshake(ptls_t *tls, size_t *psk_index, int *accept_early_data, struct st_ptls_client_hello_t *ch,
+ ptls_iovec_t ch_trunc)
+{
+ ptls_buffer_t decbuf;
+ ptls_iovec_t ticket_psk, ticket_server_name, ticket_negotiated_protocol;
+ uint64_t issue_at, now = tls->ctx->get_time->cb(tls->ctx->get_time);
+ uint32_t age_add;
+ uint16_t ticket_key_exchange_id, ticket_csid;
+ uint8_t decbuf_small[256], binder_key[PTLS_MAX_DIGEST_SIZE], verify_data[PTLS_MAX_DIGEST_SIZE];
+ int ret;
+
+ ptls_buffer_init(&decbuf, decbuf_small, sizeof(decbuf_small));
+
+ for (*psk_index = 0; *psk_index < ch->psk.identities.count; ++*psk_index) {
+ struct st_ptls_client_hello_psk_t *identity = ch->psk.identities.list + *psk_index;
+ /* decrypt and decode */
+ int can_accept_early_data = 1;
+ decbuf.off = 0;
+ switch (tls->ctx->encrypt_ticket->cb(tls->ctx->encrypt_ticket, tls, 0, &decbuf, identity->identity)) {
+ case 0: /* decrypted */
+ break;
+ case PTLS_ERROR_REJECT_EARLY_DATA: /* decrypted, but early data is rejected */
+ can_accept_early_data = 0;
+ break;
+ default: /* decryption failure */
+ continue;
+ }
+ if (decode_session_identifier(&issue_at, &ticket_psk, &age_add, &ticket_server_name, &ticket_key_exchange_id, &ticket_csid,
+ &ticket_negotiated_protocol, decbuf.base, decbuf.base + decbuf.off) != 0)
+ continue;
+ /* check age */
+ if (now < issue_at)
+ continue;
+ if (now - issue_at > (uint64_t)tls->ctx->ticket_lifetime * 1000)
+ continue;
+ *accept_early_data = 0;
+ if (ch->psk.early_data_indication && can_accept_early_data) {
+ /* accept early-data if abs(diff) between the reported age and the actual age is within
+ * PTLS_EARLY_DATA_MAX_DELAY (10 seconds) */
+ int64_t delta = (now - issue_at) - (identity->obfuscated_ticket_age - age_add);
+ if (delta < 0)
+ delta = -delta;
+ if (delta <= PTLS_EARLY_DATA_MAX_DELAY)
+ *accept_early_data = 1;
+ }
+ /* check server-name */
+ if (ticket_server_name.len != 0) {
+ if (tls->server_name == NULL)
+ continue;
+ if (!vec_is_string(ticket_server_name, tls->server_name))
+ continue;
+ } else {
+ if (tls->server_name != NULL)
+ continue;
+ }
+ { /* check key-exchange */
+ ptls_key_exchange_algorithm_t **a;
+ for (a = tls->ctx->key_exchanges; *a != NULL && (*a)->id != ticket_key_exchange_id; ++a)
+ ;
+ if (*a == NULL)
+ continue;
+ tls->key_share = *a;
+ }
+ /* check cipher-suite */
+ if (ticket_csid != tls->cipher_suite->id)
+ continue;
+ /* check negotiated-protocol */
+ if (ticket_negotiated_protocol.len != 0) {
+ if (tls->negotiated_protocol == NULL)
+ continue;
+ if (!vec_is_string(ticket_negotiated_protocol, tls->negotiated_protocol))
+ continue;
+ }
+ /* check the length of the decrypted psk and the PSK binder */
+ if (ticket_psk.len != tls->key_schedule->hashes[0].algo->digest_size)
+ continue;
+ if (ch->psk.identities.list[*psk_index].binder.len != tls->key_schedule->hashes[0].algo->digest_size)
+ continue;
+
+ /* found */
+ goto Found;
+ }
+
+ /* not found */
+ *psk_index = SIZE_MAX;
+ *accept_early_data = 0;
+ tls->key_share = NULL;
+ ret = 0;
+ goto Exit;
+
+Found:
+ if ((ret = key_schedule_extract(tls->key_schedule, ticket_psk)) != 0)
+ goto Exit;
+ if ((ret = derive_secret(tls->key_schedule, binder_key, "res binder")) != 0)
+ goto Exit;
+ ptls__key_schedule_update_hash(tls->key_schedule, ch_trunc.base, ch_trunc.len);
+ if ((ret = calc_verify_data(verify_data, tls->key_schedule, binder_key)) != 0)
+ goto Exit;
+ /* constant-time comparison of the received binder against our computation */
+ if (!ptls_mem_equal(ch->psk.identities.list[*psk_index].binder.base, verify_data,
+ tls->key_schedule->hashes[0].algo->digest_size)) {
+ ret = PTLS_ALERT_DECRYPT_ERROR;
+ goto Exit;
+ }
+ ret = 0;
+
+Exit:
+ ptls_buffer_dispose(&decbuf);
+ ptls_clear_memory(binder_key, sizeof(binder_key));
+ ptls_clear_memory(verify_data, sizeof(verify_data));
+ return ret;
+}
+
+/* Computes the HMAC that authenticates a stateless HelloRetryRequest cookie,
+ * keyed with properties->server.cookie.key and using the hash algorithm of the
+ * first configured cipher-suite. The MAC covers: client-random, server-name,
+ * selected cipher-suite id, negotiated group id, application-supplied
+ * additional data, and the to-be-signed cookie body `tbs`. `sig` must have room
+ * for that hash's digest_size bytes. Returns 0 on success or
+ * PTLS_ERROR_NO_MEMORY if the HMAC context cannot be created. */
+static int calc_cookie_signature(ptls_t *tls, ptls_handshake_properties_t *properties,
+ ptls_key_exchange_algorithm_t *negotiated_group, ptls_iovec_t tbs, uint8_t *sig)
+{
+ ptls_hash_algorithm_t *algo = tls->ctx->cipher_suites[0]->hash;
+ ptls_hash_context_t *hctx;
+
+ if ((hctx = ptls_hmac_create(algo, properties->server.cookie.key, algo->digest_size)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+
+/* each variable-length field is prefixed with its one-byte length so that field
+ * boundaries within the MAC input are unambiguous */
+#define UPDATE_BLOCK(p, _len) \
+ do { \
+ size_t len = (_len); \
+ assert(len < UINT8_MAX); \
+ uint8_t len8 = (uint8_t)len; \
+ hctx->update(hctx, &len8, 1); \
+ hctx->update(hctx, (p), len); \
+ } while (0)
+#define UPDATE16(_v) \
+ do { \
+ uint16_t v = (_v); \
+ uint8_t b[2] = {v >> 8, v & 0xff}; \
+ hctx->update(hctx, b, 2); \
+ } while (0)
+
+ UPDATE_BLOCK(tls->client_random, sizeof(tls->client_random));
+ UPDATE_BLOCK(tls->server_name, tls->server_name != NULL ? strlen(tls->server_name) : 0);
+ UPDATE16(tls->cipher_suite->id);
+ UPDATE16(negotiated_group->id);
+ UPDATE_BLOCK(properties->server.cookie.additional_data.base, properties->server.cookie.additional_data.len);
+
+ UPDATE_BLOCK(tbs.base, tbs.len);
+
+#undef UPDATE_BLOCK
+#undef UPDATE16
+
+ hctx->final(hctx, sig, PTLS_HASH_FINAL_MODE_FREE);
+ return 0;
+}
+
+/* Server-side handler for a ClientHello (first or second flight). Decodes the
+ * message, handles client-random / SNI / ESNI, negotiates cipher-suite and
+ * key-share, and either (a) emits a stateless or stateful HelloRetryRequest,
+ * or (b) optionally resumes via PSK (try_psk_handshake), runs the selected key
+ * exchange, and emits ServerHello, EncryptedExtensions, an optional
+ * CertificateRequest, Certificate/CertificateVerify and Finished, installing
+ * handshake and application traffic keys along the way. Returns 0 or
+ * PTLS_ERROR_IN_PROGRESS on success-paths, PTLS_ERROR_STATELESS_RETRY after a
+ * cookie-based HRR, or an alert/error code on failure. The ECDH shared secret
+ * is wiped and freed on all exit paths. */
+static int server_handle_hello(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_iovec_t message,
+ ptls_handshake_properties_t *properties)
+{
+#define EMIT_SERVER_HELLO(sched, fill_rand, extensions) \
+ ptls_push_message(emitter, (sched), PTLS_HANDSHAKE_TYPE_SERVER_HELLO, { \
+ ptls_buffer_push16(emitter->buf, 0x0303 /* legacy version */); \
+ if ((ret = ptls_buffer_reserve(emitter->buf, PTLS_HELLO_RANDOM_SIZE)) != 0) \
+ goto Exit; \
+ do { \
+ fill_rand \
+ } while (0); \
+ emitter->buf->off += PTLS_HELLO_RANDOM_SIZE; \
+ ptls_buffer_push_block(emitter->buf, 1, \
+ { ptls_buffer_pushv(emitter->buf, ch.legacy_session_id.base, ch.legacy_session_id.len); }); \
+ ptls_buffer_push16(emitter->buf, tls->cipher_suite->id); \
+ ptls_buffer_push(emitter->buf, 0); \
+ ptls_buffer_push_block(emitter->buf, 2, { \
+ buffer_push_extension(emitter->buf, PTLS_EXTENSION_TYPE_SUPPORTED_VERSIONS, \
+ { ptls_buffer_push16(emitter->buf, ch.selected_version); }); \
+ do { \
+ extensions \
+ } while (0); \
+ }); \
+ });
+
+#define EMIT_HELLO_RETRY_REQUEST(sched, negotiated_group, additional_extensions) \
+ EMIT_SERVER_HELLO((sched), { memcpy(emitter->buf->base + emitter->buf->off, hello_retry_random, PTLS_HELLO_RANDOM_SIZE); }, \
+ { \
+ ptls_key_exchange_algorithm_t *_negotiated_group = (negotiated_group); \
+ if (_negotiated_group != NULL) { \
+ buffer_push_extension(emitter->buf, PTLS_EXTENSION_TYPE_KEY_SHARE, \
+ { ptls_buffer_push16(emitter->buf, _negotiated_group->id); }); \
+ } \
+ do { \
+ additional_extensions \
+ } while (0); \
+ })
+ /* explicit initializer covering every slot filled in by decode_client_hello */
+ struct st_ptls_client_hello_t ch = {NULL, {NULL}, {NULL}, 0, {NULL}, {NULL}, {NULL}, {{0}},
+ {NULL}, {NULL}, {{{NULL}}}, {{0}}, {{0}}, {{NULL}}, {NULL}, {{UINT16_MAX}}};
+ struct {
+ ptls_key_exchange_algorithm_t *algorithm;
+ ptls_iovec_t peer_key;
+ } key_share = {NULL};
+ enum { HANDSHAKE_MODE_FULL, HANDSHAKE_MODE_PSK, HANDSHAKE_MODE_PSK_DHE } mode;
+ size_t psk_index = SIZE_MAX;
+ ptls_iovec_t pubkey = {0}, ecdh_secret = {0};
+ int accept_early_data = 0, is_second_flight = tls->state == PTLS_STATE_SERVER_EXPECT_SECOND_CLIENT_HELLO, ret;
+
+ /* decode ClientHello */
+ if ((ret = decode_client_hello(tls, &ch, message.base + PTLS_HANDSHAKE_HEADER_SIZE, message.base + message.len, properties)) !=
+ 0)
+ goto Exit;
+ if (tls->ctx->require_dhe_on_psk)
+ ch.psk.ke_modes &= ~(1u << PTLS_PSK_KE_MODE_PSK);
+
+ /* handle client_random, SNI, ESNI */
+ if (!is_second_flight) {
+ memcpy(tls->client_random, ch.random_bytes, sizeof(tls->client_random));
+ log_client_random(tls);
+ ptls_iovec_t server_name = {NULL};
+ int is_esni = 0;
+ if (ch.esni.cipher != NULL && tls->ctx->esni != NULL) {
+ if ((ret = client_hello_decrypt_esni(tls->ctx, &server_name, &tls->esni, &ch)) != 0)
+ goto Exit;
+ if (tls->ctx->update_esni_key != NULL) {
+ if ((ret = tls->ctx->update_esni_key->cb(tls->ctx->update_esni_key, tls, tls->esni->secret, ch.esni.cipher->hash,
+ tls->esni->esni_contents_hash)) != 0)
+ goto Exit;
+ }
+ is_esni = 1;
+ } else if (ch.server_name.base != NULL) {
+ server_name = ch.server_name;
+ }
+ if (tls->ctx->on_client_hello != NULL) {
+ ptls_on_client_hello_parameters_t params = {server_name,
+ message,
+ {ch.alpn.list, ch.alpn.count},
+ {ch.signature_algorithms.list, ch.signature_algorithms.count},
+ {ch.cert_compression_algos.list, ch.cert_compression_algos.count},
+ {ch.client_ciphers.list, ch.client_ciphers.count},
+ is_esni};
+ ret = tls->ctx->on_client_hello->cb(tls->ctx->on_client_hello, tls, &params);
+ } else {
+ ret = 0;
+ }
+ if (is_esni)
+ free(server_name.base);
+ if (ret != 0)
+ goto Exit;
+ } else {
+ if (ch.psk.early_data_indication) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ /* the following check is necessary so that we would be able to track the connection in SSLKEYLOGFILE, even though it
+ * might not be for the safety of the protocol */
+ if (!ptls_mem_equal(tls->client_random, ch.random_bytes, sizeof(tls->client_random))) {
+ ret = PTLS_ALERT_HANDSHAKE_FAILURE;
+ goto Exit;
+ }
+ /* We compare SNI only when the value is saved by the on_client_hello callback. This should be OK because we are
+ * ignoring the value unless the callback saves the server-name. */
+ if (tls->server_name != NULL) {
+ size_t l = strlen(tls->server_name);
+ if (!(ch.server_name.len == l && memcmp(ch.server_name.base, tls->server_name, l) == 0)) {
+ ret = PTLS_ALERT_HANDSHAKE_FAILURE;
+ goto Exit;
+ }
+ }
+ }
+
+ { /* select (or check) cipher-suite, create key_schedule */
+ ptls_cipher_suite_t *cs;
+ if ((ret = select_cipher(&cs, tls->ctx->cipher_suites, ch.cipher_suites.base,
+ ch.cipher_suites.base + ch.cipher_suites.len)) != 0)
+ goto Exit;
+ if (!is_second_flight) {
+ tls->cipher_suite = cs;
+ tls->key_schedule = key_schedule_new(cs, NULL, tls->ctx->hkdf_label_prefix__obsolete);
+ } else {
+ if (tls->cipher_suite != cs) {
+ ret = PTLS_ALERT_HANDSHAKE_FAILURE;
+ goto Exit;
+ }
+ }
+ }
+
+ /* select key_share */
+ if (key_share.algorithm == NULL && ch.key_shares.base != NULL) {
+ const uint8_t *src = ch.key_shares.base, *const end = src + ch.key_shares.len;
+ ptls_decode_block(src, end, 2, {
+ if ((ret = select_key_share(&key_share.algorithm, &key_share.peer_key, tls->ctx->key_exchanges, &src, end, 0)) != 0)
+ goto Exit;
+ });
+ }
+
+ if (!is_second_flight) {
+ if (ch.cookie.all.len != 0 && key_share.algorithm != NULL) {
+
+ /* use cookie to check the integrity of the handshake, and update the context */
+ uint8_t sig[PTLS_MAX_DIGEST_SIZE];
+ size_t sigsize = tls->ctx->cipher_suites[0]->hash->digest_size;
+ if ((ret = calc_cookie_signature(tls, properties, key_share.algorithm, ch.cookie.tbs, sig)) != 0)
+ goto Exit;
+ if (!(ch.cookie.signature.len == sigsize && ptls_mem_equal(ch.cookie.signature.base, sig, sigsize))) {
+ ret = PTLS_ALERT_HANDSHAKE_FAILURE;
+ goto Exit;
+ }
+ /* integrity check passed; update states */
+ key_schedule_update_ch1hash_prefix(tls->key_schedule);
+ ptls__key_schedule_update_hash(tls->key_schedule, ch.cookie.ch1_hash.base, ch.cookie.ch1_hash.len);
+ key_schedule_extract(tls->key_schedule, ptls_iovec_init(NULL, 0));
+ /* ... reusing sendbuf to rebuild HRR for hash calculation */
+ size_t hrr_start = emitter->buf->off;
+ EMIT_HELLO_RETRY_REQUEST(tls->key_schedule, ch.cookie.sent_key_share ? key_share.algorithm : NULL, {
+ buffer_push_extension(emitter->buf, PTLS_EXTENSION_TYPE_COOKIE,
+ { ptls_buffer_pushv(emitter->buf, ch.cookie.all.base, ch.cookie.all.len); });
+ });
+ emitter->buf->off = hrr_start;
+ is_second_flight = 1;
+
+ } else if (key_share.algorithm == NULL || (properties != NULL && properties->server.enforce_retry)) {
+
+ /* send HelloRetryRequest */
+ if (ch.negotiated_groups.base == NULL) {
+ ret = PTLS_ALERT_MISSING_EXTENSION;
+ goto Exit;
+ }
+ ptls_key_exchange_algorithm_t *negotiated_group;
+ if ((ret = select_negotiated_group(&negotiated_group, tls->ctx->key_exchanges, ch.negotiated_groups.base,
+ ch.negotiated_groups.base + ch.negotiated_groups.len)) != 0)
+ goto Exit;
+ ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+ assert(tls->key_schedule->generation == 0);
+ if (properties != NULL && properties->server.retry_uses_cookie) {
+ /* emit HRR with cookie (note: we MUST omit KeyShare if the client has specified the correct one; see 46554f0)
+ */
+ EMIT_HELLO_RETRY_REQUEST(NULL, key_share.algorithm != NULL ? NULL : negotiated_group, {
+ ptls_buffer_t *sendbuf = emitter->buf;
+ buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_COOKIE, {
+ ptls_buffer_push_block(sendbuf, 2, {
+ /* push to-be-signed data */
+ size_t tbs_start = sendbuf->off;
+ ptls_buffer_push_block(sendbuf, 2, {
+ /* first block of the cookie data is the hash(ch1) */
+ ptls_buffer_push_block(sendbuf, 1, {
+ size_t sz = tls->cipher_suite->hash->digest_size;
+ if ((ret = ptls_buffer_reserve(sendbuf, sz)) != 0)
+ goto Exit;
+ key_schedule_extract_ch1hash(tls->key_schedule, sendbuf->base + sendbuf->off);
+ sendbuf->off += sz;
+ });
+ /* second is if we have sent key_share extension */
+ ptls_buffer_push(sendbuf, key_share.algorithm == NULL);
+ /* we can add more data here */
+ });
+ size_t tbs_len = sendbuf->off - tbs_start;
+ /* push the signature */
+ ptls_buffer_push_block(sendbuf, 1, {
+ size_t sz = tls->ctx->cipher_suites[0]->hash->digest_size;
+ if ((ret = ptls_buffer_reserve(sendbuf, sz)) != 0)
+ goto Exit;
+ if ((ret = calc_cookie_signature(tls, properties, negotiated_group,
+ ptls_iovec_init(sendbuf->base + tbs_start, tbs_len),
+ sendbuf->base + sendbuf->off)) != 0)
+ goto Exit;
+ sendbuf->off += sz;
+ });
+ });
+ });
+ });
+ if ((ret = push_change_cipher_spec(tls, emitter->buf)) != 0)
+ goto Exit;
+ ret = PTLS_ERROR_STATELESS_RETRY;
+ } else {
+ /* invoking stateful retry; roll the key schedule and emit HRR */
+ key_schedule_transform_post_ch1hash(tls->key_schedule);
+ key_schedule_extract(tls->key_schedule, ptls_iovec_init(NULL, 0));
+ EMIT_HELLO_RETRY_REQUEST(tls->key_schedule, key_share.algorithm != NULL ? NULL : negotiated_group, {});
+ if ((ret = push_change_cipher_spec(tls, emitter->buf)) != 0)
+ goto Exit;
+ tls->state = PTLS_STATE_SERVER_EXPECT_SECOND_CLIENT_HELLO;
+ if (ch.psk.early_data_indication)
+ tls->server.early_data_skipped_bytes = 0;
+ ret = PTLS_ERROR_IN_PROGRESS;
+ }
+ goto Exit;
+ }
+ }
+
+ /* handle unknown extensions */
+ if ((ret = report_unknown_extensions(tls, properties, ch.unknown_extensions)) != 0)
+ goto Exit;
+
+ /* try psk handshake */
+ if (!is_second_flight && ch.psk.hash_end != 0 &&
+ (ch.psk.ke_modes & ((1u << PTLS_PSK_KE_MODE_PSK) | (1u << PTLS_PSK_KE_MODE_PSK_DHE))) != 0 &&
+ tls->ctx->encrypt_ticket != NULL && !tls->ctx->require_client_authentication) {
+ if ((ret = try_psk_handshake(tls, &psk_index, &accept_early_data, &ch,
+ ptls_iovec_init(message.base, ch.psk.hash_end - message.base))) != 0) {
+ goto Exit;
+ }
+ }
+
+ /* If client authentication is enabled, we always force a full handshake.
+ * TODO: Check for `post_handshake_auth` extension and if that is present, do not force full handshake!
+ * Remove also the check `!require_client_authentication` above.
+ *
+ * adjust key_schedule, determine handshake mode
+ */
+ if (psk_index == SIZE_MAX || tls->ctx->require_client_authentication) {
+ ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+ if (!is_second_flight) {
+ assert(tls->key_schedule->generation == 0);
+ key_schedule_extract(tls->key_schedule, ptls_iovec_init(NULL, 0));
+ }
+ mode = HANDSHAKE_MODE_FULL;
+ if (properties != NULL)
+ properties->server.selected_psk_binder.len = 0;
+ } else {
+ ptls__key_schedule_update_hash(tls->key_schedule, ch.psk.hash_end, message.base + message.len - ch.psk.hash_end);
+ if ((ch.psk.ke_modes & (1u << PTLS_PSK_KE_MODE_PSK)) != 0) {
+ mode = HANDSHAKE_MODE_PSK;
+ } else {
+ assert((ch.psk.ke_modes & (1u << PTLS_PSK_KE_MODE_PSK_DHE)) != 0);
+ mode = HANDSHAKE_MODE_PSK_DHE;
+ }
+ tls->is_psk_handshake = 1;
+ if (properties != NULL) {
+ ptls_iovec_t *selected = &ch.psk.identities.list[psk_index].binder;
+ memcpy(properties->server.selected_psk_binder.base, selected->base, selected->len);
+ properties->server.selected_psk_binder.len = selected->len;
+ }
+ }
+
+ /* derive the 0-RTT traffic key now, if early data is acceptable; note that early
+ * data is honored only when the first listed PSK identity (index 0) was selected */
+ if (accept_early_data && tls->ctx->max_early_data_size != 0 && psk_index == 0) {
+ if ((tls->pending_handshake_secret = malloc(PTLS_MAX_DIGEST_SIZE)) == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ if ((ret = derive_exporter_secret(tls, 1)) != 0)
+ goto Exit;
+ if ((ret = setup_traffic_protection(tls, 0, "c e traffic", 1, 0)) != 0)
+ goto Exit;
+ }
+
+ /* run key-exchange, to obtain pubkey and secret */
+ if (mode != HANDSHAKE_MODE_PSK) {
+ if (key_share.algorithm == NULL) {
+ ret = ch.key_shares.base != NULL ? PTLS_ALERT_HANDSHAKE_FAILURE : PTLS_ALERT_MISSING_EXTENSION;
+ goto Exit;
+ }
+ if ((ret = key_share.algorithm->exchange(key_share.algorithm, &pubkey, &ecdh_secret, key_share.peer_key)) != 0)
+ goto Exit;
+ tls->key_share = key_share.algorithm;
+ }
+
+ /* send ServerHello */
+ EMIT_SERVER_HELLO(tls->key_schedule,
+ { tls->ctx->random_bytes(emitter->buf->base + emitter->buf->off, PTLS_HELLO_RANDOM_SIZE); },
+ {
+ ptls_buffer_t *sendbuf = emitter->buf;
+ if (mode != HANDSHAKE_MODE_PSK) {
+ buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_KEY_SHARE, {
+ ptls_buffer_push16(sendbuf, key_share.algorithm->id);
+ ptls_buffer_push_block(sendbuf, 2, { ptls_buffer_pushv(sendbuf, pubkey.base, pubkey.len); });
+ });
+ }
+ if (mode != HANDSHAKE_MODE_FULL) {
+ buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_PRE_SHARED_KEY,
+ { ptls_buffer_push16(sendbuf, (uint16_t)psk_index); });
+ }
+ });
+ if ((ret = push_change_cipher_spec(tls, emitter->buf)) != 0)
+ goto Exit;
+
+ /* create protection contexts for the handshake */
+ assert(tls->key_schedule->generation == 1);
+ key_schedule_extract(tls->key_schedule, ecdh_secret);
+ if ((ret = setup_traffic_protection(tls, 1, "s hs traffic", 2, 0)) != 0)
+ goto Exit;
+ if (tls->pending_handshake_secret != NULL) {
+ if ((ret = derive_secret(tls->key_schedule, tls->pending_handshake_secret, "c hs traffic")) != 0)
+ goto Exit;
+ if (tls->ctx->update_traffic_key != NULL &&
+ (ret = tls->ctx->update_traffic_key->cb(tls->ctx->update_traffic_key, tls, 0, 2, tls->pending_handshake_secret)) != 0)
+ goto Exit;
+ } else {
+ if ((ret = setup_traffic_protection(tls, 0, "c hs traffic", 2, 0)) != 0)
+ goto Exit;
+ if (ch.psk.early_data_indication)
+ tls->server.early_data_skipped_bytes = 0;
+ }
+
+ /* send EncryptedExtensions */
+ ptls_push_message(emitter, tls->key_schedule, PTLS_HANDSHAKE_TYPE_ENCRYPTED_EXTENSIONS, {
+ ptls_buffer_t *sendbuf = emitter->buf;
+ ptls_buffer_push_block(sendbuf, 2, {
+ if (tls->esni != NULL) {
+ /* the extension is sent even if the application does not handle server name, because otherwise the handshake
+ * would fail (FIXME ch.esni.nonce will be zero on HRR) */
+ buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_ENCRYPTED_SERVER_NAME, {
+ uint8_t response_type = PTLS_ESNI_RESPONSE_TYPE_ACCEPT;
+ ptls_buffer_pushv(sendbuf, &response_type, 1);
+ ptls_buffer_pushv(sendbuf, tls->esni->nonce, PTLS_ESNI_NONCE_SIZE);
+ });
+ free_esni_secret(&tls->esni, 1);
+ } else if (tls->server_name != NULL) {
+ /* In this event, the server SHALL include an extension of type "server_name" in the (extended) server hello.
+ * The "extension_data" field of this extension SHALL be empty. (RFC 6066 section 3) */
+ buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_SERVER_NAME, {});
+ }
+ if (tls->negotiated_protocol != NULL) {
+ buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_ALPN, {
+ ptls_buffer_push_block(sendbuf, 2, {
+ ptls_buffer_push_block(sendbuf, 1, {
+ ptls_buffer_pushv(sendbuf, tls->negotiated_protocol, strlen(tls->negotiated_protocol));
+ });
+ });
+ });
+ }
+ if (tls->pending_handshake_secret != NULL)
+ buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_EARLY_DATA, {});
+ if ((ret = push_additional_extensions(properties, sendbuf)) != 0)
+ goto Exit;
+ });
+ });
+
+ if (mode == HANDSHAKE_MODE_FULL) {
+ /* send certificate request if client authentication is activated */
+ if (tls->ctx->require_client_authentication) {
+ ptls_push_message(emitter, tls->key_schedule, PTLS_HANDSHAKE_TYPE_CERTIFICATE_REQUEST, {
+ /* certificate_request_context, this field SHALL be zero length, unless the certificate
+ * request is used for post-handshake authentication.
+ */
+ ptls_buffer_t *sendbuf = emitter->buf;
+ ptls_buffer_push(sendbuf, 0);
+ /* extensions */
+ ptls_buffer_push_block(sendbuf, 2, {
+ buffer_push_extension(sendbuf, PTLS_EXTENSION_TYPE_SIGNATURE_ALGORITHMS, {
+ if ((ret = push_signature_algorithms(sendbuf)) != 0)
+ goto Exit;
+ });
+ });
+ });
+
+ if (ret != 0) {
+ goto Exit;
+ }
+ }
+
+ ret = send_certificate_and_certificate_verify(tls, emitter, &ch.signature_algorithms, ptls_iovec_init(NULL, 0),
+ PTLS_SERVER_CERTIFICATE_VERIFY_CONTEXT_STRING, ch.status_request);
+
+ if (ret != 0) {
+ goto Exit;
+ }
+ }
+
+ if ((ret = send_finished(tls, emitter)) != 0)
+ goto Exit;
+
+ assert(tls->key_schedule->generation == 2);
+ if ((ret = key_schedule_extract(tls->key_schedule, ptls_iovec_init(NULL, 0))) != 0)
+ goto Exit;
+ if ((ret = setup_traffic_protection(tls, 1, "s ap traffic", 3, 0)) != 0)
+ goto Exit;
+ if ((ret = derive_secret(tls->key_schedule, tls->server.pending_traffic_secret, "c ap traffic")) != 0)
+ goto Exit;
+ if ((ret = derive_exporter_secret(tls, 0)) != 0)
+ goto Exit;
+
+ if (tls->pending_handshake_secret != NULL) {
+ if (tls->ctx->omit_end_of_early_data) {
+ if ((ret = commission_handshake_secret(tls)) != 0)
+ goto Exit;
+ tls->state = PTLS_STATE_SERVER_EXPECT_FINISHED;
+ } else {
+ tls->state = PTLS_STATE_SERVER_EXPECT_END_OF_EARLY_DATA;
+ }
+ } else if (tls->ctx->require_client_authentication) {
+ tls->state = PTLS_STATE_SERVER_EXPECT_CERTIFICATE;
+ } else {
+ tls->state = PTLS_STATE_SERVER_EXPECT_FINISHED;
+ }
+
+ /* send session ticket if necessary */
+ if (ch.psk.ke_modes != 0 && tls->ctx->ticket_lifetime != 0) {
+ if ((ret = send_session_ticket(tls, emitter)) != 0)
+ goto Exit;
+ }
+
+ if (tls->ctx->require_client_authentication) {
+ ret = PTLS_ERROR_IN_PROGRESS;
+ } else {
+ ret = 0;
+ }
+
+Exit:
+ free(pubkey.base);
+ if (ecdh_secret.base != NULL) {
+ ptls_clear_memory(ecdh_secret.base, ecdh_secret.len);
+ free(ecdh_secret.base);
+ }
+ return ret;
+
+#undef EMIT_SERVER_HELLO
+#undef EMIT_HELLO_RETRY_REQUEST
+}
+
+/* Handles the client's EndOfEarlyData message: switches the receive direction
+ * from the 0-RTT key to the pending handshake traffic key
+ * (commission_handshake_secret), absorbs the message into the transcript hash,
+ * and transitions to waiting for the client Finished. Always returns
+ * PTLS_ERROR_IN_PROGRESS on success. */
+static int server_handle_end_of_early_data(ptls_t *tls, ptls_iovec_t message)
+{
+ int ret;
+
+ if ((ret = commission_handshake_secret(tls)) != 0)
+ goto Exit;
+
+ ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+ tls->state = PTLS_STATE_SERVER_EXPECT_FINISHED;
+ ret = PTLS_ERROR_IN_PROGRESS;
+
+Exit:
+ return ret;
+}
+
+/* Verifies the client Finished message, then installs the previously derived
+ * "c ap traffic" application secret for the receive direction (wiping the
+ * pending copy afterwards), absorbs the message into the transcript hash, and
+ * enters the post-handshake state. */
+static int server_handle_finished(ptls_t *tls, ptls_iovec_t message)
+{
+ int ret;
+
+ if ((ret = verify_finished(tls, message)) != 0)
+ return ret;
+
+ memcpy(tls->traffic_protection.dec.secret, tls->server.pending_traffic_secret, sizeof(tls->server.pending_traffic_secret));
+ ptls_clear_memory(tls->server.pending_traffic_secret, sizeof(tls->server.pending_traffic_secret));
+ if ((ret = setup_traffic_protection(tls, 0, NULL, 3, 0)) != 0)
+ return ret;
+
+ ptls__key_schedule_update_hash(tls->key_schedule, message.base, message.len);
+
+ tls->state = PTLS_STATE_SERVER_POST_HANDSHAKE;
+ return 0;
+}
+
+/* Ratchets the traffic secret for one direction (KeyUpdate, RFC 8446 §7.2):
+ * derives the next-generation secret with the "traffic upd" label, replaces the
+ * stored secret, and re-installs traffic protection for that direction. The
+ * temporary buffer holding the new secret is wiped before returning. */
+static int update_traffic_key(ptls_t *tls, int is_enc)
+{
+ struct st_ptls_traffic_protection_t *tp = is_enc ? &tls->traffic_protection.enc : &tls->traffic_protection.dec;
+ uint8_t secret[PTLS_MAX_DIGEST_SIZE];
+ int ret;
+
+ ptls_hash_algorithm_t *hash = tls->key_schedule->hashes[0].algo;
+ if ((ret = hkdf_expand_label(hash, secret, hash->digest_size, ptls_iovec_init(tp->secret, hash->digest_size), "traffic upd",
+ ptls_iovec_init(NULL, 0), tls->key_schedule->hkdf_label_prefix)) != 0)
+ goto Exit;
+ memcpy(tp->secret, secret, sizeof(secret));
+ ret = setup_traffic_protection(tls, is_enc, NULL, 3, 1);
+
+Exit:
+ ptls_clear_memory(secret, sizeof(secret));
+ return ret;
+}
+
+/* Handles a received KeyUpdate message. The single-byte body must be 0
+ * (update_not_requested) or 1 (update_requested); anything else is a decode
+ * error. The receive key is ratcheted immediately; if the peer requested an
+ * update of our sending key, it is deferred via tls->needs_key_update. When an
+ * external update_traffic_key callback is installed the message is rejected as
+ * unexpected (presumably because key management is delegated to the callback —
+ * NOTE(review): confirm against caller). */
+static int handle_key_update(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_iovec_t message)
+{
+ const uint8_t *src = message.base + PTLS_HANDSHAKE_HEADER_SIZE, *const end = message.base + message.len;
+ int ret;
+
+ /* validate */
+ if (end - src != 1 || *src > 1)
+ return PTLS_ALERT_DECODE_ERROR;
+
+ /* update receive key */
+ if ((ret = update_traffic_key(tls, 0)) != 0)
+ return ret;
+
+ if (*src) {
+ if (tls->ctx->update_traffic_key != NULL)
+ return PTLS_ALERT_UNEXPECTED_MESSAGE;
+ tls->needs_key_update = 1;
+ }
+
+ return 0;
+}
+
+/* Parses the 5-byte TLS record header at `src` into `rec` (content type,
+ * legacy version, payload length). Rejects records whose declared length
+ * exceeds the maximum for their content type. Does not set rec->fragment. */
+static int parse_record_header(struct st_ptls_record_t *rec, const uint8_t *src)
+{
+ rec->type = src[0];
+ rec->version = ntoh16(src + 1);
+ rec->length = ntoh16(src + 3);
+
+ if (rec->length >
+ (size_t)(rec->type == PTLS_CONTENT_TYPE_APPDATA ? PTLS_MAX_ENCRYPTED_RECORD_SIZE : PTLS_MAX_PLAINTEXT_RECORD_SIZE))
+ return PTLS_ALERT_DECODE_ERROR;
+
+ return 0;
+}
+
+/* Extracts one TLS record from the input stream. Fast path: when no partial
+ * record is buffered and `src` holds a complete record, rec->fragment points
+ * directly into `src` (zero copy). Slow path: header and payload bytes are
+ * accumulated in tls->recvbuf.rec across calls, returning
+ * PTLS_ERROR_IN_PROGRESS until the record is complete. On return, *len is
+ * reduced to the number of input bytes actually consumed. */
+static int parse_record(ptls_t *tls, struct st_ptls_record_t *rec, const uint8_t *src, size_t *len)
+{
+ int ret;
+
+ if (tls->recvbuf.rec.base == NULL && *len >= 5) {
+ /* fast path */
+ if ((ret = parse_record_header(rec, src)) != 0)
+ return ret;
+ if (5 + rec->length <= *len) {
+ rec->fragment = src + 5;
+ *len = rec->length + 5;
+ return 0;
+ }
+ }
+
+ /* slow path */
+ const uint8_t *const end = src + *len;
+ *rec = (struct st_ptls_record_t){0};
+
+ if (tls->recvbuf.rec.base == NULL) {
+ ptls_buffer_init(&tls->recvbuf.rec, "", 0);
+ if ((ret = ptls_buffer_reserve(&tls->recvbuf.rec, 5)) != 0)
+ return ret;
+ }
+
+ /* fill and parse the header */
+ while (tls->recvbuf.rec.off < 5) {
+ if (src == end)
+ return PTLS_ERROR_IN_PROGRESS;
+ tls->recvbuf.rec.base[tls->recvbuf.rec.off++] = *src++;
+ }
+ if ((ret = parse_record_header(rec, tls->recvbuf.rec.base)) != 0)
+ return ret;
+
+ /* fill the fragment */
+ size_t addlen = rec->length + 5 - tls->recvbuf.rec.off;
+ if (addlen != 0) {
+ if ((ret = ptls_buffer_reserve(&tls->recvbuf.rec, addlen)) != 0)
+ return ret;
+ if (addlen > (size_t)(end - src))
+ addlen = end - src;
+ if (addlen != 0) {
+ memcpy(tls->recvbuf.rec.base + tls->recvbuf.rec.off, src, addlen);
+ tls->recvbuf.rec.off += addlen;
+ src += addlen;
+ }
+ }
+
+ /* set rec->fragment if a complete record has been parsed */
+ if (tls->recvbuf.rec.off == rec->length + 5) {
+ rec->fragment = tls->recvbuf.rec.base + 5;
+ ret = 0;
+ } else {
+ ret = PTLS_ERROR_IN_PROGRESS;
+ }
+
+ *len -= end - src;
+ return ret;
+}
+
+static void update_open_count(ptls_context_t *ctx, ssize_t delta)
+{
+ if (ctx->update_open_count != NULL)
+ ctx->update_open_count->cb(ctx->update_open_count, delta);
+}
+
/* Allocates and zero-initializes a ptls_t bound to `ctx`, registering it with the open-connection counter. Returns NULL on
 * allocation failure; callers must check. */
static ptls_t *new_instance(ptls_context_t *ctx, int is_server)
{
    ptls_t *tls;

    assert(ctx->get_time != NULL && "please set ctx->get_time to `&ptls_get_time`; see #92");

    if ((tls = malloc(sizeof(*tls))) == NULL)
        return NULL;

    update_open_count(ctx, 1);
    *tls = (ptls_t){ctx};
    tls->is_server = is_server;
    tls->send_change_cipher_spec = ctx->send_change_cipher_spec;
    tls->skip_tracing = ptls_default_skip_tracing;
    return tls;
}
+
+ptls_t *ptls_client_new(ptls_context_t *ctx)
+{
+ ptls_t *tls = new_instance(ctx, 0);
+ tls->state = PTLS_STATE_CLIENT_HANDSHAKE_START;
+ tls->ctx->random_bytes(tls->client_random, sizeof(tls->client_random));
+ log_client_random(tls);
+ tls->ctx->random_bytes(tls->client.legacy_session_id, sizeof(tls->client.legacy_session_id));
+
+ PTLS_PROBE(NEW, tls, 0);
+ return tls;
+}
+
+ptls_t *ptls_server_new(ptls_context_t *ctx)
+{
+ ptls_t *tls = new_instance(ctx, 1);
+ tls->state = PTLS_STATE_SERVER_EXPECT_CLIENT_HELLO;
+ tls->server.early_data_skipped_bytes = UINT32_MAX;
+
+ PTLS_PROBE(NEW, tls, 1);
+ return tls;
+}
+
/* Releases every resource owned by `tls`: receive buffers, exporter/handshake secrets, key schedule, AEAD contexts, and
 * client/server-specific state. Secrets are scrubbed via ptls_clear_memory before the memory is returned. */
void ptls_free(ptls_t *tls)
{
    PTLS_PROBE0(FREE, tls);
    ptls_buffer_dispose(&tls->recvbuf.rec);
    ptls_buffer_dispose(&tls->recvbuf.mess);
    free_exporter_master_secret(tls, 1);
    free_exporter_master_secret(tls, 0);
    if (tls->esni != NULL)
        free_esni_secret(&tls->esni, tls->is_server);
    if (tls->key_schedule != NULL)
        key_schedule_free(tls->key_schedule);
    if (tls->traffic_protection.dec.aead != NULL)
        ptls_aead_free(tls->traffic_protection.dec.aead);
    if (tls->traffic_protection.enc.aead != NULL)
        ptls_aead_free(tls->traffic_protection.enc.aead);
    free(tls->server_name);
    free(tls->negotiated_protocol);
    if (tls->is_server) {
        /* nothing to do */
    } else {
        /* tell the key-share backend the exchange is being abandoned (second argument `1` releases the context) */
        if (tls->client.key_share_ctx != NULL)
            tls->client.key_share_ctx->on_exchange(&tls->client.key_share_ctx, 1, NULL, ptls_iovec_init(NULL, 0));
        if (tls->client.certificate_request.context.base != NULL)
            free(tls->client.certificate_request.context.base);
    }
    /* an empty invocation lets the certificate verifier release its state */
    if (tls->certificate_verify.cb != NULL) {
        tls->certificate_verify.cb(tls->certificate_verify.verify_ctx, ptls_iovec_init(NULL, 0), ptls_iovec_init(NULL, 0));
    }
    if (tls->pending_handshake_secret != NULL) {
        ptls_clear_memory(tls->pending_handshake_secret, PTLS_MAX_DIGEST_SIZE);
        free(tls->pending_handshake_secret);
    }
    update_open_count(tls->ctx, -1);
    ptls_clear_memory(tls, sizeof(*tls));
    free(tls);
}
+
/* Returns the context associated with the connection. */
ptls_context_t *ptls_get_context(ptls_t *tls)
{
    return tls->ctx;
}

/* Replaces the connection's context, moving the open-connection count from the old context to the new one. */
void ptls_set_context(ptls_t *tls, ptls_context_t *ctx)
{
    update_open_count(ctx, 1);
    update_open_count(tls->ctx, -1);
    tls->ctx = ctx;
}

/* Returns the "client random" of the connection (PTLS_HELLO_RANDOM_SIZE bytes). */
ptls_iovec_t ptls_get_client_random(ptls_t *tls)
{
    return ptls_iovec_init(tls->client_random, PTLS_HELLO_RANDOM_SIZE);
}

/* Returns the cipher suite being used by the connection. */
ptls_cipher_suite_t *ptls_get_cipher(ptls_t *tls)
{
    return tls->cipher_suite;
}

/* Returns the server name of the connection, or NULL if not set. */
const char *ptls_get_server_name(ptls_t *tls)
{
    return tls->server_name;
}
+
+int ptls_set_server_name(ptls_t *tls, const char *server_name, size_t server_name_len)
+{
+ char *duped = NULL;
+
+ if (server_name != NULL) {
+ if (server_name_len == 0)
+ server_name_len = strlen(server_name);
+ if ((duped = malloc(server_name_len + 1)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ memcpy(duped, server_name, server_name_len);
+ duped[server_name_len] = '\0';
+ }
+
+ free(tls->server_name);
+ tls->server_name = duped;
+
+ return 0;
+}
+
/* Returns the negotiated application protocol, or NULL if none has been negotiated. */
const char *ptls_get_negotiated_protocol(ptls_t *tls)
{
    return tls->negotiated_protocol;
}
+
+int ptls_set_negotiated_protocol(ptls_t *tls, const char *protocol, size_t protocol_len)
+{
+ char *duped = NULL;
+
+ if (protocol != NULL) {
+ if (protocol_len == 0)
+ protocol_len = strlen(protocol);
+ if ((duped = malloc(protocol_len + 1)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ memcpy(duped, protocol, protocol_len);
+ duped[protocol_len] = '\0';
+ }
+
+ free(tls->negotiated_protocol);
+ tls->negotiated_protocol = duped;
+
+ return 0;
+}
+
/* Returns a boolean indicating whether the handshake has completed (i.e. the state has reached post-handshake). */
int ptls_handshake_is_complete(ptls_t *tls)
{
    return tls->state >= PTLS_STATE_POST_HANDSHAKE_MIN;
}

/* Returns a boolean indicating whether a PSK-based handshake was performed. */
int ptls_is_psk_handshake(ptls_t *tls)
{
    return tls->is_psk_handshake;
}

/* Returns a pointer to the user-data slot associated with the connection. */
void **ptls_get_data_ptr(ptls_t *tls)
{
    return &tls->data_ptr;
}

/* Returns the per-connection skip-tracing flag. */
int ptls_skip_tracing(ptls_t *tls)
{
    return tls->skip_tracing;
}

/* Sets the per-connection skip-tracing flag. */
void ptls_set_skip_tracing(ptls_t *tls, int skip_tracing)
{
    tls->skip_tracing = skip_tracing;
}
+
/* Dispatches one handshake message received by the client, based on the current handshake state. `is_end_of_record` is set when
 * the message ends exactly at a record boundary; ServerHello and Finished additionally require that alignment. */
static int handle_client_handshake_message(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_iovec_t message, int is_end_of_record,
                                           ptls_handshake_properties_t *properties)
{
    uint8_t type = message.base[0];
    int ret;

    switch (tls->state) {
    case PTLS_STATE_CLIENT_EXPECT_SERVER_HELLO:
    case PTLS_STATE_CLIENT_EXPECT_SECOND_SERVER_HELLO:
        if (type == PTLS_HANDSHAKE_TYPE_SERVER_HELLO && is_end_of_record) {
            ret = client_handle_hello(tls, emitter, message, properties);
        } else {
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
        }
        break;
    case PTLS_STATE_CLIENT_EXPECT_ENCRYPTED_EXTENSIONS:
        if (type == PTLS_HANDSHAKE_TYPE_ENCRYPTED_EXTENSIONS) {
            ret = client_handle_encrypted_extensions(tls, message, properties);
        } else {
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
        }
        break;
    case PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_REQUEST_OR_CERTIFICATE:
        if (type == PTLS_HANDSHAKE_TYPE_CERTIFICATE_REQUEST) {
            ret = client_handle_certificate_request(tls, message, properties);
            break;
        }
        /* fall through: a Certificate is also acceptable in this state */
    case PTLS_STATE_CLIENT_EXPECT_CERTIFICATE:
        switch (type) {
        case PTLS_HANDSHAKE_TYPE_CERTIFICATE:
            ret = client_handle_certificate(tls, message);
            break;
        case PTLS_HANDSHAKE_TYPE_COMPRESSED_CERTIFICATE:
            ret = client_handle_compressed_certificate(tls, message);
            break;
        default:
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
            break;
        }
        break;
    case PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_VERIFY:
        if (type == PTLS_HANDSHAKE_TYPE_CERTIFICATE_VERIFY) {
            ret = client_handle_certificate_verify(tls, message);
        } else {
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
        }
        break;
    case PTLS_STATE_CLIENT_EXPECT_FINISHED:
        if (type == PTLS_HANDSHAKE_TYPE_FINISHED && is_end_of_record) {
            ret = client_handle_finished(tls, emitter, message);
        } else {
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
        }
        break;
    case PTLS_STATE_CLIENT_POST_HANDSHAKE:
        /* only NewSessionTicket and KeyUpdate are accepted after the handshake */
        switch (type) {
        case PTLS_HANDSHAKE_TYPE_NEW_SESSION_TICKET:
            ret = client_handle_new_session_ticket(tls, message);
            break;
        case PTLS_HANDSHAKE_TYPE_KEY_UPDATE:
            ret = handle_key_update(tls, emitter, message);
            break;
        default:
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
            break;
        }
        break;
    default:
        assert(!"unexpected state");
        ret = PTLS_ALERT_INTERNAL_ERROR;
        break;
    }

    PTLS_PROBE(RECEIVE_MESSAGE, tls, message.base[0], message.base + PTLS_HANDSHAKE_HEADER_SIZE,
               message.len - PTLS_HANDSHAKE_HEADER_SIZE, ret);

    return ret;
}

/* Dispatches one handshake message received by the server, based on the current handshake state. ClientHello and Finished must
 * end exactly at a record boundary (`is_end_of_record`). */
static int handle_server_handshake_message(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_iovec_t message, int is_end_of_record,
                                           ptls_handshake_properties_t *properties)
{
    uint8_t type = message.base[0];
    int ret;

    switch (tls->state) {
    case PTLS_STATE_SERVER_EXPECT_CLIENT_HELLO:
    case PTLS_STATE_SERVER_EXPECT_SECOND_CLIENT_HELLO:
        if (type == PTLS_HANDSHAKE_TYPE_CLIENT_HELLO && is_end_of_record) {
            ret = server_handle_hello(tls, emitter, message, properties);
        } else {
            ret = PTLS_ALERT_HANDSHAKE_FAILURE;
        }
        break;
    case PTLS_STATE_SERVER_EXPECT_CERTIFICATE:
        if (type == PTLS_HANDSHAKE_TYPE_CERTIFICATE) {
            ret = server_handle_certificate(tls, message);
        } else {
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
        }
        break;
    case PTLS_STATE_SERVER_EXPECT_CERTIFICATE_VERIFY:
        if (type == PTLS_HANDSHAKE_TYPE_CERTIFICATE_VERIFY) {
            ret = server_handle_certificate_verify(tls, message);
        } else {
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
        }
        break;
    case PTLS_STATE_SERVER_EXPECT_END_OF_EARLY_DATA:
        assert(!tls->ctx->omit_end_of_early_data);
        if (type == PTLS_HANDSHAKE_TYPE_END_OF_EARLY_DATA) {
            ret = server_handle_end_of_early_data(tls, message);
        } else {
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
        }
        break;
    case PTLS_STATE_SERVER_EXPECT_FINISHED:
        if (type == PTLS_HANDSHAKE_TYPE_FINISHED && is_end_of_record) {
            ret = server_handle_finished(tls, message);
        } else {
            ret = PTLS_ALERT_HANDSHAKE_FAILURE;
        }
        break;
    case PTLS_STATE_SERVER_POST_HANDSHAKE:
        /* only KeyUpdate is accepted after the handshake */
        switch (type) {
        case PTLS_HANDSHAKE_TYPE_KEY_UPDATE:
            ret = handle_key_update(tls, emitter, message);
            break;
        default:
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
            break;
        }
        break;
    default:
        assert(!"unexpected state");
        ret = PTLS_ALERT_INTERNAL_ERROR;
        break;
    }

    PTLS_PROBE(RECEIVE_MESSAGE, tls, message.base[0], message.base + PTLS_HANDSHAKE_HEADER_SIZE,
               message.len - PTLS_HANDSHAKE_HEADER_SIZE, ret);

    return ret;
}
+
+static int handle_alert(ptls_t *tls, const uint8_t *src, size_t len)
+{
+ if (len != 2)
+ return PTLS_ALERT_DECODE_ERROR;
+
+ uint8_t desc = src[1];
+
+ /* all fatal alerts and USER_CANCELLED warning tears down the connection immediately, regardless of the transmitted level */
+ return PTLS_ALERT_TO_PEER_ERROR(desc);
+}
+
+static int message_buffer_is_overflow(ptls_context_t *ctx, size_t size)
+{
+ if (ctx->max_buffer_size == 0)
+ return 0;
+ if (size <= ctx->max_buffer_size)
+ return 0;
+ return 1;
+}
+
/* Feeds one handshake record to the message-level handler `cb`, reassembling messages that span records in tls->recvbuf.mess.
 * Multiple complete messages within the record are dispatched one by one; a trailing partial message is kept buffered, bounded
 * by ctx->max_buffer_size. */
static int handle_handshake_record(ptls_t *tls,
                                   int (*cb)(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_iovec_t message,
                                             int is_end_of_record, ptls_handshake_properties_t *properties),
                                   ptls_message_emitter_t *emitter, struct st_ptls_record_t *rec,
                                   ptls_handshake_properties_t *properties)
{
    int ret;

    /* handshake */
    if (rec->type != PTLS_CONTENT_TYPE_HANDSHAKE)
        return PTLS_ALERT_DECODE_ERROR;

    /* flatten the unhandled messages */
    const uint8_t *src, *src_end;
    if (tls->recvbuf.mess.base == NULL) {
        /* no partial message pending; process directly out of the record */
        src = rec->fragment;
        src_end = src + rec->length;
    } else {
        /* append to the pending partial message and process from the buffer */
        if (message_buffer_is_overflow(tls->ctx, tls->recvbuf.mess.off + rec->length))
            return PTLS_ALERT_HANDSHAKE_FAILURE;
        if ((ret = ptls_buffer_reserve(&tls->recvbuf.mess, rec->length)) != 0)
            return ret;
        memcpy(tls->recvbuf.mess.base + tls->recvbuf.mess.off, rec->fragment, rec->length);
        tls->recvbuf.mess.off += rec->length;
        src = tls->recvbuf.mess.base;
        src_end = src + tls->recvbuf.mess.off;
    }

    /* handle the messages (4 = handshake header: 1-byte type + 3-byte length) */
    ret = PTLS_ERROR_IN_PROGRESS;
    while (src_end - src >= 4) {
        size_t mess_len = 4 + ntoh24(src + 1);
        if (src_end - src < (int)mess_len)
            break;
        ret = cb(tls, emitter, ptls_iovec_init(src, mess_len), src_end - src == mess_len, properties);
        switch (ret) {
        case 0:
        case PTLS_ERROR_IN_PROGRESS:
            break;
        default:
            ptls_buffer_dispose(&tls->recvbuf.mess);
            return ret;
        }
        src += mess_len;
    }

    /* keep last partial message in buffer */
    if (src != src_end) {
        size_t new_size = src_end - src;
        if (message_buffer_is_overflow(tls->ctx, new_size))
            return PTLS_ALERT_HANDSHAKE_FAILURE;
        if (tls->recvbuf.mess.base == NULL) {
            ptls_buffer_init(&tls->recvbuf.mess, "", 0);
            if ((ret = ptls_buffer_reserve(&tls->recvbuf.mess, new_size)) != 0)
                return ret;
            memcpy(tls->recvbuf.mess.base, src, new_size);
        } else {
            memmove(tls->recvbuf.mess.base, src, new_size);
        }
        tls->recvbuf.mess.off = new_size;
        ret = PTLS_ERROR_IN_PROGRESS;
    } else {
        ptls_buffer_dispose(&tls->recvbuf.mess);
    }

    return ret;
}

/* Processes one record from the input: parses it, decrypts it if traffic protection is active, strips the TLS 1.3 inner
 * padding/content-type, and routes it to the handshake, alert, or application-data path. `*inlen` is updated to the number of
 * bytes consumed. Early data that the server chose not to accept is skipped up to PTLS_MAX_EARLY_DATA_SKIP_SIZE bytes. */
static int handle_input(ptls_t *tls, ptls_message_emitter_t *emitter, ptls_buffer_t *decryptbuf, const void *input, size_t *inlen,
                        ptls_handshake_properties_t *properties)
{
    struct st_ptls_record_t rec;
    int ret;

    /* extract the record */
    if ((ret = parse_record(tls, &rec, input, inlen)) != 0)
        return ret;
    assert(rec.fragment != NULL);

    /* decrypt the record */
    if (rec.type == PTLS_CONTENT_TYPE_CHANGE_CIPHER_SPEC) {
        /* CCS is a compatibility no-op in TLS 1.3, but must carry exactly the byte 0x01 and only appear mid-handshake */
        if (tls->state < PTLS_STATE_POST_HANDSHAKE_MIN) {
            if (!(rec.length == 1 && rec.fragment[0] == 0x01))
                return PTLS_ALERT_ILLEGAL_PARAMETER;
        } else {
            return PTLS_ALERT_HANDSHAKE_FAILURE;
        }
        ret = PTLS_ERROR_IN_PROGRESS;
        goto NextRecord;
    }
    if (tls->traffic_protection.dec.aead != NULL && rec.type != PTLS_CONTENT_TYPE_ALERT) {
        size_t decrypted_length;
        if (rec.type != PTLS_CONTENT_TYPE_APPDATA)
            return PTLS_ALERT_HANDSHAKE_FAILURE;
        if ((ret = ptls_buffer_reserve(decryptbuf, 5 + rec.length)) != 0)
            return ret;
        if ((ret = aead_decrypt(&tls->traffic_protection.dec, decryptbuf->base + decryptbuf->off, &decrypted_length, rec.fragment,
                                rec.length)) != 0) {
            /* a decryption failure on the server may simply be rejected early data; skip it */
            if (tls->is_server && tls->server.early_data_skipped_bytes != UINT32_MAX)
                goto ServerSkipEarlyData;
            return ret;
        }
        rec.length = decrypted_length;
        rec.fragment = decryptbuf->base + decryptbuf->off;
        /* skip padding (trailing zeros); the last non-zero byte is the actual content type */
        for (; rec.length != 0; --rec.length)
            if (rec.fragment[rec.length - 1] != 0)
                break;
        if (rec.length == 0)
            return PTLS_ALERT_UNEXPECTED_MESSAGE;
        rec.type = rec.fragment[--rec.length];
    } else if (rec.type == PTLS_CONTENT_TYPE_APPDATA && tls->is_server && tls->server.early_data_skipped_bytes != UINT32_MAX) {
        goto ServerSkipEarlyData;
    }

    if (tls->recvbuf.mess.base != NULL || rec.type == PTLS_CONTENT_TYPE_HANDSHAKE) {
        /* handshake record */
        ret = handle_handshake_record(tls, tls->is_server ? handle_server_handshake_message : handle_client_handshake_message,
                                      emitter, &rec, properties);
    } else {
        /* handling of an alert or an application record */
        switch (rec.type) {
        case PTLS_CONTENT_TYPE_APPDATA:
            if (tls->state >= PTLS_STATE_POST_HANDSHAKE_MIN) {
                decryptbuf->off += rec.length;
                ret = 0;
            } else if (tls->state == PTLS_STATE_SERVER_EXPECT_END_OF_EARLY_DATA) {
                /* accepted early data; only kept when we have the 0-RTT key */
                if (tls->traffic_protection.dec.aead != NULL)
                    decryptbuf->off += rec.length;
                ret = 0;
            } else {
                ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
            }
            break;
        case PTLS_CONTENT_TYPE_ALERT:
            ret = handle_alert(tls, rec.fragment, rec.length);
            break;
        default:
            ret = PTLS_ALERT_UNEXPECTED_MESSAGE;
            break;
        }
    }

NextRecord:
    ptls_buffer_dispose(&tls->recvbuf.rec);
    return ret;

ServerSkipEarlyData:
    tls->server.early_data_skipped_bytes += (uint32_t)rec.length;
    if (tls->server.early_data_skipped_bytes > PTLS_MAX_EARLY_DATA_SKIP_SIZE)
        return PTLS_ALERT_HANDSHAKE_FAILURE;
    ret = PTLS_ERROR_IN_PROGRESS;
    goto NextRecord;
}
+
/* Initializes an emitter that frames outgoing handshake messages as TLS records (5 = record header size) into `sendbuf`. */
static void init_record_message_emitter(ptls_t *tls, struct st_ptls_record_message_emitter_t *emitter, ptls_buffer_t *sendbuf)
{
    *emitter = (struct st_ptls_record_message_emitter_t){
        {sendbuf, &tls->traffic_protection.enc, 5, begin_record_message, commit_record_message}};
}

/* Drives the handshake: consumes records from `input` (updating `*inlen` to the number of bytes eaten) and appends any
 * response records to `_sendbuf`. Returns 0 on completion, PTLS_ERROR_IN_PROGRESS when more input is needed, or an error; on
 * error a fatal alert is emitted and the partially written response is scrubbed. */
int ptls_handshake(ptls_t *tls, ptls_buffer_t *_sendbuf, const void *input, size_t *inlen, ptls_handshake_properties_t *properties)
{
    struct st_ptls_record_message_emitter_t emitter;
    int ret;

    assert(tls->state < PTLS_STATE_POST_HANDSHAKE_MIN);

    init_record_message_emitter(tls, &emitter, _sendbuf);
    size_t sendbuf_orig_off = emitter.super.buf->off;

    /* special handlings */
    switch (tls->state) {
    case PTLS_STATE_CLIENT_HANDSHAKE_START: {
        /* the very first call on a client takes no input and just emits ClientHello */
        assert(input == NULL || *inlen == 0);
        assert(tls->ctx->key_exchanges[0] != NULL);
        return send_client_hello(tls, &emitter.super, properties, NULL);
    }
    default:
        break;
    }

    const uint8_t *src = input, *const src_end = src + *inlen;
    ptls_buffer_t decryptbuf;
    uint8_t decryptbuf_small[256];

    ptls_buffer_init(&decryptbuf, decryptbuf_small, sizeof(decryptbuf_small));

    /* perform handhake until completion or until all the input has been swallowed */
    ret = PTLS_ERROR_IN_PROGRESS;
    while (ret == PTLS_ERROR_IN_PROGRESS && src != src_end) {
        size_t consumed = src_end - src;
        ret = handle_input(tls, &emitter.super, &decryptbuf, src, &consumed, properties);
        src += consumed;
        /* no application data may be produced before the handshake completes */
        assert(decryptbuf.off == 0);
    }

    ptls_buffer_dispose(&decryptbuf);

    switch (ret) {
    case 0:
    case PTLS_ERROR_IN_PROGRESS:
    case PTLS_ERROR_STATELESS_RETRY:
        break;
    default:
        /* flush partially written response */
        ptls_clear_memory(emitter.super.buf->base + sendbuf_orig_off, emitter.super.buf->off - sendbuf_orig_off);
        emitter.super.buf->off = sendbuf_orig_off;
        /* send alert immediately */
        if (PTLS_ERROR_GET_CLASS(ret) != PTLS_ERROR_CLASS_PEER_ALERT)
            if (ptls_send_alert(tls, emitter.super.buf, PTLS_ALERT_LEVEL_FATAL,
                                PTLS_ERROR_GET_CLASS(ret) == PTLS_ERROR_CLASS_SELF_ALERT ? ret : PTLS_ALERT_INTERNAL_ERROR) != 0)
                emitter.super.buf->off = sendbuf_orig_off;
        break;
    }

    *inlen -= src_end - src;
    return ret;
}
+
/* Decrypts post-handshake records from `_input` into `decryptbuf`, updating `*inlen` to the number of bytes consumed. Stops as
 * soon as some application data has been produced (or an error occurs); PTLS_ERROR_IN_PROGRESS is translated to 0 since a
 * partial record is not an error for the caller. */
int ptls_receive(ptls_t *tls, ptls_buffer_t *decryptbuf, const void *_input, size_t *inlen)
{
    const uint8_t *input = (const uint8_t *)_input, *const end = input + *inlen;
    size_t decryptbuf_orig_size = decryptbuf->off;
    int ret = 0;

    assert(tls->state >= PTLS_STATE_SERVER_EXPECT_END_OF_EARLY_DATA);

    /* loop until we decrypt some application data (or an error) */
    while (ret == 0 && input != end && decryptbuf_orig_size == decryptbuf->off) {
        size_t consumed = end - input;
        ret = handle_input(tls, NULL, decryptbuf, input, &consumed, NULL);
        input += consumed;

        switch (ret) {
        case 0:
            break;
        case PTLS_ERROR_IN_PROGRESS:
            ret = 0;
            break;
        case PTLS_ERROR_CLASS_PEER_ALERT + PTLS_ALERT_CLOSE_NOTIFY:
            /* TODO send close alert */
            break;
        default:
            if (PTLS_ERROR_GET_CLASS(ret) == PTLS_ERROR_CLASS_SELF_ALERT) {
                /* TODO send alert */
            }
            break;
        }
    }

    *inlen -= end - input;

    return ret;
}

/* Emits a KeyUpdate message (optionally requesting the peer to update as well) and installs the next-generation send key. On
 * failure the partially written message is rolled back. NOTE: ptls_push_message/ptls_buffer_push are macros that `goto Exit` on
 * error, which is why the Exit label is reachable with ret != 0. */
static int update_send_key(ptls_t *tls, ptls_buffer_t *_sendbuf, int request_update)
{
    struct st_ptls_record_message_emitter_t emitter;
    int ret;

    init_record_message_emitter(tls, &emitter, _sendbuf);
    size_t sendbuf_orig_off = emitter.super.buf->off;

    ptls_push_message(&emitter.super, NULL, PTLS_HANDSHAKE_TYPE_KEY_UPDATE,
                      { ptls_buffer_push(emitter.super.buf, !!request_update); });
    if ((ret = update_traffic_key(tls, 1)) != 0)
        goto Exit;
    ret = 0;

Exit:
    if (ret != 0)
        emitter.super.buf->off = sendbuf_orig_off;
    return ret;
}
+
/* Encrypts `inlen` bytes of application data into `sendbuf`, transparently performing a key update beforehand when one is due
 * (requested via ptls_update_key, by the peer, or forced by the AEAD usage limit). */
int ptls_send(ptls_t *tls, ptls_buffer_t *sendbuf, const void *input, size_t inlen)
{
    assert(tls->traffic_protection.enc.aead != NULL);

    /* "For AES-GCM, up to 2^24.5 full-size records (about 24 million) may be encrypted on a given connection while keeping a
     * safety margin of approximately 2^-57 for Authenticated Encryption (AE) security." (RFC 8446 section 5.5)
     */
    if (tls->traffic_protection.enc.seq >= 16777216)
        tls->needs_key_update = 1;

    if (tls->needs_key_update) {
        int ret;
        if ((ret = update_send_key(tls, sendbuf, tls->key_update_send_request)) != 0)
            return ret;
        tls->needs_key_update = 0;
        tls->key_update_send_request = 0;
    }

    return buffer_push_encrypted_records(sendbuf, PTLS_CONTENT_TYPE_APPDATA, input, inlen, &tls->traffic_protection.enc);
}

/* Schedules a key update to be performed at the next ptls_send; when `request_update` is set, the peer is asked to update its
 * send key too. Not available when traffic keys are managed externally. */
int ptls_update_key(ptls_t *tls, int request_update)
{
    assert(tls->ctx->update_traffic_key == NULL);
    tls->needs_key_update = 1;
    tls->key_update_send_request = request_update;
    return 0;
}

/* Returns the per-record overhead: 5-byte record header + 1-byte inner content type + AEAD tag. */
size_t ptls_get_record_overhead(ptls_t *tls)
{
    return 6 + tls->traffic_protection.enc.aead->algo->tag_size;
}
+
/* Appends an alert record to `sendbuf`, encrypting it when the send key is available (but never with the early-data key, i.e.
 * while the client has not yet received Finished). NOTE: buffer_push_record / ptls_buffer_push are macros that `goto Exit` on
 * error. */
int ptls_send_alert(ptls_t *tls, ptls_buffer_t *sendbuf, uint8_t level, uint8_t description)
{
    size_t rec_start = sendbuf->off;
    int ret = 0;

    buffer_push_record(sendbuf, PTLS_CONTENT_TYPE_ALERT, { ptls_buffer_push(sendbuf, level, description); });
    /* encrypt the alert if we have the encryption keys, unless when it is the early data key */
    if (tls->traffic_protection.enc.aead != NULL && !(tls->state <= PTLS_STATE_CLIENT_EXPECT_FINISHED)) {
        if ((ret = buffer_encrypt_record(sendbuf, rec_start, &tls->traffic_protection.enc)) != 0)
            goto Exit;
    }

Exit:
    return ret;
}

/* Derives `outlen` bytes of keying material from the (early) exporter master secret, per RFC 8446 section 7.5:
 * HKDF-Expand-Label(Derive-Secret(master, label, ""), "exporter", Hash(context_value), outlen). Intermediate secrets are
 * scrubbed before returning. */
int ptls_export_secret(ptls_t *tls, void *output, size_t outlen, const char *label, ptls_iovec_t context_value, int is_early)
{
    ptls_hash_algorithm_t *algo = tls->key_schedule->hashes[0].algo;
    uint8_t *master_secret = is_early ? tls->exporter_master_secret.early : tls->exporter_master_secret.one_rtt,
            derived_secret[PTLS_MAX_DIGEST_SIZE], context_value_hash[PTLS_MAX_DIGEST_SIZE];
    int ret;

    if (master_secret == NULL) {
        /* distinguish "not yet available" from "will never be available" */
        if (is_early) {
            switch (tls->state) {
            case PTLS_STATE_CLIENT_HANDSHAKE_START:
            case PTLS_STATE_SERVER_EXPECT_CLIENT_HELLO:
                ret = PTLS_ERROR_IN_PROGRESS;
                break;
            default:
                ret = PTLS_ERROR_NOT_AVAILABLE;
                break;
            }
        } else {
            ret = PTLS_ERROR_IN_PROGRESS;
        }
        return ret;
    }

    if ((ret = ptls_calc_hash(algo, context_value_hash, context_value.base, context_value.len)) != 0)
        return ret;

    if ((ret = hkdf_expand_label(algo, derived_secret, algo->digest_size, ptls_iovec_init(master_secret, algo->digest_size), label,
                                 ptls_iovec_init(algo->empty_digest, algo->digest_size), tls->key_schedule->hkdf_label_prefix)) !=
        0)
        goto Exit;
    ret = hkdf_expand_label(algo, output, outlen, ptls_iovec_init(derived_secret, algo->digest_size), "exporter",
                            ptls_iovec_init(context_value_hash, algo->digest_size), tls->key_schedule->hkdf_label_prefix);

Exit:
    ptls_clear_memory(derived_secret, sizeof(derived_secret));
    ptls_clear_memory(context_value_hash, sizeof(context_value_hash));
    return ret;
}
+
+struct st_picotls_hmac_context_t {
+ ptls_hash_context_t super;
+ ptls_hash_algorithm_t *algo;
+ ptls_hash_context_t *hash;
+ uint8_t key[1];
+};
+
/* ptls_hash_context_t callback: feeds data into the inner hash. */
static void hmac_update(ptls_hash_context_t *_ctx, const void *src, size_t len)
{
    struct st_picotls_hmac_context_t *ctx = (struct st_picotls_hmac_context_t *)_ctx;
    ctx->hash->update(ctx->hash, src, len);
}

/* XORs the stored key with `pad` (0x36 = ipad, 0x5c = opad), feeds it to the hash, then undoes the XOR so the key is restored
 * for subsequent use. */
static void hmac_apply_key(struct st_picotls_hmac_context_t *ctx, uint8_t pad)
{
    size_t i;

    for (i = 0; i != ctx->algo->block_size; ++i)
        ctx->key[i] ^= pad;
    ctx->hash->update(ctx->hash, ctx->key, ctx->algo->block_size);
    for (i = 0; i != ctx->algo->block_size; ++i)
        ctx->key[i] ^= pad;
}
+
/* ptls_hash_context_t callback: finishes the HMAC computation. The inner digest is written to `md`, then the outer pass
 * H(key^opad || inner) is computed in-place over it. FREE releases the context (scrubbing the key); RESET re-primes it with
 * key^ipad for another message; SNAPSHOT is not supported. */
static void hmac_final(ptls_hash_context_t *_ctx, void *md, ptls_hash_final_mode_t mode)
{
    struct st_picotls_hmac_context_t *ctx = (struct st_picotls_hmac_context_t *)_ctx;

    assert(mode != PTLS_HASH_FINAL_MODE_SNAPSHOT || !"not supported");

    if (md != NULL) {
        /* finish the inner hash, then run the outer hash over key^opad || inner-digest */
        ctx->hash->final(ctx->hash, md, PTLS_HASH_FINAL_MODE_RESET);
        hmac_apply_key(ctx, 0x5c);
        ctx->hash->update(ctx->hash, md, ctx->algo->digest_size);
    }
    ctx->hash->final(ctx->hash, md, mode);

    switch (mode) {
    case PTLS_HASH_FINAL_MODE_FREE:
        ptls_clear_memory(ctx->key, ctx->algo->block_size);
        free(ctx);
        break;
    case PTLS_HASH_FINAL_MODE_RESET:
        hmac_apply_key(ctx, 0x36);
        break;
    default:
        assert(!"FIXME");
        break;
    }
}
+
+int ptls_calc_hash(ptls_hash_algorithm_t *algo, void *output, const void *src, size_t len)
+{
+ ptls_hash_context_t *ctx;
+
+ if ((ctx = algo->create()) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ ctx->update(ctx, src, len);
+ ctx->final(ctx, output, PTLS_HASH_FINAL_MODE_FREE);
+ return 0;
+}
+
/* Creates an HMAC context keyed with `key` (at most algo->block_size bytes; shorter keys are zero-padded, as HMAC requires).
 * Returns NULL on allocation failure. The returned context is released via final(..., PTLS_HASH_FINAL_MODE_FREE). */
ptls_hash_context_t *ptls_hmac_create(ptls_hash_algorithm_t *algo, const void *key, size_t key_size)
{
    struct st_picotls_hmac_context_t *ctx;

    assert(key_size <= algo->block_size);

    /* the trailing `key` member is variable-length; allocate block_size bytes for it */
    if ((ctx = malloc(offsetof(struct st_picotls_hmac_context_t, key) + algo->block_size)) == NULL)
        return NULL;

    *ctx = (struct st_picotls_hmac_context_t){{hmac_update, hmac_final}, algo};
    if ((ctx->hash = algo->create()) == NULL) {
        free(ctx);
        return NULL;
    }
    memset(ctx->key, 0, algo->block_size);
    memcpy(ctx->key, key, key_size);

    /* prime the inner hash with key^ipad */
    hmac_apply_key(ctx, 0x36);

    return &ctx->super;
}
+
+int ptls_hkdf_extract(ptls_hash_algorithm_t *algo, void *output, ptls_iovec_t salt, ptls_iovec_t ikm)
+{
+ ptls_hash_context_t *hash;
+
+ if (salt.len == 0)
+ salt = ptls_iovec_init(zeroes_of_max_digest_size, algo->digest_size);
+
+ if ((hash = ptls_hmac_create(algo, salt.base, salt.len)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ hash->update(hash, ikm.base, ikm.len);
+ hash->final(hash, output, PTLS_HASH_FINAL_MODE_FREE);
+ return 0;
+}
+
+int ptls_hkdf_expand(ptls_hash_algorithm_t *algo, void *output, size_t outlen, ptls_iovec_t prk, ptls_iovec_t info)
+{
+ ptls_hash_context_t *hmac = NULL;
+ size_t i;
+ uint8_t digest[PTLS_MAX_DIGEST_SIZE];
+
+ for (i = 0; (i * algo->digest_size) < outlen; ++i) {
+ if (hmac == NULL) {
+ if ((hmac = ptls_hmac_create(algo, prk.base, prk.len)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ } else {
+ hmac->update(hmac, digest, algo->digest_size);
+ }
+ hmac->update(hmac, info.base, info.len);
+ uint8_t gen = (uint8_t)(i + 1);
+ hmac->update(hmac, &gen, 1);
+ hmac->final(hmac, digest, 1);
+
+ size_t off_start = i * algo->digest_size, off_end = off_start + algo->digest_size;
+ if (off_end > outlen)
+ off_end = outlen;
+ memcpy((uint8_t *)output + off_start, digest, off_end - off_start);
+ }
+
+ if (hmac != NULL)
+ hmac->final(hmac, NULL, PTLS_HASH_FINAL_MODE_FREE);
+
+ ptls_clear_memory(digest, algo->digest_size);
+
+ return 0;
+}
+
/* HKDF-Expand-Label (RFC 8446 section 7.1): builds the HkdfLabel structure (length, "prefix" + label, context) and runs
 * HKDF-Expand over it. NOTE: the ptls_buffer_push* macros `goto Exit` on error, which is why Exit is reachable. */
int hkdf_expand_label(ptls_hash_algorithm_t *algo, void *output, size_t outlen, ptls_iovec_t secret, const char *label,
                      ptls_iovec_t hash_value, const char *label_prefix)
{
    ptls_buffer_t hkdf_label;
    uint8_t hkdf_label_buf[512];
    int ret;

    assert(label_prefix != NULL);

    ptls_buffer_init(&hkdf_label, hkdf_label_buf, sizeof(hkdf_label_buf));

    ptls_buffer_push16(&hkdf_label, (uint16_t)outlen);
    ptls_buffer_push_block(&hkdf_label, 1, {
        ptls_buffer_pushv(&hkdf_label, label_prefix, strlen(label_prefix));
        ptls_buffer_pushv(&hkdf_label, label, strlen(label));
    });
    ptls_buffer_push_block(&hkdf_label, 1, { ptls_buffer_pushv(&hkdf_label, hash_value.base, hash_value.len); });

    ret = ptls_hkdf_expand(algo, output, outlen, secret, ptls_iovec_init(hkdf_label.base, hkdf_label.off));

Exit:
    ptls_buffer_dispose(&hkdf_label);
    return ret;
}
+
+int ptls_hkdf_expand_label(ptls_hash_algorithm_t *algo, void *output, size_t outlen, ptls_iovec_t secret, const char *label,
+ ptls_iovec_t hash_value, const char *label_prefix)
+{
+ /* the handshake layer should call hkdf_expand_label directly, always setting key_schedule->hkdf_label_prefix as the
+ * argument */
+ if (label_prefix == NULL)
+ label_prefix = PTLS_HKDF_EXPAND_LABEL_PREFIX;
+ return hkdf_expand_label(algo, output, outlen, secret, label, hash_value, label_prefix);
+}
+
+ptls_cipher_context_t *ptls_cipher_new(ptls_cipher_algorithm_t *algo, int is_enc, const void *key)
+{
+ ptls_cipher_context_t *ctx;
+
+ if ((ctx = (ptls_cipher_context_t *)malloc(algo->context_size)) == NULL)
+ return NULL;
+ *ctx = (ptls_cipher_context_t){algo};
+ if (algo->setup_crypto(ctx, is_enc, key) != 0) {
+ free(ctx);
+ ctx = NULL;
+ }
+ return ctx;
+}
+
/* Releases a cipher context created by ptls_cipher_new. */
void ptls_cipher_free(ptls_cipher_context_t *ctx)
{
    ctx->do_dispose(ctx);
    free(ctx);
}

/* Allocates an AEAD context and derives its key and static IV from `secret` via the traffic-key schedule. On any failure the
 * derived IV is scrubbed and NULL is returned. */
ptls_aead_context_t *new_aead(ptls_aead_algorithm_t *aead, ptls_hash_algorithm_t *hash, int is_enc, const void *secret,
                              ptls_iovec_t hash_value, const char *label_prefix)
{
    ptls_aead_context_t *ctx;
    uint8_t key[PTLS_MAX_SECRET_SIZE];
    int ret;

    if ((ctx = (ptls_aead_context_t *)malloc(aead->context_size)) == NULL)
        return NULL;

    *ctx = (ptls_aead_context_t){aead};
    /* derive the key (is_iv=0) and the static IV (is_iv=1) from the traffic secret */
    if ((ret = get_traffic_key(hash, key, aead->key_size, 0, secret, hash_value, label_prefix)) != 0)
        goto Exit;
    if ((ret = get_traffic_key(hash, ctx->static_iv, aead->iv_size, 1, secret, hash_value, label_prefix)) != 0)
        goto Exit;
    ret = aead->setup_crypto(ctx, is_enc, key);

Exit:
    /* the raw key is always scrubbed; on failure the IV is scrubbed too before the context is freed */
    ptls_clear_memory(key, aead->key_size);
    if (ret != 0) {
        ptls_clear_memory(ctx->static_iv, aead->iv_size);
        free(ctx);
        ctx = NULL;
    }

    return ctx;
}

/* Public constructor: same as new_aead without a hash_value mixed into the derivation. */
ptls_aead_context_t *ptls_aead_new(ptls_aead_algorithm_t *aead, ptls_hash_algorithm_t *hash, int is_enc, const void *secret,
                                   const char *label_prefix)
{
    return new_aead(aead, hash, is_enc, secret, ptls_iovec_init(NULL, 0), label_prefix);
}

/* Releases an AEAD context, scrubbing the static IV before freeing. */
void ptls_aead_free(ptls_aead_context_t *ctx)
{
    ctx->dispose_crypto(ctx);
    ptls_clear_memory(ctx->static_iv, ctx->algo->iv_size);
    free(ctx);
}
+
+size_t ptls_aead_encrypt(ptls_aead_context_t *ctx, void *output, const void *input, size_t inlen, uint64_t seq, const void *aad,
+ size_t aadlen)
+{
+ size_t off = 0;
+
+ ptls_aead_encrypt_init(ctx, seq, aad, aadlen);
+ off += ptls_aead_encrypt_update(ctx, ((uint8_t *)output) + off, input, inlen);
+ off += ptls_aead_encrypt_final(ctx, ((uint8_t *)output) + off);
+
+ return off;
+}
+
+void ptls_aead__build_iv(ptls_aead_context_t *ctx, uint8_t *iv, uint64_t seq)
+{
+ size_t iv_size = ctx->algo->iv_size, i;
+ const uint8_t *s = ctx->static_iv;
+ uint8_t *d = iv;
+
+ /* build iv */
+ for (i = iv_size - 8; i != 0; --i)
+ *d++ = *s++;
+ i = 64;
+ do {
+ i -= 8;
+ *d++ = *s++ ^ (uint8_t)(seq >> i);
+ } while (i != 0);
+}
+
/* Default implementation of ptls_clear_memory: scrubs `len` bytes at `p`. */
static void clear_memory(void *p, size_t len)
{
    if (len == 0)
        return;
    memset(p, 0, len);
}

/* Exposed through a volatile function pointer so the compiler cannot elide the scrub of a buffer that is about to be freed or
 * go out of scope. */
void (*volatile ptls_clear_memory)(void *p, size_t len) = clear_memory;
+
/* Default implementation of ptls_mem_equal: constant-time equality check of two `len`-byte buffers. */
static int mem_equal(const void *_x, const void *_y, size_t len)
{
    const volatile uint8_t *x = _x, *y = _y;
    uint8_t diff = 0;
    size_t i;

    /* accumulate differences instead of exiting early so that timing does not reveal the position of a mismatch */
    for (i = 0; i != len; ++i)
        diff |= x[i] ^ y[i];

    return diff == 0;
}

int (*volatile ptls_mem_equal)(const void *x, const void *y, size_t len) = mem_equal;
+
/* Default ptls_get_time_t callback: wall-clock time in milliseconds since the Unix epoch, via gettimeofday. */
static uint64_t get_time(ptls_get_time_t *self)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (uint64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
}

ptls_get_time_t ptls_get_time = {get_time};
#if PICOTLS_USE_DTRACE
PTLS_THREADLOCAL unsigned ptls_default_skip_tracing = 0;
#endif
+
/* Returns a boolean indicating whether the connection is server-side. */
int ptls_is_server(ptls_t *tls)
{
    return tls->is_server;
}

/* Message emitter that writes raw (unframed) handshake messages, tracking per-epoch offsets within the output buffer; used by
 * the ptls_handle_message family (e.g. QUIC integration). */
struct st_ptls_raw_message_emitter_t {
    ptls_message_emitter_t super;
    /* buffer offset at which the message currently being written starts; SIZE_MAX when no message is in flight */
    size_t start_off;
    /* array of 5 offsets delimiting the bytes belonging to each epoch */
    size_t *epoch_offsets;
};
+
/* Emitter callback: remembers where the message starts so commit_raw_message can attribute the bytes to an epoch. */
static int begin_raw_message(ptls_message_emitter_t *_self)
{
    struct st_ptls_raw_message_emitter_t *self = (void *)_self;

    self->start_off = self->super.buf->off;
    return 0;
}

/* Emitter callback: extends the offsets of all epochs above the one that produced the message, so that they point past the
 * bytes just written. */
static int commit_raw_message(ptls_message_emitter_t *_self)
{
    struct st_ptls_raw_message_emitter_t *self = (void *)_self;
    size_t epoch;

    /* epoch is the key epoch, with the only exception being 2nd CH generated after 0-RTT key */
    epoch = self->super.enc->epoch;
    if (epoch == 1 && self->super.buf->base[self->start_off] == PTLS_HANDSHAKE_TYPE_CLIENT_HELLO)
        epoch = 0;

    for (++epoch; epoch < 5; ++epoch) {
        assert(self->epoch_offsets[epoch] == self->start_off);
        self->epoch_offsets[epoch] = self->super.buf->off;
    }

    self->start_off = SIZE_MAX;

    return 0;
}
+
/* Maps the current handshake state to the epoch whose key protects the next message we expect to read: 0 = plaintext,
 * 1 = 0-RTT, 2 = handshake, 3 = 1-RTT. */
size_t ptls_get_read_epoch(ptls_t *tls)
{
    switch (tls->state) {
    case PTLS_STATE_CLIENT_HANDSHAKE_START:
    case PTLS_STATE_CLIENT_EXPECT_SERVER_HELLO:
    case PTLS_STATE_CLIENT_EXPECT_SECOND_SERVER_HELLO:
    case PTLS_STATE_SERVER_EXPECT_CLIENT_HELLO:
    case PTLS_STATE_SERVER_EXPECT_SECOND_CLIENT_HELLO:
        return 0; /* plaintext */
    case PTLS_STATE_SERVER_EXPECT_END_OF_EARLY_DATA:
        assert(!tls->ctx->omit_end_of_early_data);
        return 1; /* 0-rtt */
    case PTLS_STATE_CLIENT_EXPECT_ENCRYPTED_EXTENSIONS:
    case PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_REQUEST_OR_CERTIFICATE:
    case PTLS_STATE_CLIENT_EXPECT_CERTIFICATE:
    case PTLS_STATE_CLIENT_EXPECT_CERTIFICATE_VERIFY:
    case PTLS_STATE_CLIENT_EXPECT_FINISHED:
    case PTLS_STATE_SERVER_EXPECT_CERTIFICATE:
    case PTLS_STATE_SERVER_EXPECT_CERTIFICATE_VERIFY:
    case PTLS_STATE_SERVER_EXPECT_FINISHED:
        return 2; /* handshake */
    case PTLS_STATE_CLIENT_POST_HANDSHAKE:
    case PTLS_STATE_SERVER_POST_HANDSHAKE:
        return 3; /* 1-rtt */
    default:
        assert(!"invalid state");
        return SIZE_MAX;
    }
}
+
+int ptls_handle_message(ptls_t *tls, ptls_buffer_t *sendbuf, size_t epoch_offsets[5], size_t in_epoch, const void *input,
+ size_t inlen, ptls_handshake_properties_t *properties)
+{
+ return tls->is_server ? ptls_server_handle_message(tls, sendbuf, epoch_offsets, in_epoch, input, inlen, properties)
+ : ptls_client_handle_message(tls, sendbuf, epoch_offsets, in_epoch, input, inlen, properties);
+}
+
/**
 * Client-side variant of ptls_handle_message. Passing `input == NULL` triggers
 * generation of the initial ClientHello; otherwise `input` must carry a
 * handshake message received at epoch `in_epoch`, which has to match the epoch
 * the state machine currently expects (PTLS_ALERT_UNEXPECTED_MESSAGE otherwise).
 */
int ptls_client_handle_message(ptls_t *tls, ptls_buffer_t *sendbuf, size_t epoch_offsets[5], size_t in_epoch, const void *input,
                               size_t inlen, ptls_handshake_properties_t *properties)
{
    assert(!tls->is_server);

    /* raw emitter: writes messages without record framing, updating epoch_offsets[] on commit */
    struct st_ptls_raw_message_emitter_t emitter = {
        {sendbuf, &tls->traffic_protection.enc, 0, begin_raw_message, commit_raw_message}, SIZE_MAX, epoch_offsets};
    struct st_ptls_record_t rec = {PTLS_CONTENT_TYPE_HANDSHAKE, 0, inlen, input};

    if (input == NULL)
        return send_client_hello(tls, &emitter.super, properties, NULL);

    if (ptls_get_read_epoch(tls) != in_epoch)
        return PTLS_ALERT_UNEXPECTED_MESSAGE;

    return handle_handshake_record(tls, handle_client_handshake_message, &emitter.super, &rec, properties);
}
+
+int ptls_server_handle_message(ptls_t *tls, ptls_buffer_t *sendbuf, size_t epoch_offsets[5], size_t in_epoch, const void *input,
+ size_t inlen, ptls_handshake_properties_t *properties)
+{
+ assert(tls->is_server);
+
+ struct st_ptls_raw_message_emitter_t emitter = {
+ {sendbuf, &tls->traffic_protection.enc, 0, begin_raw_message, commit_raw_message}, SIZE_MAX, epoch_offsets};
+ struct st_ptls_record_t rec = {PTLS_CONTENT_TYPE_HANDSHAKE, 0, inlen, input};
+
+ assert(input);
+
+ if (ptls_get_read_epoch(tls) != in_epoch)
+ return PTLS_ALERT_UNEXPECTED_MESSAGE;
+
+ return handle_handshake_record(tls, handle_server_handshake_message, &emitter.super, &rec, properties);
+}
+
+int ptls_esni_init_context(ptls_context_t *ctx, ptls_esni_context_t *esni, ptls_iovec_t esni_keys,
+ ptls_key_exchange_context_t **key_exchanges)
+{
+ const uint8_t *src = esni_keys.base, *const end = src + esni_keys.len;
+ size_t num_key_exchanges, num_cipher_suites = 0;
+ int ret;
+
+ for (num_key_exchanges = 0; key_exchanges[num_key_exchanges] != NULL; ++num_key_exchanges)
+ ;
+
+ memset(esni, 0, sizeof(*esni));
+ if ((esni->key_exchanges = malloc(sizeof(*esni->key_exchanges) * (num_key_exchanges + 1))) == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ memcpy(esni->key_exchanges, key_exchanges, sizeof(*esni->key_exchanges) * (num_key_exchanges + 1));
+
+ /* ESNIKeys */
+ if ((ret = ptls_decode16(&esni->version, &src, end)) != 0)
+ goto Exit;
+ /* Skip checksum fields */
+ if (end - src < 4) {
+ ret = PTLS_ALERT_DECRYPT_ERROR;
+ goto Exit;
+ }
+ src += 4;
+ /* Published SNI field */
+ ptls_decode_open_block(src, end, 2, { src = end; });
+
+ /* Process the list of KeyShareEntries, verify for each of them that the ciphersuite is supported. */
+ ptls_decode_open_block(src, end, 2, {
+ do {
+ /* parse */
+ uint16_t id;
+ if ((ret = ptls_decode16(&id, &src, end)) != 0)
+ goto Exit;
+ ptls_decode_open_block(src, end, 2, { src = end; });
+ /* check that matching key-share exists */
+ ptls_key_exchange_context_t **found;
+ for (found = key_exchanges; *found != NULL; ++found)
+ if ((*found)->algo->id == id)
+ break;
+ if (found == NULL) {
+ ret = PTLS_ERROR_INCOMPATIBLE_KEY;
+ goto Exit;
+ }
+ } while (src != end);
+ });
+ /* Process the list of cipher_suites. If they are supported, store in esni context */
+ ptls_decode_open_block(src, end, 2, {
+ void *newp;
+ do {
+ uint16_t id;
+ if ((ret = ptls_decode16(&id, &src, end)) != 0)
+ goto Exit;
+ size_t i;
+ for (i = 0; ctx->cipher_suites[i] != NULL; ++i)
+ if (ctx->cipher_suites[i]->id == id)
+ break;
+ if (ctx->cipher_suites[i] != NULL) {
+ if ((newp = realloc(esni->cipher_suites, sizeof(*esni->cipher_suites) * (num_cipher_suites + 1))) == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ esni->cipher_suites = newp;
+ esni->cipher_suites[num_cipher_suites++].cipher_suite = ctx->cipher_suites[i];
+ }
+ } while (src != end);
+ if ((newp = realloc(esni->cipher_suites, sizeof(*esni->cipher_suites) * (num_cipher_suites + 1))) == NULL) {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ esni->cipher_suites = newp;
+ esni->cipher_suites[num_cipher_suites].cipher_suite = NULL;
+ });
+ /* Parse the padded length, not before, not after parameters */
+ if ((ret = ptls_decode16(&esni->padded_length, &src, end)) != 0)
+ goto Exit;
+ if ((ret = ptls_decode64(&esni->not_before, &src, end)) != 0)
+ goto Exit;
+ if ((ret = ptls_decode64(&esni->not_after, &src, end)) != 0)
+ goto Exit;
+ /* Skip the extension fields */
+ ptls_decode_block(src, end, 2, {
+ while (src != end) {
+ uint16_t ext_type;
+ if ((ret = ptls_decode16(&ext_type, &src, end)) != 0)
+ goto Exit;
+ ptls_decode_open_block(src, end, 2, { src = end; });
+ }
+ });
+
+ { /* calculate digests for every cipher-suite */
+ size_t i;
+ for (i = 0; esni->cipher_suites[i].cipher_suite != NULL; ++i) {
+ if ((ret = ptls_calc_hash(esni->cipher_suites[i].cipher_suite->hash, esni->cipher_suites[i].record_digest,
+ esni_keys.base, esni_keys.len)) != 0)
+ goto Exit;
+ }
+ }
+
+ ret = 0;
+Exit:
+ if (ret != 0)
+ ptls_esni_dispose_context(esni);
+ return ret;
+}
+
+void ptls_esni_dispose_context(ptls_esni_context_t *esni)
+{
+ size_t i;
+
+ if (esni->key_exchanges != NULL) {
+ for (i = 0; esni->key_exchanges[i] != NULL; ++i)
+ esni->key_exchanges[i]->on_exchange(esni->key_exchanges + i, 1, NULL, ptls_iovec_init(NULL, 0));
+ free(esni->key_exchanges);
+ }
+ free(esni->cipher_suites);
+}
+
+/**
+ * Obtain the ESNI secrets negotiated during the handshake.
+ */
+ptls_esni_secret_t *ptls_get_esni_secret(ptls_t *ctx)
+{
+ return ctx->esni;
+}
+
+/**
+ * checks if given name looks like an IP address
+ */
+int ptls_server_name_is_ipaddr(const char *name)
+{
+#ifdef AF_INET
+ struct sockaddr_in sin;
+ if (inet_pton(AF_INET, name, &sin) == 1)
+ return 1;
+#endif
+#ifdef AF_INET6
+ struct sockaddr_in6 sin6;
+ if (inet_pton(AF_INET6, name, &sin6) == 1)
+ return 1;
+#endif
+ return 0;
+}
+
/**
 * Writes the lowercase hex representation of `len` bytes at `_src` into `buf`
 * and NUL-terminates it; `buf` must hold at least `len * 2 + 1` bytes.
 * Returns `buf` for convenient use inside expressions.
 */
char *ptls_hexdump(char *buf, const void *_src, size_t len)
{
    static const char hex[] = "0123456789abcdef";
    const uint8_t *in = _src;
    const uint8_t *const in_end = in + len;
    char *out = buf;

    while (in != in_end) {
        *out++ = hex[*in >> 4];
        *out++ = hex[*in & 0xf];
        ++in;
    }
    *out = '\0';
    return buf;
}
--- /dev/null
+/*
+ * Copyright (c) 2016 DeNA Co., Ltd., Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef picotls_h
+#define picotls_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _WINDOWS
+#include "wincompat.h"
+#endif
+
+#include <assert.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <stddef.h>
+
+#if __GNUC__ >= 3
+#define PTLS_LIKELY(x) __builtin_expect(!!(x), 1)
+#define PTLS_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define PTLS_LIKELY(x) (x)
+#define PTLS_UNLIKELY(x) (x)
+#endif
+
+#ifdef _WINDOWS
+#define PTLS_THREADLOCAL __declspec(thread)
+#else
+#define PTLS_THREADLOCAL __thread
+#endif
+
+#ifndef PTLS_FUZZ_HANDSHAKE
+#define PTLS_FUZZ_HANDSHAKE 0
+#endif
+
+#define PTLS_HELLO_RANDOM_SIZE 32
+
+#define PTLS_AES128_KEY_SIZE 16
+#define PTLS_AES256_KEY_SIZE 32
+#define PTLS_AES_BLOCK_SIZE 16
+#define PTLS_AES_IV_SIZE 16
+#define PTLS_AESGCM_IV_SIZE 12
+#define PTLS_AESGCM_TAG_SIZE 16
+
+#define PTLS_CHACHA20_KEY_SIZE 32
+#define PTLS_CHACHA20_IV_SIZE 16
+#define PTLS_CHACHA20POLY1305_IV_SIZE 12
+#define PTLS_CHACHA20POLY1305_TAG_SIZE 16
+
+#define PTLS_BLOWFISH_KEY_SIZE 16
+#define PTLS_BLOWFISH_BLOCK_SIZE 8
+
+#define PTLS_SHA256_BLOCK_SIZE 64
+#define PTLS_SHA256_DIGEST_SIZE 32
+
+#define PTLS_SHA384_BLOCK_SIZE 128
+#define PTLS_SHA384_DIGEST_SIZE 48
+
+#define PTLS_MAX_SECRET_SIZE 32
+#define PTLS_MAX_IV_SIZE 16
+#define PTLS_MAX_DIGEST_SIZE 64
+
+/* cipher-suites */
+#define PTLS_CIPHER_SUITE_AES_128_GCM_SHA256 0x1301
+#define PTLS_CIPHER_SUITE_AES_256_GCM_SHA384 0x1302
+#define PTLS_CIPHER_SUITE_CHACHA20_POLY1305_SHA256 0x1303
+
+/* negotiated_groups */
+#define PTLS_GROUP_SECP256R1 23
+#define PTLS_GROUP_SECP384R1 24
+#define PTLS_GROUP_SECP521R1 25
+#define PTLS_GROUP_X25519 29
+#define PTLS_GROUP_X448 30
+
+/* signature algorithms */
+#define PTLS_SIGNATURE_RSA_PKCS1_SHA1 0x0201
+#define PTLS_SIGNATURE_RSA_PKCS1_SHA256 0x0401
+#define PTLS_SIGNATURE_ECDSA_SECP256R1_SHA256 0x0403
+#define PTLS_SIGNATURE_ECDSA_SECP384R1_SHA384 0x0503
+#define PTLS_SIGNATURE_ECDSA_SECP521R1_SHA512 0x0603
+#define PTLS_SIGNATURE_RSA_PSS_RSAE_SHA256 0x0804
+#define PTLS_SIGNATURE_RSA_PSS_RSAE_SHA384 0x0805
+#define PTLS_SIGNATURE_RSA_PSS_RSAE_SHA512 0x0806
+
+/* ESNI */
+#define PTLS_ESNI_VERSION_DRAFT03 0xff02
+
+#define PTLS_ESNI_RESPONSE_TYPE_ACCEPT 0
+#define PTLS_ESNI_RESPONSE_TYPE_RETRY_REQUEST 1
+
+/* error classes and macros */
+#define PTLS_ERROR_CLASS_SELF_ALERT 0
+#define PTLS_ERROR_CLASS_PEER_ALERT 0x100
+#define PTLS_ERROR_CLASS_INTERNAL 0x200
+
+#define PTLS_ERROR_GET_CLASS(e) ((e) & ~0xff)
+#define PTLS_ALERT_TO_SELF_ERROR(e) ((e) + PTLS_ERROR_CLASS_SELF_ALERT)
+#define PTLS_ALERT_TO_PEER_ERROR(e) ((e) + PTLS_ERROR_CLASS_PEER_ALERT)
+#define PTLS_ERROR_TO_ALERT(e) ((e)&0xff)
+
+/* the HKDF prefix */
+#define PTLS_HKDF_EXPAND_LABEL_PREFIX "tls13 "
+
+/* alerts */
+#define PTLS_ALERT_LEVEL_WARNING 1
+#define PTLS_ALERT_LEVEL_FATAL 2
+
+#define PTLS_ALERT_CLOSE_NOTIFY 0
+#define PTLS_ALERT_UNEXPECTED_MESSAGE 10
+#define PTLS_ALERT_BAD_RECORD_MAC 20
+#define PTLS_ALERT_HANDSHAKE_FAILURE 40
+#define PTLS_ALERT_BAD_CERTIFICATE 42
+#define PTLS_ALERT_CERTIFICATE_REVOKED 44
+#define PTLS_ALERT_CERTIFICATE_EXPIRED 45
+#define PTLS_ALERT_CERTIFICATE_UNKNOWN 46
+#define PTLS_ALERT_ILLEGAL_PARAMETER 47
+#define PTLS_ALERT_UNKNOWN_CA 48
+#define PTLS_ALERT_DECODE_ERROR 50
+#define PTLS_ALERT_DECRYPT_ERROR 51
+#define PTLS_ALERT_PROTOCOL_VERSION 70
+#define PTLS_ALERT_INTERNAL_ERROR 80
+#define PTLS_ALERT_USER_CANCELED 90
+#define PTLS_ALERT_MISSING_EXTENSION 109
+#define PTLS_ALERT_UNRECOGNIZED_NAME 112
+#define PTLS_ALERT_CERTIFICATE_REQUIRED 116
+#define PTLS_ALERT_NO_APPLICATION_PROTOCOL 120
+
+/* internal errors */
+#define PTLS_ERROR_NO_MEMORY (PTLS_ERROR_CLASS_INTERNAL + 1)
+#define PTLS_ERROR_IN_PROGRESS (PTLS_ERROR_CLASS_INTERNAL + 2)
+#define PTLS_ERROR_LIBRARY (PTLS_ERROR_CLASS_INTERNAL + 3)
+#define PTLS_ERROR_INCOMPATIBLE_KEY (PTLS_ERROR_CLASS_INTERNAL + 4)
+#define PTLS_ERROR_SESSION_NOT_FOUND (PTLS_ERROR_CLASS_INTERNAL + 5)
+#define PTLS_ERROR_STATELESS_RETRY (PTLS_ERROR_CLASS_INTERNAL + 6)
+#define PTLS_ERROR_NOT_AVAILABLE (PTLS_ERROR_CLASS_INTERNAL + 7)
#define PTLS_ERROR_COMPRESSION_FAILURE (PTLS_ERROR_CLASS_INTERNAL + 8)
/* NOTE(review): PTLS_ERROR_ESNI_RETRY shares its value (CLASS_INTERNAL + 8) with
 * PTLS_ERROR_COMPRESSION_FAILURE, so the two conditions are indistinguishable to
 * callers comparing error codes. Renumbering would alter the vendored upstream's
 * constants — confirm against upstream picotls before changing. */
#define PTLS_ERROR_ESNI_RETRY (PTLS_ERROR_CLASS_INTERNAL + 8)
#define PTLS_ERROR_REJECT_EARLY_DATA (PTLS_ERROR_CLASS_INTERNAL + 9)
+
+#define PTLS_ERROR_INCORRECT_BASE64 (PTLS_ERROR_CLASS_INTERNAL + 50)
+#define PTLS_ERROR_PEM_LABEL_NOT_FOUND (PTLS_ERROR_CLASS_INTERNAL + 51)
+#define PTLS_ERROR_BER_INCORRECT_ENCODING (PTLS_ERROR_CLASS_INTERNAL + 52)
+#define PTLS_ERROR_BER_MALFORMED_TYPE (PTLS_ERROR_CLASS_INTERNAL + 53)
+#define PTLS_ERROR_BER_MALFORMED_LENGTH (PTLS_ERROR_CLASS_INTERNAL + 54)
+#define PTLS_ERROR_BER_EXCESSIVE_LENGTH (PTLS_ERROR_CLASS_INTERNAL + 55)
+#define PTLS_ERROR_BER_ELEMENT_TOO_SHORT (PTLS_ERROR_CLASS_INTERNAL + 56)
+#define PTLS_ERROR_BER_UNEXPECTED_EOC (PTLS_ERROR_CLASS_INTERNAL + 57)
+#define PTLS_ERROR_DER_INDEFINITE_LENGTH (PTLS_ERROR_CLASS_INTERNAL + 58)
+#define PTLS_ERROR_INCORRECT_ASN1_SYNTAX (PTLS_ERROR_CLASS_INTERNAL + 59)
+#define PTLS_ERROR_INCORRECT_PEM_KEY_VERSION (PTLS_ERROR_CLASS_INTERNAL + 60)
+#define PTLS_ERROR_INCORRECT_PEM_ECDSA_KEY_VERSION (PTLS_ERROR_CLASS_INTERNAL + 61)
+#define PTLS_ERROR_INCORRECT_PEM_ECDSA_CURVE (PTLS_ERROR_CLASS_INTERNAL + 62)
+#define PTLS_ERROR_INCORRECT_PEM_ECDSA_KEYSIZE (PTLS_ERROR_CLASS_INTERNAL + 63)
+#define PTLS_ERROR_INCORRECT_ASN1_ECDSA_KEY_SYNTAX (PTLS_ERROR_CLASS_INTERNAL + 64)
+
+#define PTLS_HANDSHAKE_TYPE_CLIENT_HELLO 1
+#define PTLS_HANDSHAKE_TYPE_SERVER_HELLO 2
+#define PTLS_HANDSHAKE_TYPE_NEW_SESSION_TICKET 4
+#define PTLS_HANDSHAKE_TYPE_END_OF_EARLY_DATA 5
+#define PTLS_HANDSHAKE_TYPE_ENCRYPTED_EXTENSIONS 8
+#define PTLS_HANDSHAKE_TYPE_CERTIFICATE 11
+#define PTLS_HANDSHAKE_TYPE_CERTIFICATE_REQUEST 13
+#define PTLS_HANDSHAKE_TYPE_CERTIFICATE_VERIFY 15
+#define PTLS_HANDSHAKE_TYPE_FINISHED 20
+#define PTLS_HANDSHAKE_TYPE_KEY_UPDATE 24
+#define PTLS_HANDSHAKE_TYPE_COMPRESSED_CERTIFICATE 25
+#define PTLS_HANDSHAKE_TYPE_MESSAGE_HASH 254
+
+#define PTLS_ZERO_DIGEST_SHA256 \
+ { \
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, \
+ 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 \
+ }
+
+#define PTLS_ZERO_DIGEST_SHA384 \
+ { \
+ 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38, 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a, 0x21, 0xfd, 0xb7, 0x11, \
+ 0x14, 0xbe, 0x07, 0x43, 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda, 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, \
+ 0xfb, 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b \
+ }
+
+typedef struct st_ptls_t ptls_t;
+typedef struct st_ptls_context_t ptls_context_t;
+typedef struct st_ptls_key_schedule_t ptls_key_schedule_t;
+
+/**
+ * represents a sequence of octets
+ */
+typedef struct st_ptls_iovec_t {
+ uint8_t *base;
+ size_t len;
+} ptls_iovec_t;
+
+/**
+ * used for storing output
+ */
+typedef struct st_ptls_buffer_t {
+ uint8_t *base;
+ size_t capacity;
+ size_t off;
+ int is_allocated;
+} ptls_buffer_t;
+
+/**
+ * key exchange context built by ptls_key_exchange_algorithm::create.
+ */
+typedef struct st_ptls_key_exchange_context_t {
+ /**
+ * the underlying algorithm
+ */
+ const struct st_ptls_key_exchange_algorithm_t *algo;
+ /**
+ * the public key
+ */
+ ptls_iovec_t pubkey;
+ /**
+ * If `release` is set, the callee frees resources allocated to the context and set *keyex to NULL
+ */
+ int (*on_exchange)(struct st_ptls_key_exchange_context_t **keyex, int release, ptls_iovec_t *secret, ptls_iovec_t peerkey);
+} ptls_key_exchange_context_t;
+
+/**
+ * A key exchange algorithm.
+ */
+typedef const struct st_ptls_key_exchange_algorithm_t {
+ /**
+ * ID defined by the TLS specification
+ */
+ uint16_t id;
+ /**
+ * creates a context for asynchronous key exchange. The function is called when ClientHello is generated. The on_exchange
+ * callback of the created context is called when the client receives ServerHello.
+ */
+ int (*create)(const struct st_ptls_key_exchange_algorithm_t *algo, ptls_key_exchange_context_t **ctx);
+ /**
+ * implements synchronous key exchange. Called when receiving a ServerHello.
+ */
+ int (*exchange)(const struct st_ptls_key_exchange_algorithm_t *algo, ptls_iovec_t *pubkey, ptls_iovec_t *secret,
+ ptls_iovec_t peerkey);
+ /**
+ * crypto-specific data
+ */
+ intptr_t data;
+} ptls_key_exchange_algorithm_t;
+
+/**
+ * context of a symmetric cipher
+ */
+typedef struct st_ptls_cipher_context_t {
+ const struct st_ptls_cipher_algorithm_t *algo;
+ /* field above this line must not be altered by the crypto binding */
+ void (*do_dispose)(struct st_ptls_cipher_context_t *ctx);
+ void (*do_init)(struct st_ptls_cipher_context_t *ctx, const void *iv);
+ void (*do_transform)(struct st_ptls_cipher_context_t *ctx, void *output, const void *input, size_t len);
+} ptls_cipher_context_t;
+
+/**
+ * a symmetric cipher
+ */
+typedef const struct st_ptls_cipher_algorithm_t {
+ const char *name;
+ size_t key_size;
+ size_t block_size;
+ size_t iv_size;
+ size_t context_size;
+ int (*setup_crypto)(ptls_cipher_context_t *ctx, int is_enc, const void *key);
+} ptls_cipher_algorithm_t;
+
+/**
+ * AEAD context. AEAD implementations are allowed to stuff data at the end of the struct. The size of the memory allocated for the
+ * struct is governed by ptls_aead_algorithm_t::context_size.
+ */
+typedef struct st_ptls_aead_context_t {
+ const struct st_ptls_aead_algorithm_t *algo;
+ uint8_t static_iv[PTLS_MAX_IV_SIZE];
+ /* field above this line must not be altered by the crypto binding */
+ void (*dispose_crypto)(struct st_ptls_aead_context_t *ctx);
+ void (*do_encrypt_init)(struct st_ptls_aead_context_t *ctx, const void *iv, const void *aad, size_t aadlen);
+ size_t (*do_encrypt_update)(struct st_ptls_aead_context_t *ctx, void *output, const void *input, size_t inlen);
+ size_t (*do_encrypt_final)(struct st_ptls_aead_context_t *ctx, void *output);
+ size_t (*do_decrypt)(struct st_ptls_aead_context_t *ctx, void *output, const void *input, size_t inlen, const void *iv,
+ const void *aad, size_t aadlen);
+} ptls_aead_context_t;
+
+/**
+ * An AEAD cipher.
+ */
+typedef const struct st_ptls_aead_algorithm_t {
+ /**
+ * name (following the convention of `openssl ciphers -v ALL`)
+ */
+ const char *name;
+ /**
+ * the underlying key stream
+ */
+ ptls_cipher_algorithm_t *ctr_cipher;
+ /**
+ * the underlying ecb cipher (might not be available)
+ */
+ ptls_cipher_algorithm_t *ecb_cipher;
+ /**
+ * key size
+ */
+ size_t key_size;
+ /**
+ * size of the IV
+ */
+ size_t iv_size;
+ /**
+ * size of the tag
+ */
+ size_t tag_size;
+ /**
+ * size of memory allocated for ptls_aead_context_t. AEAD implementations can set this value to something greater than
+ * sizeof(ptls_aead_context_t) and stuff additional data at the bottom of the struct.
+ */
+ size_t context_size;
+ /**
+ * callback that sets up the crypto
+ */
+ int (*setup_crypto)(ptls_aead_context_t *ctx, int is_enc, const void *key);
+} ptls_aead_algorithm_t;
+
+/**
+ *
+ */
+typedef enum en_ptls_hash_final_mode_t {
+ /**
+ * obtains the digest and frees the context
+ */
+ PTLS_HASH_FINAL_MODE_FREE = 0,
+ /**
+ * obtains the digest and reset the context to initial state
+ */
+ PTLS_HASH_FINAL_MODE_RESET = 1,
+ /**
+ * obtains the digest while leaving the context as-is
+ */
+ PTLS_HASH_FINAL_MODE_SNAPSHOT = 2
+} ptls_hash_final_mode_t;
+
+/**
+ * A hash context.
+ */
+typedef struct st_ptls_hash_context_t {
+ /**
+ * feeds additional data into the hash context
+ */
+ void (*update)(struct st_ptls_hash_context_t *ctx, const void *src, size_t len);
+ /**
+ * returns the digest and performs necessary operation specified by mode
+ */
+ void (*final)(struct st_ptls_hash_context_t *ctx, void *md, ptls_hash_final_mode_t mode);
+ /**
+ * creates a copy of the hash context
+ */
+ struct st_ptls_hash_context_t *(*clone_)(struct st_ptls_hash_context_t *src);
+} ptls_hash_context_t;
+
+/**
+ * A hash algorithm and its properties.
+ */
+typedef const struct st_ptls_hash_algorithm_t {
+ /**
+ * block size
+ */
+ size_t block_size;
+ /**
+ * digest size
+ */
+ size_t digest_size;
+ /**
+ * constructor that creates the hash context
+ */
+ ptls_hash_context_t *(*create)(void);
+ /**
+ * digest of zero-length octets
+ */
+ uint8_t empty_digest[PTLS_MAX_DIGEST_SIZE];
+} ptls_hash_algorithm_t;
+
/* a TLS 1.3 cipher-suite: its IANA identifier plus the AEAD and hash algorithms it combines */
typedef const struct st_ptls_cipher_suite_t {
    uint16_t id;                 /* e.g. PTLS_CIPHER_SUITE_AES_128_GCM_SHA256 */
    ptls_aead_algorithm_t *aead;
    ptls_hash_algorithm_t *hash;
} ptls_cipher_suite_t;
+
struct st_ptls_traffic_protection_t;

/**
 * abstract interface for emitting handshake messages into `buf`;
 * `begin_message` is invoked before a message is written, `commit_message`
 * after it is complete. `enc` points to the write-side traffic protection.
 * record_header_length: presumably the number of header bytes this emitter
 * prepends per record (the raw emitter in picotls.c sets it to 0) — confirm.
 */
typedef struct st_ptls_message_emitter_t {
    ptls_buffer_t *buf;
    struct st_ptls_traffic_protection_t *enc;
    size_t record_header_length;
    int (*begin_message)(struct st_ptls_message_emitter_t *self);
    int (*commit_message)(struct st_ptls_message_emitter_t *self);
} ptls_message_emitter_t;
+
+/**
+ * holds ESNIKeys and the private key (instantiated by ptls_esni_parse, freed using ptls_esni_dispose)
+ */
+typedef struct st_ptls_esni_context_t {
+ ptls_key_exchange_context_t **key_exchanges;
+ struct {
+ ptls_cipher_suite_t *cipher_suite;
+ uint8_t record_digest[PTLS_MAX_DIGEST_SIZE];
+ } * cipher_suites;
+ uint16_t padded_length;
+ uint64_t not_before;
+ uint64_t not_after;
+ uint16_t version;
+} ptls_esni_context_t;
+
+/**
+ * holds the ESNI secret, as exchanged during the handshake
+ */
+
+#define PTLS_ESNI_NONCE_SIZE 16
+
+typedef struct st_ptls_esni_secret_t {
+ ptls_iovec_t secret;
+ uint8_t nonce[PTLS_ESNI_NONCE_SIZE];
+ uint8_t esni_contents_hash[PTLS_MAX_DIGEST_SIZE];
+ struct {
+ ptls_key_exchange_algorithm_t *key_share;
+ ptls_cipher_suite_t *cipher;
+ ptls_iovec_t pubkey;
+ uint8_t record_digest[PTLS_MAX_DIGEST_SIZE];
+ uint16_t padded_length;
+ } client;
+ uint16_t version;
+} ptls_esni_secret_t;
+
/* Declares a single-callback struct type `ptls_<name>_t` whose only member `cb`
 * receives the struct itself as the first argument, letting implementations
 * embed the struct and stash state alongside the callback. TYPE0 is the
 * zero-extra-argument variant. */
#define PTLS_CALLBACK_TYPE0(ret, name)                                                                                             \
    typedef struct st_ptls_##name##_t {                                                                                            \
        ret (*cb)(struct st_ptls_##name##_t * self);                                                                               \
    } ptls_##name##_t

#define PTLS_CALLBACK_TYPE(ret, name, ...)                                                                                         \
    typedef struct st_ptls_##name##_t {                                                                                            \
        ret (*cb)(struct st_ptls_##name##_t * self, __VA_ARGS__);                                                                  \
    } ptls_##name##_t
+
+/**
+ * arguments passsed to the on_client_hello callback
+ */
typedef struct st_ptls_on_client_hello_parameters_t {
    /**
     * SNI value received from the client. The value is {NULL, 0} if the extension was absent.
     */
    ptls_iovec_t server_name;
    /**
     * Raw value of the client_hello message.
     */
    ptls_iovec_t raw_message;
    /**
     * protocol names offered by the client — presumably via ALPN; confirm against the decoder
     */
    struct {
        ptls_iovec_t *list;
        size_t count;
    } negotiated_protocols;
    /* signature-scheme identifiers offered by the client */
    struct {
        const uint16_t *list;
        size_t count;
    } signature_algorithms;
    /* certificate-compression algorithm identifiers offered by the client */
    struct {
        const uint16_t *list;
        size_t count;
    } certificate_compression_algorithms;
    /* cipher-suite identifiers offered by the client */
    struct {
        const uint16_t *list;
        size_t count;
    } cipher_suites;
    /**
     * if ESNI was used
     */
    unsigned esni : 1;
} ptls_on_client_hello_parameters_t;
+
+/**
+ * returns current time in milliseconds (ptls_get_time can be used to return the physical time)
+ */
+PTLS_CALLBACK_TYPE0(uint64_t, get_time);
+/**
+ * after receiving ClientHello, the core calls the optional callback to give a chance to the swap the context depending on the input
+ * values. The callback is required to call `ptls_set_server_name` if an SNI extension needs to be sent to the client.
+ */
+PTLS_CALLBACK_TYPE(int, on_client_hello, ptls_t *tls, ptls_on_client_hello_parameters_t *params);
+/**
+ * callback to generate the certificate message. `ptls_context::certificates` are set when the callback is set to NULL.
+ */
+PTLS_CALLBACK_TYPE(int, emit_certificate, ptls_t *tls, ptls_message_emitter_t *emitter, ptls_key_schedule_t *key_sched,
+ ptls_iovec_t context, int push_status_request);
+/**
+ * when gerenating CertificateVerify, the core calls the callback to sign the handshake context using the certificate.
+ */
+PTLS_CALLBACK_TYPE(int, sign_certificate, ptls_t *tls, uint16_t *selected_algorithm, ptls_buffer_t *output, ptls_iovec_t input,
+ const uint16_t *algorithms, size_t num_algorithms);
+/**
+ * after receiving Certificate, the core calls the callback to verify the certificate chain and to obtain a pointer to a
+ * callback that should be used for verifying CertificateVerify. If an error occurs between a successful return from this
+ * callback to the invocation of the verify_sign callback, verify_sign is called with both data and sign set to an empty buffer.
+ * The implementor of the callback should use that as the opportunity to free any temporary data allocated for the verify_sign
+ * callback.
+ */
+PTLS_CALLBACK_TYPE(int, verify_certificate, ptls_t *tls,
+ int (**verify_sign)(void *verify_ctx, ptls_iovec_t data, ptls_iovec_t sign), void **verify_data,
+ ptls_iovec_t *certs, size_t num_certs);
+/**
+ * Encrypt-and-signs (or verify-and-decrypts) a ticket (server-only).
+ * When used for encryption (i.e., is_encrypt being set), the function should return 0 if successful, or else a non-zero value.
+ * When used for decryption, the function should return 0 (successful), PTLS_ERROR_REJECT_EARLY_DATA (successful, but 0-RTT is
+ * forbidden), or any other value to indicate failure.
+ */
+PTLS_CALLBACK_TYPE(int, encrypt_ticket, ptls_t *tls, int is_encrypt, ptls_buffer_t *dst, ptls_iovec_t src);
+/**
+ * saves a ticket (client-only)
+ */
+PTLS_CALLBACK_TYPE(int, save_ticket, ptls_t *tls, ptls_iovec_t input);
+/**
+ * event logging (incl. secret logging)
+ */
+typedef struct st_ptls_log_event_t {
+ void (*cb)(struct st_ptls_log_event_t *self, ptls_t *tls, const char *type, const char *fmt, ...)
+ __attribute__((format(printf, 4, 5)));
+} ptls_log_event_t;
+/**
+ * reference counting
+ */
+PTLS_CALLBACK_TYPE(void, update_open_count, ssize_t delta);
+/**
+ * applications that have their own record layer can set this function to derive their own traffic keys from the traffic secret.
+ * The cipher-suite that is being associated to the connection can be obtained by calling the ptls_get_cipher function.
+ */
+PTLS_CALLBACK_TYPE(int, update_traffic_key, ptls_t *tls, int is_enc, size_t epoch, const void *secret);
+/**
+ * callback for every extension detected during decoding
+ */
+PTLS_CALLBACK_TYPE(int, on_extension, ptls_t *tls, uint8_t hstype, uint16_t exttype, ptls_iovec_t extdata);
+/**
+ *
+ */
+typedef struct st_ptls_decompress_certificate_t {
+ /**
+ * list of supported algorithms terminated by UINT16_MAX
+ */
+ const uint16_t *supported_algorithms;
+ /**
+ * callback that decompresses the message
+ */
+ int (*cb)(struct st_ptls_decompress_certificate_t *self, ptls_t *tls, uint16_t algorithm, ptls_iovec_t output,
+ ptls_iovec_t input);
+} ptls_decompress_certificate_t;
+/**
+ * provides access to the ESNI shared secret (Zx). API is subject to change.
+ */
+PTLS_CALLBACK_TYPE(int, update_esni_key, ptls_t *tls, ptls_iovec_t secret, ptls_hash_algorithm_t *hash,
+ const void *hashed_esni_contents);
+
+/**
+ * the configuration
+ */
+struct st_ptls_context_t {
+ /**
+ * PRNG to be used
+ */
+ void (*random_bytes)(void *buf, size_t len);
+ /**
+ *
+ */
+ ptls_get_time_t *get_time;
+ /**
+ * list of supported key-exchange algorithms terminated by NULL
+ */
+ ptls_key_exchange_algorithm_t **key_exchanges;
+ /**
+ * list of supported cipher-suites terminated by NULL
+ */
+ ptls_cipher_suite_t **cipher_suites;
+ /**
+ * list of certificates
+ */
+ struct {
+ ptls_iovec_t *list;
+ size_t count;
+ } certificates;
+ /**
+ * list of ESNI data terminated by NULL
+ */
+ ptls_esni_context_t **esni;
+ /**
+ *
+ */
+ ptls_on_client_hello_t *on_client_hello;
+ /**
+ *
+ */
+ ptls_emit_certificate_t *emit_certificate;
+ /**
+ *
+ */
+ ptls_sign_certificate_t *sign_certificate;
+ /**
+ *
+ */
+ ptls_verify_certificate_t *verify_certificate;
+ /**
+ * lifetime of a session ticket (server-only)
+ */
+ uint32_t ticket_lifetime;
+ /**
+ * maximum permitted size of early data (server-only)
+ */
+ uint32_t max_early_data_size;
+ /**
+ * maximum size of the message buffer (default: 0 = unlimited = 3 + 2^24 bytes)
+ */
+ size_t max_buffer_size;
+ /**
+ * the field is obsolete; should be set to NULL for QUIC draft-17. Note also that even though everybody did, it was incorrect
+ * to set the value to "quic " in the earlier versions of the draft.
+ */
+ const char *hkdf_label_prefix__obsolete;
+ /**
+ * if set, psk handshakes use (ec)dhe
+ */
+ unsigned require_dhe_on_psk : 1;
+ /**
+ * if exporter master secrets should be recorded
+ */
+ unsigned use_exporter : 1;
+ /**
+ * if ChangeCipherSpec message should be sent during handshake
+ */
+ unsigned send_change_cipher_spec : 1;
+ /**
+ * if set, the server requests client certificates
+ * to authenticate the client.
+ */
+ unsigned require_client_authentication : 1;
+ /**
+ * if set, EOED will not be emitted or accepted
+ */
+ unsigned omit_end_of_early_data : 1;
+ /**
+ *
+ */
+ ptls_encrypt_ticket_t *encrypt_ticket;
+ /**
+ *
+ */
+ ptls_save_ticket_t *save_ticket;
+ /**
+ *
+ */
+ ptls_log_event_t *log_event;
+ /**
+ *
+ */
+ ptls_update_open_count_t *update_open_count;
+ /**
+ *
+ */
+ ptls_update_traffic_key_t *update_traffic_key;
+ /**
+ *
+ */
+ ptls_decompress_certificate_t *decompress_certificate;
+ /**
+ *
+ */
+ ptls_update_esni_key_t *update_esni_key;
+ /**
+ *
+ */
+ ptls_on_extension_t *on_extension;
+};
+
/* an extension expressed as (type, opaque payload); used for additional_extensions below */
typedef struct st_ptls_raw_extension_t {
    uint16_t type;
    ptls_iovec_t data;
} ptls_raw_extension_t;

/* tri-state result indicating whether the peer has accepted early data */
typedef enum en_ptls_early_data_acceptance_t {
    PTLS_EARLY_DATA_ACCEPTANCE_UNKNOWN = 0,
    PTLS_EARLY_DATA_REJECTED,
    PTLS_EARLY_DATA_ACCEPTED
} ptls_early_data_acceptance_t;
+
+/**
+ * optional arguments to client-driven handshake
+ */
+#ifdef _WINDOWS
+/* suppress warning C4201: nonstandard extension used: nameless struct/union */
+#pragma warning(push)
+#pragma warning(disable : 4201)
+#endif
typedef struct st_ptls_handshake_properties_t {
    /* role-specific arguments: `client` applies to client-driven handshakes, `server`
     * to server-side ones (anonymous union — only the side matching the role is read) */
    union {
        struct {
            /**
             * list of protocols offered through ALPN
             */
            struct {
                const ptls_iovec_t *list;
                size_t count;
            } negotiated_protocols;
            /**
             * session ticket sent to the application via save_ticket callback
             */
            ptls_iovec_t session_ticket;
            /**
             * pointer to store the maximum size of early-data that can be sent immediately. If set to non-NULL, the first call to
             * ptls_handshake (or ptls_handle_message) will set `*max_early_data` to the value obtained from the session ticket, or
             * to zero if early-data cannot be sent. If NULL, early data will not be used.
             */
            size_t *max_early_data_size;
            /**
             * If early-data has been accepted by peer, or if the state is still unknown. The state changes anytime after handshake
             * keys become available. Applications can peek the tri-state variable every time it calls `ptls_handshake` or
             * `ptls_handle_message` to determine the result at the earliest moment. This is an output parameter.
             */
            ptls_early_data_acceptance_t early_data_acceptance;
            /**
             * negotiate the key exchange method before sending key_share
             */
            unsigned negotiate_before_key_exchange : 1;
            /**
             * ESNIKeys (the value of the TXT record, after being base64-"decoded")
             */
            ptls_iovec_t esni_keys;
        } client;
        struct {
            /**
             * psk binder being selected (len is set to zero if none)
             */
            struct {
                uint8_t base[PTLS_MAX_DIGEST_SIZE];
                size_t len;
            } selected_psk_binder;
            /**
             * parameters related to use of the Cookie extension
             */
            struct {
                /**
                 * HMAC key to protect the integrity of the cookie. The key should be as long as the digest size of the first
                 * ciphersuite specified in ptls_context_t (i.e. the hash algorithm of the best ciphersuite that can be chosen).
                 */
                const void *key;
                /**
                 * additional data to be used for verifying the cookie
                 */
                ptls_iovec_t additional_data;
            } cookie;
            /**
             * if HRR should always be sent
             */
            unsigned enforce_retry : 1;
            /**
             * if retry should be stateless (cookie.key MUST be set when this option is used)
             */
            unsigned retry_uses_cookie : 1;
        } server;
    };
    /**
     * an optional list of additional extensions to send either in CH or EE, terminated by type == UINT16_MAX
     */
    ptls_raw_extension_t *additional_extensions;
    /**
     * an optional callback that returns a boolean value indicating if a particular extension should be collected
     */
    int (*collect_extension)(ptls_t *tls, struct st_ptls_handshake_properties_t *properties, uint16_t type);
    /**
     * an optional callback that reports the extensions being collected
     */
    int (*collected_extensions)(ptls_t *tls, struct st_ptls_handshake_properties_t *properties, ptls_raw_extension_t *extensions);
} ptls_handshake_properties_t;
+#ifdef _WINDOWS
+#pragma warning(pop)
+#endif
+
+/**
+ * builds a new ptls_iovec_t instance using the supplied parameters
+ */
+static ptls_iovec_t ptls_iovec_init(const void *p, size_t len);
+/**
+ * initializes a buffer, setting the default destination to the small buffer provided as the argument.
+ */
+static void ptls_buffer_init(ptls_buffer_t *buf, void *smallbuf, size_t smallbuf_size);
+/**
+ * disposes a buffer, freeing resources allocated by the buffer itself (if any)
+ */
+static void ptls_buffer_dispose(ptls_buffer_t *buf);
+/**
+ * internal
+ */
+void ptls_buffer__release_memory(ptls_buffer_t *buf);
+/**
+ * reserves space for additional amount of memory
+ */
+int ptls_buffer_reserve(ptls_buffer_t *buf, size_t delta);
+/**
+ * internal
+ */
+int ptls_buffer__do_pushv(ptls_buffer_t *buf, const void *src, size_t len);
+/**
+ * internal
+ */
+int ptls_buffer__adjust_quic_blocksize(ptls_buffer_t *buf, size_t body_size);
+/**
+ * internal
+ */
+int ptls_buffer__adjust_asn1_blocksize(ptls_buffer_t *buf, size_t body_size);
+/**
+ * pushes an unsigned bigint
+ */
+int ptls_buffer_push_asn1_ubigint(ptls_buffer_t *buf, const void *bignum, size_t size);
+/**
+ * encodes a quic varint (maximum length is PTLS_ENCODE_QUICINT_CAPACITY)
+ */
+static uint8_t *ptls_encode_quicint(uint8_t *p, uint64_t v);
+#define PTLS_ENCODE_QUICINT_CAPACITY 8
+
+#define ptls_buffer_pushv(buf, src, len) \
+ do { \
+ if ((ret = ptls_buffer__do_pushv((buf), (src), (len))) != 0) \
+ goto Exit; \
+ } while (0)
+
+#define ptls_buffer_push(buf, ...) \
+ do { \
+ if ((ret = ptls_buffer__do_pushv((buf), (uint8_t[]){__VA_ARGS__}, sizeof((uint8_t[]){__VA_ARGS__}))) != 0) \
+ goto Exit; \
+ } while (0)
+
+#define ptls_buffer_push16(buf, v) \
+ do { \
+ uint16_t _v = (v); \
+ ptls_buffer_push(buf, (uint8_t)(_v >> 8), (uint8_t)_v); \
+ } while (0)
+
+#define ptls_buffer_push24(buf, v) \
+ do { \
+ uint32_t _v = (v); \
+ ptls_buffer_push(buf, (uint8_t)(_v >> 16), (uint8_t)(_v >> 8), (uint8_t)_v); \
+ } while (0)
+
+#define ptls_buffer_push32(buf, v) \
+ do { \
+ uint32_t _v = (v); \
+ ptls_buffer_push(buf, (uint8_t)(_v >> 24), (uint8_t)(_v >> 16), (uint8_t)(_v >> 8), (uint8_t)_v); \
+ } while (0)
+
+#define ptls_buffer_push64(buf, v) \
+ do { \
+ uint64_t _v = (v); \
+ ptls_buffer_push(buf, (uint8_t)(_v >> 56), (uint8_t)(_v >> 48), (uint8_t)(_v >> 40), (uint8_t)(_v >> 32), \
+ (uint8_t)(_v >> 24), (uint8_t)(_v >> 16), (uint8_t)(_v >> 8), (uint8_t)_v); \
+ } while (0)
+
+#define ptls_buffer_push_quicint(buf, v) \
+ do { \
+ if ((ret = ptls_buffer_reserve((buf), PTLS_ENCODE_QUICINT_CAPACITY)) != 0) \
+ goto Exit; \
+ uint8_t *d = ptls_encode_quicint((buf)->base + (buf)->off, (v)); \
+ (buf)->off = d - (buf)->base; \
+ } while (0)
+
+#define ptls_buffer_push_block(buf, _capacity, block) \
+ do { \
+ size_t capacity = (_capacity); \
+ ptls_buffer_pushv((buf), (uint8_t *)"\0\0\0\0\0\0\0", capacity != -1 ? capacity : 1); \
+ size_t body_start = (buf)->off; \
+ do { \
+ block \
+ } while (0); \
+ size_t body_size = (buf)->off - body_start; \
+ if (capacity != -1) { \
+ for (; capacity != 0; --capacity) \
+ (buf)->base[body_start - capacity] = (uint8_t)(body_size >> (8 * (capacity - 1))); \
+ } else { \
+ if ((ret = ptls_buffer__adjust_quic_blocksize((buf), body_size)) != 0) \
+ goto Exit; \
+ } \
+ } while (0)
+
+#define ptls_buffer_push_asn1_block(buf, block) \
+ do { \
+ ptls_buffer_push((buf), 0xff); /* dummy */ \
+ size_t body_start = (buf)->off; \
+ do { \
+ block \
+ } while (0); \
+ size_t body_size = (buf)->off - body_start; \
+ if (body_size < 128) { \
+ (buf)->base[body_start - 1] = (uint8_t)body_size; \
+ } else { \
+ if ((ret = ptls_buffer__adjust_asn1_blocksize((buf), body_size)) != 0) \
+ goto Exit; \
+ } \
+ } while (0)
+
+#define ptls_buffer_push_asn1_sequence(buf, block) \
+ do { \
+ ptls_buffer_push((buf), 0x30); \
+ ptls_buffer_push_asn1_block((buf), block); \
+ } while (0)
+
+#define ptls_buffer_push_message_body(buf, key_sched, type, block) \
+ do { \
+ ptls_buffer_t *_buf = (buf); \
+ ptls_key_schedule_t *_key_sched = (key_sched); \
+ size_t mess_start = _buf->off; \
+ ptls_buffer_push(_buf, (type)); \
+ ptls_buffer_push_block(_buf, 3, block); \
+ if (_key_sched != NULL) \
+ ptls__key_schedule_update_hash(_key_sched, _buf->base + mess_start, _buf->off - mess_start); \
+ } while (0)
+
+#define ptls_push_message(emitter, key_sched, type, block) \
+ do { \
+ ptls_message_emitter_t *_emitter = (emitter); \
+ if ((ret = _emitter->begin_message(_emitter)) != 0) \
+ goto Exit; \
+ ptls_buffer_push_message_body(_emitter->buf, (key_sched), (type), block); \
+ if ((ret = _emitter->commit_message(_emitter)) != 0) \
+ goto Exit; \
+ } while (0)
+
+int ptls_decode16(uint16_t *value, const uint8_t **src, const uint8_t *end);
+int ptls_decode24(uint32_t *value, const uint8_t **src, const uint8_t *end);
+int ptls_decode32(uint32_t *value, const uint8_t **src, const uint8_t *end);
+int ptls_decode64(uint64_t *value, const uint8_t **src, const uint8_t *end);
+uint64_t ptls_decode_quicint(const uint8_t **src, const uint8_t *end);
+
+#define ptls_decode_open_block(src, end, capacity, block) \
+ do { \
+ size_t _capacity = (capacity); \
+ size_t _block_size; \
+ if (_capacity == -1) { \
+ uint64_t _block_size64; \
+ const uint8_t *_src = (src); \
+ if ((_block_size64 = ptls_decode_quicint(&_src, end)) == UINT64_MAX || \
+ (sizeof(size_t) < 8 && (_block_size64 >> (8 * sizeof(size_t))) != 0)) { \
+ ret = PTLS_ALERT_DECODE_ERROR; \
+ goto Exit; \
+ } \
+ (src) = _src; \
+ _block_size = (size_t)_block_size64; \
+ } else { \
+ if (_capacity > (size_t)(end - (src))) { \
+ ret = PTLS_ALERT_DECODE_ERROR; \
+ goto Exit; \
+ } \
+ _block_size = 0; \
+ do { \
+ _block_size = _block_size << 8 | *(src)++; \
+ } while (--_capacity != 0); \
+ } \
+ if (_block_size > (size_t)(end - (src))) { \
+ ret = PTLS_ALERT_DECODE_ERROR; \
+ goto Exit; \
+ } \
+ do { \
+ const uint8_t *const end = (src) + _block_size; \
+ do { \
+ block \
+ } while (0); \
+ if ((src) != end) { \
+ ret = PTLS_ALERT_DECODE_ERROR; \
+ goto Exit; \
+ } \
+ } while (0); \
+ } while (0)
+
+#define ptls_decode_assert_block_close(src, end) \
+ do { \
+ if ((src) != end) { \
+ ret = PTLS_ALERT_DECODE_ERROR; \
+ goto Exit; \
+ } \
+ } while (0);
+
+#define ptls_decode_block(src, end, capacity, block) \
+ do { \
+ ptls_decode_open_block((src), end, capacity, block); \
+ ptls_decode_assert_block_close((src), end); \
+ } while (0)
+
+/**
+ * create a client object to handle new TLS connection
+ */
+ptls_t *ptls_client_new(ptls_context_t *ctx);
+/**
+ * create a server object to handle new TLS connection
+ */
+ptls_t *ptls_server_new(ptls_context_t *ctx);
+/**
+ * creates an object that handles a new TLS connection
+ */
+static ptls_t *ptls_new(ptls_context_t *ctx, int is_server);
+/**
+ * releases all resources associated to the object
+ */
+void ptls_free(ptls_t *tls);
+/**
+ * returns address of the crypto callbacks that the connection is using
+ */
+ptls_context_t *ptls_get_context(ptls_t *tls);
+/**
+ * updates the context of a connection. Can be called from `on_client_hello` callback.
+ */
+void ptls_set_context(ptls_t *tls, ptls_context_t *ctx);
+/**
+ * returns the client-random
+ */
+ptls_iovec_t ptls_get_client_random(ptls_t *tls);
+/**
+ * returns the cipher-suite being used
+ */
+ptls_cipher_suite_t *ptls_get_cipher(ptls_t *tls);
+/**
+ * returns the server-name (NULL if SNI is not used or failed to negotiate)
+ */
+const char *ptls_get_server_name(ptls_t *tls);
+/**
+ * sets the server-name associated to the TLS connection. If server_name_len is zero, then strlen(server_name) is called to
+ * determine the length of the name.
+ * On the client-side, the value is used for certificate validation. The value will be also sent as an SNI extension, if it looks
+ * like a DNS name.
+ * On the server-side, it can be called from on_client_hello to indicate the acceptance of the SNI extension to the client.
+ */
+int ptls_set_server_name(ptls_t *tls, const char *server_name, size_t server_name_len);
+/**
+ * returns the negotiated protocol (or NULL)
+ */
+const char *ptls_get_negotiated_protocol(ptls_t *tls);
+/**
+ * sets the negotiated protocol. If protocol_len is zero, strlen(protocol) is called to determine the length of the protocol name.
+ */
+int ptls_set_negotiated_protocol(ptls_t *tls, const char *protocol, size_t protocol_len);
+/**
+ * returns if the handshake has been completed
+ */
+int ptls_handshake_is_complete(ptls_t *tls);
+/**
+ * returns if a PSK (or PSK-DHE) handshake was performed
+ */
+int ptls_is_psk_handshake(ptls_t *tls);
+/**
+ * returns a pointer to user data pointer (client is responsible for freeing the associated data prior to calling ptls_free)
+ */
+void **ptls_get_data_ptr(ptls_t *tls);
+/**
+ *
+ */
+int ptls_skip_tracing(ptls_t *tls);
+/**
+ *
+ */
+void ptls_set_skip_tracing(ptls_t *tls, int skip_tracing);
+/**
+ * proceeds with the handshake, optionally taking some input from peer. The function returns zero in case the handshake completed
+ * successfully. PTLS_ERROR_IN_PROGRESS is returned in case the handshake is incomplete. Otherwise, an error value is returned. The
+ * contents of sendbuf should be sent to the client, regardless of whether an error is returned. inlen is an argument used for
+ * both input and output. As an input, the arguments takes the size of the data available as input. Upon return the value is updated
+ * to the number of bytes consumed by the handshake. In case the returned value is PTLS_ERROR_IN_PROGRESS there is a guarantee that
+ * all the input are consumed (i.e. the value of inlen does not change).
+ */
+int ptls_handshake(ptls_t *tls, ptls_buffer_t *sendbuf, const void *input, size_t *inlen, ptls_handshake_properties_t *args);
+/**
+ * decrypts the first record within given buffer
+ */
+int ptls_receive(ptls_t *tls, ptls_buffer_t *plaintextbuf, const void *input, size_t *len);
+/**
+ * encrypts given buffer into multiple TLS records
+ */
+int ptls_send(ptls_t *tls, ptls_buffer_t *sendbuf, const void *input, size_t inlen);
+/**
+ * updates the send traffic key (as well as asks the peer to update)
+ */
+int ptls_update_key(ptls_t *tls, int request_update);
+/**
+ * Returns if the context is a server context.
+ */
+int ptls_is_server(ptls_t *tls);
+/**
+ * returns per-record overhead
+ */
+size_t ptls_get_record_overhead(ptls_t *tls);
+/**
+ * sends an alert
+ */
+int ptls_send_alert(ptls_t *tls, ptls_buffer_t *sendbuf, uint8_t level, uint8_t description);
+/**
+ *
+ */
+int ptls_export_secret(ptls_t *tls, void *output, size_t outlen, const char *label, ptls_iovec_t context_value, int is_early);
+/**
+ * build the body of a Certificate message. Can be called with tls set to NULL in order to create a precompressed message.
+ */
+int ptls_build_certificate_message(ptls_buffer_t *buf, ptls_iovec_t request_context, ptls_iovec_t *certificates,
+ size_t num_certificates, ptls_iovec_t ocsp_status);
+/**
+ *
+ */
+int ptls_calc_hash(ptls_hash_algorithm_t *algo, void *output, const void *src, size_t len);
+/**
+ *
+ */
+ptls_hash_context_t *ptls_hmac_create(ptls_hash_algorithm_t *algo, const void *key, size_t key_size);
+/**
+ *
+ */
+int ptls_hkdf_extract(ptls_hash_algorithm_t *hash, void *output, ptls_iovec_t salt, ptls_iovec_t ikm);
+/**
+ *
+ */
+int ptls_hkdf_expand(ptls_hash_algorithm_t *hash, void *output, size_t outlen, ptls_iovec_t prk, ptls_iovec_t info);
+/**
+ *
+ */
+int ptls_hkdf_expand_label(ptls_hash_algorithm_t *algo, void *output, size_t outlen, ptls_iovec_t secret, const char *label,
+ ptls_iovec_t hash_value, const char *label_prefix);
+/**
+ * instantiates a symmetric cipher
+ */
+ptls_cipher_context_t *ptls_cipher_new(ptls_cipher_algorithm_t *algo, int is_enc, const void *key);
+/**
+ * destroys a symmetric cipher
+ */
+void ptls_cipher_free(ptls_cipher_context_t *ctx);
+/**
+ * initializes the IV; this function must be called prior to calling ptls_cipher_encrypt
+ */
+static void ptls_cipher_init(ptls_cipher_context_t *ctx, const void *iv);
+/**
+ * Encrypts given text. The function must be used in a way that the output length would be equal to the input length. For example,
+ * when using a block cipher in ECB mode, `len` must be a multiple of the block size. The length can be
+ * of any value when using a stream cipher or a block cipher in CTR mode.
+ */
+static void ptls_cipher_encrypt(ptls_cipher_context_t *ctx, void *output, const void *input, size_t len);
+/**
+ * instantiates an AEAD cipher given a secret, which is expanded using hkdf to a set of key and iv
+ * @param aead
+ * @param hash
+ * @param is_enc 1 if creating a context for encryption, 0 if creating a context for decryption
+ * @param secret the secret. The size must be the digest length of the hash algorithm
+ * @return pointer to an AEAD context if successful, otherwise NULL
+ */
+ptls_aead_context_t *ptls_aead_new(ptls_aead_algorithm_t *aead, ptls_hash_algorithm_t *hash, int is_enc, const void *secret,
+ const char *label_prefix);
+/**
+ * destroys an AEAD cipher context
+ */
+void ptls_aead_free(ptls_aead_context_t *ctx);
+/**
+ *
+ */
+size_t ptls_aead_encrypt(ptls_aead_context_t *ctx, void *output, const void *input, size_t inlen, uint64_t seq, const void *aad,
+ size_t aadlen);
+/**
+ * initializes the internal state of the encryptor
+ */
+static void ptls_aead_encrypt_init(ptls_aead_context_t *ctx, uint64_t seq, const void *aad, size_t aadlen);
+/**
+ * encrypts the input and updates the GCM state
+ * @return number of bytes emitted to output
+ */
+static size_t ptls_aead_encrypt_update(ptls_aead_context_t *ctx, void *output, const void *input, size_t inlen);
+/**
+ * emits buffered data (if any) and the GCM tag
+ * @return number of bytes emitted to output
+ */
+static size_t ptls_aead_encrypt_final(ptls_aead_context_t *ctx, void *output);
+/**
+ * decrypts an AEAD record
+ * @return number of bytes emitted to output if successful, or SIZE_MAX if the input is invalid (e.g. broken MAC)
+ */
+static size_t ptls_aead_decrypt(ptls_aead_context_t *ctx, void *output, const void *input, size_t inlen, uint64_t seq,
+ const void *aad, size_t aadlen);
+/**
+ * Return the current read epoch.
+ */
+size_t ptls_get_read_epoch(ptls_t *tls);
+/**
+ * Runs the handshake by dealing directly with handshake messages. Callers MUST delay supplying input to this function until the
+ * epoch of the input becomes equal to the value returned by `ptls_get_read_epoch()`.
+ * @param tls the TLS context
+ * @param sendbuf buffer to which the output will be written
+ * @param epoch_offsets start and end offset of the messages in each epoch. For example, when the server emits ServerHello between
+ * offset 0 and 38, the following handshake messages between offset 39 and 348, and a post-handshake message
+ * between 349 and 451, epoch_offsets will be {0,39,39,349,452} and the length of the sendbuf will be 452.
+ * This argument is an I/O argument. Applications can either reset sendbuf to empty and epoch_offsets to
+ * all zero every time they invoke the function, or retain the values until the handshake completes so that
+ * data will be appended to sendbuf and epoch_offsets will be adjusted.
+ * @param in_epoch epoch of the input
+ * @param input input bytes (must be NULL when starting the handshake on the client side)
+ * @param inlen length of the input
+ * @param properties properties specific to the running handshake
+ * @return same as `ptls_handshake`
+ */
+int ptls_handle_message(ptls_t *tls, ptls_buffer_t *sendbuf, size_t epoch_offsets[5], size_t in_epoch, const void *input,
+ size_t inlen, ptls_handshake_properties_t *properties);
+int ptls_client_handle_message(ptls_t *tls, ptls_buffer_t *sendbuf, size_t epoch_offsets[5], size_t in_epoch, const void *input,
+ size_t inlen, ptls_handshake_properties_t *properties);
+int ptls_server_handle_message(ptls_t *tls, ptls_buffer_t *sendbuf, size_t epoch_offsets[5], size_t in_epoch, const void *input,
+ size_t inlen, ptls_handshake_properties_t *properties);
+/**
+ * internal
+ */
+void ptls_aead__build_iv(ptls_aead_context_t *ctx, uint8_t *iv, uint64_t seq);
+/**
+ * internal
+ */
+void ptls__key_schedule_update_hash(ptls_key_schedule_t *sched, const uint8_t *msg, size_t msglen);
+/**
+ * clears memory
+ */
+extern void (*volatile ptls_clear_memory)(void *p, size_t len);
+/**
+ * constant-time memcmp
+ */
+extern int (*volatile ptls_mem_equal)(const void *x, const void *y, size_t len);
+/**
+ *
+ */
+static ptls_iovec_t ptls_iovec_init(const void *p, size_t len);
+/**
+ * checks if a server name is an IP address.
+ */
+int ptls_server_name_is_ipaddr(const char *name);
+/**
+ * loads a certificate chain to ptls_context_t::certificates. `certificate.list` and each element of the list is allocated by
+ * malloc. It is the responsibility of the user to free them when discarding the TLS context.
+ */
+int ptls_load_certificates(ptls_context_t *ctx, char const *cert_pem_file);
+/**
+ *
+ */
+int ptls_esni_init_context(ptls_context_t *ctx, ptls_esni_context_t *esni, ptls_iovec_t esni_keys,
+ ptls_key_exchange_context_t **key_exchanges);
+/**
+ *
+ */
+void ptls_esni_dispose_context(ptls_esni_context_t *esni);
+/**
+ * Obtain the ESNI secrets negotiated during the handshake.
+ */
+ptls_esni_secret_t *ptls_get_esni_secret(ptls_t *ctx);
+/**
+ *
+ */
+char *ptls_hexdump(char *dst, const void *src, size_t len);
+/**
+ * the default get_time callback
+ */
+extern ptls_get_time_t ptls_get_time;
+#if PICOTLS_USE_DTRACE
+/**
+ *
+ */
+extern PTLS_THREADLOCAL unsigned ptls_default_skip_tracing;
+#else
+#define ptls_default_skip_tracing 0
+#endif
+
+/* inline functions */
+
+inline ptls_t *ptls_new(ptls_context_t *ctx, int is_server)
+{
+ return is_server ? ptls_server_new(ctx) : ptls_client_new(ctx);
+}
+
+inline ptls_iovec_t ptls_iovec_init(const void *p, size_t len)
+{
+ /* avoid the "return (ptls_iovec_t){(uint8_t *)p, len};" construct because it requires C99
+ * and triggers a warning "C4204: nonstandard extension used: non-constant aggregate initializer"
+ * in Visual Studio */
+ ptls_iovec_t r;
+ r.base = (uint8_t *)p;
+ r.len = len;
+ return r;
+}
+
+inline void ptls_buffer_init(ptls_buffer_t *buf, void *smallbuf, size_t smallbuf_size)
+{
+ assert(smallbuf != NULL);
+ buf->base = (uint8_t *)smallbuf;
+ buf->off = 0;
+ buf->capacity = smallbuf_size;
+ buf->is_allocated = 0;
+}
+
+inline void ptls_buffer_dispose(ptls_buffer_t *buf)
+{
+ ptls_buffer__release_memory(buf);
+ *buf = (ptls_buffer_t){NULL};
+}
+
+inline uint8_t *ptls_encode_quicint(uint8_t *p, uint64_t v)
+{
+ if (PTLS_UNLIKELY(v > 63)) {
+ if (PTLS_UNLIKELY(v > 16383)) {
+ unsigned sb;
+ if (PTLS_UNLIKELY(v > 1073741823)) {
+ assert(v <= 4611686018427387903);
+ *p++ = 0xc0 | (uint8_t)(v >> 56);
+ sb = 6 * 8;
+ } else {
+ *p++ = 0x80 | (uint8_t)(v >> 24);
+ sb = 2 * 8;
+ }
+ do {
+ *p++ = (uint8_t)(v >> sb);
+ } while ((sb -= 8) != 0);
+ } else {
+ *p++ = 0x40 | (uint8_t)((uint16_t)v >> 8);
+ }
+ }
+ *p++ = (uint8_t)v;
+ return p;
+}
+
+inline void ptls_cipher_init(ptls_cipher_context_t *ctx, const void *iv)
+{
+ ctx->do_init(ctx, iv);
+}
+
+inline void ptls_cipher_encrypt(ptls_cipher_context_t *ctx, void *output, const void *input, size_t len)
+{
+ ctx->do_transform(ctx, output, input, len);
+}
+
+inline void ptls_aead_encrypt_init(ptls_aead_context_t *ctx, uint64_t seq, const void *aad, size_t aadlen)
+{
+ uint8_t iv[PTLS_MAX_IV_SIZE];
+
+ ptls_aead__build_iv(ctx, iv, seq);
+ ctx->do_encrypt_init(ctx, iv, aad, aadlen);
+}
+
+inline size_t ptls_aead_encrypt_update(ptls_aead_context_t *ctx, void *output, const void *input, size_t inlen)
+{
+ return ctx->do_encrypt_update(ctx, output, input, inlen);
+}
+
+inline size_t ptls_aead_encrypt_final(ptls_aead_context_t *ctx, void *output)
+{
+ return ctx->do_encrypt_final(ctx, output);
+}
+
+inline size_t ptls_aead_decrypt(ptls_aead_context_t *ctx, void *output, const void *input, size_t inlen, uint64_t seq,
+ const void *aad, size_t aadlen)
+{
+ uint8_t iv[PTLS_MAX_IV_SIZE];
+
+ ptls_aead__build_iv(ctx, iv, seq);
+ return ctx->do_decrypt(ctx, output, input, inlen, iv, aad, aadlen);
+}
+
+#define ptls_define_hash(name, ctx_type, init_func, update_func, final_func) \
+ \
+ struct name##_context_t { \
+ ptls_hash_context_t super; \
+ ctx_type ctx; \
+ }; \
+ \
+ static void name##_update(ptls_hash_context_t *_ctx, const void *src, size_t len) \
+ { \
+ struct name##_context_t *ctx = (struct name##_context_t *)_ctx; \
+ update_func(&ctx->ctx, src, len); \
+ } \
+ \
+ static void name##_final(ptls_hash_context_t *_ctx, void *md, ptls_hash_final_mode_t mode) \
+ { \
+ struct name##_context_t *ctx = (struct name##_context_t *)_ctx; \
+ if (mode == PTLS_HASH_FINAL_MODE_SNAPSHOT) { \
+ ctx_type copy = ctx->ctx; \
+ final_func(©, md); \
+ ptls_clear_memory(©, sizeof(copy)); \
+ return; \
+ } \
+ if (md != NULL) \
+ final_func(&ctx->ctx, md); \
+ switch (mode) { \
+ case PTLS_HASH_FINAL_MODE_FREE: \
+ ptls_clear_memory(&ctx->ctx, sizeof(ctx->ctx)); \
+ free(ctx); \
+ break; \
+ case PTLS_HASH_FINAL_MODE_RESET: \
+ init_func(&ctx->ctx); \
+ break; \
+ default: \
+ assert(!"FIXME"); \
+ break; \
+ } \
+ } \
+ \
+ static ptls_hash_context_t *name##_clone(ptls_hash_context_t *_src) \
+ { \
+ struct name##_context_t *dst, *src = (struct name##_context_t *)_src; \
+ if ((dst = malloc(sizeof(*dst))) == NULL) \
+ return NULL; \
+ *dst = *src; \
+ return &dst->super; \
+ } \
+ \
+ static ptls_hash_context_t *name##_create(void) \
+ { \
+ struct name##_context_t *ctx; \
+ if ((ctx = malloc(sizeof(*ctx))) == NULL) \
+ return NULL; \
+ ctx->super = (ptls_hash_context_t){name##_update, name##_final, name##_clone}; \
+ init_func(&ctx->ctx); \
+ return &ctx->super; \
+ }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+* Copyright (c) 2017 Christian Huitema <huitema@huitema.net>
+*
+* Permission to use, copy, modify, and distribute this software for any
+* purpose with or without fee is hereby granted, provided that the above
+* copyright notice and this permission notice appear in all copies.
+*
+* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef PTLS_ASN1_H
+#define PTLS_ASN1_H
+
+// #include "picotls/minicrypto.h"
+
+/*
+* The ASN.1 functions take a "log context" parameter of type ptls_minicrypto_log_ctx_t.
+*
+* The log function in that code can be instantiated for example as:
+*
+* void log_printf(void * ctx, const char * format, ...)
+* {
+* va_list argptr;
+* va_start(argptr, format);
+* vfprintf(stderr, format, argptr);
+* }
+*
+* Using definitions from <stdio.h> and <stdarg.h>
+*/
+
+typedef struct st_ptls_minicrypto_log_ctx_t {
+ void *ctx;
+ void (*fn)(void *ctx, const char *format, ...);
+} ptls_minicrypto_log_ctx_t;
+
+size_t ptls_asn1_error_message(char const *error_label, size_t bytes_max, size_t byte_index, int level,
+ ptls_minicrypto_log_ctx_t *log_ctx);
+
+void ptls_asn1_dump_content(const uint8_t *bytes, size_t bytes_max, size_t byte_index, ptls_minicrypto_log_ctx_t *log_ctx);
+
+size_t ptls_asn1_read_type(const uint8_t *bytes, size_t bytes_max, int *structure_bit, int *type_class, uint32_t *type_number,
+ int *decode_error, int level, ptls_minicrypto_log_ctx_t *log_ctx);
+
+void ptls_asn1_print_type(int type_class, uint32_t type_number, int level, ptls_minicrypto_log_ctx_t *log_ctx);
+
+size_t ptls_asn1_read_length(const uint8_t *bytes, size_t bytes_max, size_t byte_index, uint32_t *length, int *indefinite_length,
+ size_t *last_byte, int *decode_error, int level, ptls_minicrypto_log_ctx_t *log_ctx);
+
+size_t ptls_asn1_get_expected_type_and_length(const uint8_t *bytes, size_t bytes_max, size_t byte_index, uint8_t expected_type,
+ uint32_t *length, int *indefinite_length, size_t *last_byte, int *decode_error,
+ ptls_minicrypto_log_ctx_t *log_ctx);
+
+size_t ptls_asn1_validation_recursive(const uint8_t *bytes, size_t bytes_max, int *decode_error, int level,
+ ptls_minicrypto_log_ctx_t *log_ctx);
+
+int ptls_asn1_validation(const uint8_t *bytes, size_t length, ptls_minicrypto_log_ctx_t *log_ctx);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2018 Fastly
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef picotls_certificate_compression_h
+#define picotls_certificate_compression_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "picotls.h"
+
+#define PTLS_CERTIFICATE_COMPRESSION_ALGORITHM_GZIP 1
+#define PTLS_CERTIFICATE_COMPRESSION_ALGORITHM_BROTLI 2
+
+typedef struct st_ptls_emit_compressed_certificate_t {
+ ptls_emit_certificate_t super;
+ uint16_t algo;
+ struct st_ptls_compressed_certificate_entry_t {
+ uint32_t uncompressed_length;
+ ptls_iovec_t bytes;
+ } with_ocsp_status, without_ocsp_status;
+} ptls_emit_compressed_certificate_t;
+
+extern ptls_decompress_certificate_t ptls_decompress_certificate;
+
+/**
+ * initializes a certificate emitter that precompresses a certificate chain (and ocsp status)
+ */
+int ptls_init_compressed_certificate(ptls_emit_compressed_certificate_t *ecc, ptls_iovec_t *certificates, size_t num_certificates,
+ ptls_iovec_t ocsp_status);
+/**
+ *
+ */
+void ptls_dispose_compressed_certificate(ptls_emit_compressed_certificate_t *ecc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2019 Christian Huitema <huitema@huitema.net>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef PTLS_FFX_H
+#define PTLS_FFX_H
+
+/*
+ * Format preserving encryption using the FFX algorithm.
+ *
+ * We demonstrate here a simple encryption process derived
+ * from the FFX algorithms, which is effectively a specific
+ * mode of running a verified encryption code. The
+ * algorithm is Feistel cipher in which the S-boxes are
+ * defined by a symmetric encryption algorithm such as
+ * AES or ChaCha20.
+ * See "Ciphers with Arbitrary Finite Domains" by
+ * John Black and Phillip Rogaway, 2001 --
+ * http://web.cs.ucdavis.edu/~rogaway/papers/subset.pdf
+ *
+ * An instantiation of the algorithm is defined by a
+ * series of parameters:
+ * - the context of the symmetric crypto algorithm,
+ * - key used for the symmetric algorithm,
+ * - number of rounds,
+ * - length of the block in bits
+ *
+ * We consider just two symmetric algorithms for now,
+ * ChaCha20 and AES128CTR. In theory, any symmetric algorithm
+ * operating on a 128 bit block would work, and cryptographic
+ * hashes producing at least 128 bits of output could also
+ * be used. In practice, ChaCha20 and AES128 cover most of
+ * the use cases.
+ *
+ * The implementation will produce a result for any block
+ * length lower than 256, although values lower than 32 would
+ * not be recommended.
+ *
+ * The block to be encrypted is passed as a byte array of size
+ * (block_length + 7)/8. When the block_length is not a
+ * multiple of 8, the algorithm guarantees that the extra bits
+ * in the last byte are left untouched. For example, if the
+ * block length is 39, the least significant bit of the
+ * fifth byte will be copied from input to output.
+ *
+ * The number of rounds is left as a configuration parameter,
+ * which is constrained to be even by our implementation. The
+ * required number of passes varies with the application's
+ * constraints. The practical minimum is 4 passes. Demanding
+ * applications can use 8 passes, and the practical conservative
+ * value is 10, as specified by NIST for the FF1 variant of
+ * the same algorithm. This choice between 4, 8 or 10 is
+ * based on "Luby-Rackoff: 7 Rounds are Enough
+ * for 2^n(1-epsilon) Security" by Jacques Patarin, 2003 --
+ * https://www.iacr.org/archive/crypto2003/27290510/27290510.pdf
+ *
+ * Encrypting short numbers, by nature, produces a codebook
+ * of limited size. In many applications, the short number is
+ * part of a larger object that is passed in clear text. In that
+ * case, NIST recommends using as much as possible of that clear
+ * text as an initialization vector, used as "tweak" in the
+ * FFX algorithm. See:
+ * https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38G.pdf
+ */
+
+typedef struct st_ptls_ffx_context_t {
+ ptls_cipher_context_t super;
+ ptls_cipher_context_t *enc_ctx;
+ int nb_rounds;
+ int is_enc;
+ size_t byte_length;
+ size_t nb_left;
+ size_t nb_right;
+ uint8_t mask_last_byte;
+ uint8_t tweaks[16];
+} ptls_ffx_context_t;
+
+/**
+ * The PTLS_FFX_CIPHER_ALGO macro will define a variant of the FFX algorithm by specifying
+ * the base algorithm (variable name of type ptls_cipher_algorithm_t), the bit length
+ * of the block, the selected number of blocks and the key size of the base algorithm,
+ * in bytes.
+ *
+ * The macro will automatically generate an algorithm name, of the form:
+ * ptls_ffx_<base algorithm name>_b<bit length>_r<number of rounds>
+ * For example, selecting the algorithm "ptls_minicrypto_chacha20" with a block
+ * size of 53 bits and 8 rounds will generate the name:
+ * ptls_ffx_ptls_minicrypto_chacha20_b53_r8
+ * This name is declared as a static variable.
+ *
+ * Once the FFX variant is defined, the name can be used to create a
+ * cipher context using ptls_cipher_new. The context can then be used
+ * through the function ptls_cipher_init, ptls_cipher_encrypt, and
+ * can be freed by calling ptls_cipher_free.
+ *
+ * The ptls_cipher_encrypt will encrypt a code word of the specified
+ * bit length, or decrypt it if the context was created with the
+ * option "is_enc = 0". The code word is represented as an array of
+ * bytes. If the bit length is not a multiple of 8, the remaining
+ * low level bits in the last byte will be left unchanged.
+ */
+#define PTLS_FFX_CIPHER_ALGO_NAME(base, bitlength, nbrounds) #base "-ffx-b" #bitlength "-r" #nbrounds
+#define PTLS_FFX_CIPHER_ALGO(base, bitlength, nbrounds, keysize) \
+ static int ptls_ffx_##base##_b##bitlength##_r##nbrounds##_setup(ptls_cipher_context_t *ctx, int is_enc, const void *key) \
+ { \
+ return ptls_ffx_setup_crypto(ctx, &base, is_enc, nbrounds, bitlength, key); \
+ } \
+ static ptls_cipher_algorithm_t ptls_ffx_##base##_b##bitlength##_r##nbrounds = { \
+ PTLS_FFX_CIPHER_ALGO_NAME(base, bitlength, nbrounds), keysize, (bitlength + 7) / 8, 16, sizeof(ptls_ffx_context_t), \
+ ptls_ffx_##base##_b##bitlength##_r##nbrounds##_setup};
+
+/*
+ * The function ptls_ffx_new creates a cipher context for a specific FFX variant.
+ * It is equivalent to defining the variant with the PTLS_FFX_CIPHER_ALGO macro,
+ * then creating the context using ptls_cipher_new.
+ */
+ptls_cipher_context_t *ptls_ffx_new(ptls_cipher_algorithm_t *algo, int is_enc, int nb_rounds, size_t bit_length, const void *key);
+
+/**
+ * The function ptls_ffx_setup_crypto is called by ptls_cipher_new or
+ * ptls_ffx_new when initializing an FFX variant. It should not be
+ * called directly.
+ */
+int ptls_ffx_setup_crypto(ptls_cipher_context_t *_ctx, ptls_cipher_algorithm_t *algo, int is_enc, int nb_rounds, size_t bit_length,
+ const void *key);
+#endif /* PTLS_FFX_H */
--- /dev/null
+/*
+ * Copyright (c) 2016 DeNA Co., Ltd., Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef picotls_minicrypto_h
+#define picotls_minicrypto_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "picotls.h"
+
+#define SECP256R1_PRIVATE_KEY_SIZE 32
+#define SECP256R1_PUBLIC_KEY_SIZE 65 /* including the header */
+#define SECP256R1_SHARED_SECRET_SIZE 32
+
/* sign_certificate callback state for ECDSA-secp256r1-SHA256, holding the raw private key */
typedef struct st_ptls_minicrypto_secp256r1sha256_sign_certificate_t {
    ptls_sign_certificate_t super;           /* common picotls callback header */
    uint8_t key[SECP256R1_PRIVATE_KEY_SIZE]; /* raw secp256r1 private key (32 bytes) */
} ptls_minicrypto_secp256r1sha256_sign_certificate_t;
+
+void ptls_minicrypto_random_bytes(void *buf, size_t len);
+
+int ptls_minicrypto_init_secp256r1sha256_sign_certificate(ptls_minicrypto_secp256r1sha256_sign_certificate_t *self,
+ ptls_iovec_t key);
+
+extern ptls_key_exchange_algorithm_t ptls_minicrypto_secp256r1, ptls_minicrypto_x25519;
+extern ptls_key_exchange_algorithm_t *ptls_minicrypto_key_exchanges[];
+extern ptls_cipher_algorithm_t ptls_minicrypto_aes128ecb, ptls_minicrypto_aes256ecb, ptls_minicrypto_aes128ctr,
+ ptls_minicrypto_aes256ctr, ptls_minicrypto_chacha20;
+extern ptls_aead_algorithm_t ptls_minicrypto_aes128gcm, ptls_minicrypto_aes256gcm, ptls_minicrypto_chacha20poly1305;
+extern ptls_hash_algorithm_t ptls_minicrypto_sha256, ptls_minicrypto_sha384;
+extern ptls_cipher_suite_t ptls_minicrypto_aes128gcmsha256, ptls_minicrypto_aes256gcmsha384, ptls_minicrypto_chacha20poly1305sha256;
+extern ptls_cipher_suite_t *ptls_minicrypto_cipher_suites[];
+
/* Parsed view of a PKCS#8 private key: `vec` holds the raw DER bytes, and the
 * index/length pairs locate the interesting fields within it -- TODO confirm against the ASN.1 parser. */
typedef struct st_ptls_asn1_pkcs8_private_key_t {
    ptls_iovec_t vec;          /* the raw encoded key */
    size_t algorithm_index;    /* byte offset of the algorithm identifier within vec */
    uint32_t algorithm_length;
    size_t parameters_index;   /* byte offset of the algorithm parameters within vec */
    uint32_t parameters_length;
    size_t key_data_index;     /* byte offset of the key data within vec */
    uint32_t key_data_length;
} ptls_asn1_pkcs8_private_key_t;
+
+int ptls_minicrypto_load_private_key(ptls_context_t *ctx, char const *pem_fname);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2016 DeNA Co., Ltd., Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef picotls_openssl_h
+#define picotls_openssl_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <openssl/opensslv.h>
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+#include <openssl/x509.h>
+#include <openssl/opensslconf.h>
+#include "../picotls.h"
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
+#if !defined(OPENSSL_NO_CHACHA) && !defined(OPENSSL_NO_POLY1305)
+#define PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 1
+#endif
+#endif
+
+extern ptls_key_exchange_algorithm_t ptls_openssl_secp256r1;
+#ifdef NID_secp384r1
+#define PTLS_OPENSSL_HAVE_SECP384R1 1
+#define PTLS_OPENSSL_HAS_SECP384R1 1 /* deprecated; use HAVE_ */
+extern ptls_key_exchange_algorithm_t ptls_openssl_secp384r1;
+#endif
+#ifdef NID_secp521r1
+#define PTLS_OPENSSL_HAVE_SECP521R1 1
+#define PTLS_OPENSSL_HAS_SECP521R1 1 /* deprecated; use HAVE_ */
+extern ptls_key_exchange_algorithm_t ptls_openssl_secp521r1;
+#endif
+#if defined(NID_X25519) && !defined(LIBRESSL_VERSION_NUMBER)
+#define PTLS_OPENSSL_HAVE_X25519 1
+#define PTLS_OPENSSL_HAS_X25519 1 /* deprecated; use HAVE_ */
+extern ptls_key_exchange_algorithm_t ptls_openssl_x25519;
+#endif
+#ifndef OPENSSL_NO_BF
+#define PTLS_OPENSSL_HAVE_BF 1
+#endif
+
+extern ptls_key_exchange_algorithm_t *ptls_openssl_key_exchanges[];
+
+extern ptls_cipher_algorithm_t ptls_openssl_aes128ecb;
+extern ptls_cipher_algorithm_t ptls_openssl_aes128ctr;
+extern ptls_aead_algorithm_t ptls_openssl_aes128gcm;
+extern ptls_cipher_algorithm_t ptls_openssl_aes256ecb;
+extern ptls_cipher_algorithm_t ptls_openssl_aes256ctr;
+extern ptls_aead_algorithm_t ptls_openssl_aes256gcm;
+extern ptls_hash_algorithm_t ptls_openssl_sha256;
+extern ptls_hash_algorithm_t ptls_openssl_sha384;
+extern ptls_cipher_suite_t ptls_openssl_aes128gcmsha256;
+extern ptls_cipher_suite_t ptls_openssl_aes256gcmsha384;
+extern ptls_cipher_suite_t *ptls_openssl_cipher_suites[];
+
+#if defined(PTLS_OPENSSL_HAVE_CHACHA20_POLY1305)
+extern ptls_cipher_algorithm_t ptls_openssl_chacha20;
+extern ptls_aead_algorithm_t ptls_openssl_chacha20poly1305;
+extern ptls_cipher_suite_t ptls_openssl_chacha20poly1305sha256;
+#endif
+
+#if PTLS_OPENSSL_HAVE_BF
+extern ptls_cipher_algorithm_t ptls_openssl_bfecb;
+#endif
+
+void ptls_openssl_random_bytes(void *buf, size_t len);
+/**
+ * constructs a key exchange context. pkey's reference count is incremented.
+ */
+int ptls_openssl_create_key_exchange(ptls_key_exchange_context_t **ctx, EVP_PKEY *pkey);
+
/* (TLS SignatureScheme id, digest) pair; arrays of these are terminated by scheme_id == UINT16_MAX */
struct st_ptls_openssl_signature_scheme_t {
    uint16_t scheme_id;      /* TLS SignatureScheme code point */
    const EVP_MD *scheme_md; /* digest algorithm used with the scheme */
};

/* sign_certificate callback state wrapping an OpenSSL private key */
typedef struct st_ptls_openssl_sign_certificate_t {
    ptls_sign_certificate_t super; /* common picotls callback header */
    EVP_PKEY *key;                 /* private key; see ptls_openssl_init_sign_certificate for ownership */
    struct st_ptls_openssl_signature_scheme_t schemes[4]; /* terminated by .scheme_id == UINT16_MAX */
} ptls_openssl_sign_certificate_t;
+
+int ptls_openssl_init_sign_certificate(ptls_openssl_sign_certificate_t *self, EVP_PKEY *key);
+void ptls_openssl_dispose_sign_certificate(ptls_openssl_sign_certificate_t *self);
+int ptls_openssl_load_certificates(ptls_context_t *ctx, X509 *cert, STACK_OF(X509) * chain);
+
/* verify_certificate callback state backed by an OpenSSL X509_STORE of trust anchors */
typedef struct st_ptls_openssl_verify_certificate_t {
    ptls_verify_certificate_t super; /* common picotls callback header */
    X509_STORE *cert_store;          /* store used for chain verification */
} ptls_openssl_verify_certificate_t;
+
+int ptls_openssl_init_verify_certificate(ptls_openssl_verify_certificate_t *self, X509_STORE *store);
+void ptls_openssl_dispose_verify_certificate(ptls_openssl_verify_certificate_t *self);
+X509_STORE *ptls_openssl_create_default_certificate_store(void);
+
+int ptls_openssl_encrypt_ticket(ptls_buffer_t *dst, ptls_iovec_t src,
+ int (*cb)(unsigned char *, unsigned char *, EVP_CIPHER_CTX *, HMAC_CTX *, int));
+int ptls_openssl_decrypt_ticket(ptls_buffer_t *dst, ptls_iovec_t src,
+ int (*cb)(unsigned char *, unsigned char *, EVP_CIPHER_CTX *, HMAC_CTX *, int));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+* Copyright (c) 2017 Christian Huitema <huitema@huitema.net>
+*
+* Permission to use, copy, modify, and distribute this software for any
+* purpose with or without fee is hereby granted, provided that the above
+* copyright notice and this permission notice appear in all copies.
+*
+* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef PTLS_PEMBASE64_H
+#define PTLS_PEMBASE64_H
+
+/*
+* Base64 functions used in encoding and decoding of PEM files
+*/
+
+#define PTLS_BASE64_DECODE_DONE 0
+#define PTLS_BASE64_DECODE_IN_PROGRESS 1
+#define PTLS_BASE64_DECODE_FAILED -1
+
/* Incremental base64 decoder state; `status` takes the PTLS_BASE64_DECODE_* values above. */
typedef struct st_ptls_base64_decode_state_t {
    int nbc;    /* presumably the count of input characters in the current 4-char group -- TODO confirm */
    int nbo;    /* presumably the count of output bytes pending for the current group -- TODO confirm */
    int status; /* one of PTLS_BASE64_DECODE_{DONE,IN_PROGRESS,FAILED} */
    uint32_t v; /* bit accumulator for the group being decoded */
} ptls_base64_decode_state_t;
+
+int ptls_base64_encode(const uint8_t *data, size_t data_len, char *base64_text);
+
+size_t ptls_base64_howlong(size_t data_length);
+
+void ptls_base64_decode_init(ptls_base64_decode_state_t *state);
+int ptls_base64_decode(const char *base64_text, ptls_base64_decode_state_t *state, ptls_buffer_t *buf);
+
+int ptls_load_pem_objects(char const *pem_fname, const char *label, ptls_iovec_t *list, size_t list_max, size_t *nb_objects);
+
+#endif /* PTLS_PEMBASE64_H */
--- /dev/null
+/*
+ * Copyright (c) 2016 DeNA Co., Ltd., Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef picotls_bcrypt_h
+#define picotls_bcrypt_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "../picotls.h"
+
+#ifdef _WINDOWS
+#include <bcrypt.h>
+
+extern ptls_cipher_algorithm_t ptls_bcrypt_aes128ecb;
+extern ptls_cipher_algorithm_t ptls_bcrypt_aes256ecb;
+extern ptls_cipher_algorithm_t ptls_bcrypt_aes128ctr;
+extern ptls_cipher_algorithm_t ptls_bcrypt_aes256ctr;
+
+extern ptls_aead_algorithm_t ptls_bcrypt_aes128gcm;
+extern ptls_aead_algorithm_t ptls_bcrypt_aes256gcm;
+
+extern ptls_hash_algorithm_t ptls_bcrypt_sha256;
+extern ptls_hash_algorithm_t ptls_bcrypt_sha384;
+
+extern ptls_cipher_suite_t ptls_bcrypt_aes128gcmsha256;
+extern ptls_cipher_suite_t ptls_bcrypt_aes256gcmsha384;
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* picotls_bcrypt_h */
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include "contrib/quicly/klib/khash.h"
+#include "contrib/quicly/quicly.h"
+#include "contrib/quicly/defaults.h"
+#include "contrib/quicly/sentmap.h"
+#include "contrib/quicly/frame.h"
+#include "contrib/quicly/streambuf.h"
+#include "contrib/quicly/cc.h"
+#if QUICLY_USE_EMBEDDED_PROBES
+#include "contrib/quicly/embedded-probes.h"
+#elif QUICLY_USE_DTRACE
+#include "contrib/quicly/quicly-probes.h"
+#endif
+
+#define QUICLY_MIN_INITIAL_DCID_LEN 8
+
+#define QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS 0xffa5
+#define QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID 0
+#define QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT 1
+#define QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN 2
+#define QUICLY_TRANSPORT_PARAMETER_ID_MAX_PACKET_SIZE 3
+#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA 4
+#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 5
+#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 6
+#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI 7
+#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI 8
+#define QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI 9
+#define QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT 10
+#define QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY 11
+#define QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION 12
+#define QUICLY_TRANSPORT_PARAMETER_ID_PREFERRED_ADDRESS 13
+
+#define QUICLY_EPOCH_INITIAL 0
+#define QUICLY_EPOCH_0RTT 1
+#define QUICLY_EPOCH_HANDSHAKE 2
+#define QUICLY_EPOCH_1RTT 3
+
+/**
+ * maximum size of token that quicly accepts
+ */
+#define QUICLY_MAX_TOKEN_LEN 512
+/**
+ * do not try to send ACK-eliciting frames if the available CWND is below this value
+ */
+#define MIN_SEND_WINDOW 64
+/**
+ * sends ACK bundled with PING, when number of gaps in the ack queue reaches or exceeds this threshold. This value should be much
+ * smaller than QUICLY_MAX_RANGES.
+ */
+#define QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK 8
+
+KHASH_MAP_INIT_INT64(quicly_stream_t, quicly_stream_t *)
+
+#if QUICLY_USE_EMBEDDED_PROBES || QUICLY_USE_DTRACE
+#define QUICLY_PROBE(label, conn, ...) \
+ do { \
+ quicly_conn_t *_conn = (conn); \
+ if (PTLS_UNLIKELY(QUICLY_##label##_ENABLED()) && !ptls_skip_tracing(_conn->crypto.tls)) \
+ QUICLY_##label(_conn, __VA_ARGS__); \
+ } while (0)
+#define QUICLY_PROBE_HEXDUMP(s, l) \
+ ({ \
+ size_t _l = (l); \
+ ptls_hexdump(alloca(_l * 2 + 1), (s), _l); \
+ })
+#define QUICLY_PROBE_ESCAPE_UNSAFE_STRING(s, l) \
+ ({ \
+ size_t _l = (l); \
+ quicly_escape_unsafe_string(alloca(_l * 4 + 1), (s), _l); \
+ })
+#else
+#define QUICLY_PROBE(label, conn, ...)
+#define QUICLY_PROBE_HEXDUMP(s, l)
+#define QUICLY_PROBE_ESCAPE_UNSAFE_STRING(s, l)
+#endif
+
/* the pair of contexts protecting one direction of one epoch: payload AEAD + header protection */
struct st_quicly_cipher_context_t {
    ptls_aead_context_t *aead;                /* packet payload protection */
    ptls_cipher_context_t *header_protection; /* header-protection cipher */
};

/* node of the FIFO of PATH_CHALLENGE / PATH_RESPONSE frames waiting to be sent */
struct st_quicly_pending_path_challenge_t {
    struct st_quicly_pending_path_challenge_t *next;
    uint8_t is_response; /* 0: PATH_CHALLENGE, non-zero: PATH_RESPONSE */
    uint8_t data[QUICLY_PATH_CHALLENGE_DATA_LEN];
};
+
/* per-epoch packet-number space: what has been received and what remains to be acked */
struct st_quicly_pn_space_t {
    /**
     * acks to be sent to peer
     */
    quicly_ranges_t ack_queue;
    /**
     * time at when the largest pn in the ack_queue has been received (or INT64_MAX if none)
     */
    int64_t largest_pn_received_at;
    /**
     * the next packet number we expect to receive in this space -- assumed; confirm against the receive path
     */
    uint64_t next_expected_packet_number;
    /**
     * packet count before ack is sent
     */
    uint32_t unacked_count;
};
+
/* packet-number space plus cipher pair for the Initial or Handshake epoch */
struct st_quicly_handshake_space_t {
    struct st_quicly_pn_space_t super;
    struct {
        struct st_quicly_cipher_context_t ingress; /* keys for decrypting received packets */
        struct st_quicly_cipher_context_t egress;  /* keys for encrypting sent packets */
    } cipher;
};
+
/* packet-number space plus ciphers for the 0-RTT and 1-RTT epochs (which share one PN space) */
struct st_quicly_application_space_t {
    struct st_quicly_pn_space_t super;
    struct {
        struct {
            struct {
                ptls_cipher_context_t *zero_rtt, *one_rtt;
            } header_protection;
            ptls_aead_context_t *aead[2]; /* 0-RTT uses aead[1], 1-RTT uses aead[key_phase] */
            uint8_t secret[PTLS_MAX_DIGEST_SIZE];
            struct {
                uint64_t prepared;  /* presumably the latest key phase for which keys were derived -- TODO confirm */
                uint64_t decrypted; /* presumably the latest key phase used to decrypt a packet -- TODO confirm */
            } key_phase;
        } ingress;
        struct {
            struct st_quicly_cipher_context_t key;
            uint8_t secret[PTLS_MAX_DIGEST_SIZE];
            uint64_t key_phase;
            struct {
                /**
                 * PN at which key update was initiated. Set to UINT64_MAX once key update is acked.
                 */
                uint64_t last;
                /**
                 * PN at which key update should be initiated. Set to UINT64_MAX when key update cannot be initiated.
                 */
                uint64_t next;
            } key_update_pn;
        } egress;
    } cipher;
    int one_rtt_writable; /* non-zero once 1-RTT data may be sent -- assumed from the name; verify where it is set */
};
+
/* the full per-connection state; `super` is the part exposed through the public API */
struct st_quicly_conn_t {
    struct _st_quicly_conn_public_t super;
    /**
     * the initial context
     */
    struct st_quicly_handshake_space_t *initial;
    /**
     * the handshake context
     */
    struct st_quicly_handshake_space_t *handshake;
    /**
     * 0-RTT and 1-RTT context
     */
    struct st_quicly_application_space_t *application;
    /**
     * hashtable of streams
     */
    khash_t(quicly_stream_t) * streams;
    /**
     * state governing data received from the peer
     */
    struct {
        /**
         * connection-level flow control for incoming data (MAX_DATA we advertise)
         */
        struct {
            uint64_t bytes_consumed;
            quicly_maxsender_t sender;
        } max_data;
        /**
         * MAX_STREAMS senders for the streams the peer may open (uni / bidi)
         */
        struct {
            quicly_maxsender_t *uni, *bidi;
        } max_streams;
    } ingress;
    /**
     * state governing data sent to the peer
     */
    struct {
        /**
         * contains actions that needs to be performed when an ack is being received
         */
        quicly_sentmap_t sentmap;
        /**
         * all packets where pn < max_lost_pn are deemed lost
         */
        uint64_t max_lost_pn;
        /**
         * loss recovery
         */
        quicly_loss_t loss;
        /**
         * next or the currently encoding packet number
         */
        uint64_t packet_number;
        /**
         * next PN to be skipped
         */
        uint64_t next_pn_to_skip;
        /**
         * valid if state is CLOSING
         */
        struct {
            uint16_t error_code;
            uint64_t frame_type; /* UINT64_MAX if application close */
            const char *reason_phrase;
            unsigned long num_packets_received;
            unsigned long num_sent;
        } connection_close;
        /**
         * connection-level flow-control credit granted by the peer (MAX_DATA), and how much of it has been used
         */
        struct {
            uint64_t permitted;
            uint64_t sent;
        } max_streams;
        /**
         * stream-count limits granted by the peer, with the STREAMS_BLOCKED senders
         */
        struct {
            struct st_quicly_max_streams_t {
                uint64_t count;
                quicly_maxsender_t blocked_sender;
            } uni, bidi;
        } max_streams;
        /**
         * FIFO of PATH_CHALLENGE / PATH_RESPONSE frames waiting to be sent
         */
        struct {
            struct st_quicly_pending_path_challenge_t *head, **tail_ref;
        } path_challenge;
        /**
         * state for sending and tracking NEW_TOKEN frames
         */
        struct {
            uint64_t generation;
            uint64_t max_acked;
            uint32_t num_inflight;
        } new_token;
        /**
         * when the last ack-eliciting packet was sent
         */
        int64_t last_retransmittable_sent_at;
        /**
         * when to send an ACK, or other frames used for managing the connection
         */
        int64_t send_ack_at;
        /**
         * congestion controller state
         */
        quicly_cc_t cc;
        /**
         * things to be sent at the stream-level, that are not governed by the stream scheduler
         */
        struct {
            /**
             * list of blocked streams (sorted in ascending order of stream_ids)
             */
            struct {
                quicly_linklist_t uni;
                quicly_linklist_t bidi;
            } blocked;
            /**
             * list of streams with pending control data (e.g., RESET_STREAM)
             */
            quicly_linklist_t control;
        } pending_streams;
        /**
         * bit vector indicating if there's any pending crypto data (the insignificant 4 bits), or other non-stream data
         */
        uint8_t pending_flows;
#define QUICLY_PENDING_FLOW_NEW_TOKEN_BIT (1 << 5)
#define QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT (1 << 6)
    } egress;
    /**
     * crypto data
     */
    struct {
        ptls_t *tls;
        ptls_handshake_properties_t handshake_properties;
        struct {
            ptls_raw_extension_t ext[2];
            ptls_buffer_t buf;
        } transport_params;
    } crypto;
    /**
     * retry token (if the token is a Retry token can be determined by consulting the length of retry_odcid)
     */
    ptls_iovec_t token;
    /**
     * len=0 if not used
     */
    quicly_cid_t retry_odcid;
    struct {
        /**
         * The moment when the idle timeout fires (including the additional 3 PTO). The value is set to INT64_MAX while the
         * handshake is in progress.
         */
        int64_t at;
        /**
         * idle timeout
         */
        uint8_t should_rearm_on_send : 1;
    } idle_timeout;
};
+
/* cursor state shared by the frame handlers while walking the payload of one packet */
struct st_quicly_handle_payload_state_t {
    const uint8_t *src, *const end; /* current read position and end of the payload */
    size_t epoch;                   /* epoch in which the packet was received */
    uint64_t frame_type;            /* type of the frame currently being processed */
};
+
/* receive callback of the internal crypto streams; defined later in this file */
static void crypto_stream_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len);

/* stream callbacks installed on the internal crypto streams (negative stream ids) */
static const quicly_stream_callbacks_t crypto_stream_callbacks = {quicly_streambuf_destroy, quicly_streambuf_egress_shift,
                                                                  quicly_streambuf_egress_emit, NULL, crypto_stream_receive};

/* forward declarations of internal helpers defined later in this file */
static int update_traffic_key_cb(ptls_update_traffic_key_t *self, ptls_t *tls, int is_enc, size_t epoch, const void *secret);
static int initiate_close(quicly_conn_t *conn, int err, uint64_t frame_type, const char *reason_phrase);
static int discard_sentmap_by_epoch(quicly_conn_t *conn, unsigned ack_epochs);

/* transport parameters assumed for the peer until its real parameters arrive during the handshake */
static const quicly_transport_parameters_t default_transport_params = {
    {0, 0, 0}, 0, 0, 0, 0, QUICLY_DEFAULT_ACK_DELAY_EXPONENT, QUICLY_DEFAULT_MAX_ACK_DELAY};

/* per-thread cached "current time", refreshed by update_now(); never moves backwards */
static __thread int64_t now;
+
+static void update_now(quicly_context_t *ctx)
+{
+ int64_t newval = ctx->now->cb(ctx->now);
+
+ if (now < newval)
+ now = newval;
+}
+
/**
 * USDT on cannot handle thread-local variables provided as arguments. Hence this wrapper.
 */
static int64_t now_cb(void)
{
    return now;
}

/* volatile function pointer -- presumably to keep the call from being folded away so probes see a stable symbol; confirm */
static int64_t (*volatile probe_now)(void) = now_cb;
+
+static void set_address(quicly_address_t *addr, struct sockaddr *sa)
+{
+ if (sa == NULL) {
+ addr->sa.sa_family = AF_UNSPEC;
+ return;
+ }
+
+ switch (sa->sa_family) {
+ case AF_UNSPEC:
+ addr->sa.sa_family = AF_UNSPEC;
+ break;
+ case AF_INET:
+ addr->sin = *(struct sockaddr_in *)sa;
+ break;
+ case AF_INET6:
+ addr->sin6 = *(struct sockaddr_in6 *)sa;
+ break;
+ default:
+ memset(addr, 0xff, sizeof(*addr));
+ assert(!"unexpected address type");
+ break;
+ }
+}
+
+static ptls_cipher_suite_t *get_aes128gcmsha256(quicly_context_t *ctx)
+{
+ ptls_cipher_suite_t **cs;
+
+ for (cs = ctx->tls->cipher_suites;; ++cs) {
+ assert(cs != NULL);
+ if ((*cs)->id == PTLS_CIPHER_SUITE_AES_128_GCM_SHA256)
+ break;
+ }
+ return *cs;
+}
+
+static inline uint8_t get_epoch(uint8_t first_byte)
+{
+ if (!QUICLY_PACKET_IS_LONG_HEADER(first_byte))
+ return QUICLY_EPOCH_1RTT;
+
+ switch (first_byte & QUICLY_PACKET_TYPE_BITMASK) {
+ case QUICLY_PACKET_TYPE_INITIAL:
+ return QUICLY_EPOCH_INITIAL;
+ case QUICLY_PACKET_TYPE_HANDSHAKE:
+ return QUICLY_EPOCH_HANDSHAKE;
+ case QUICLY_PACKET_TYPE_0RTT:
+ return QUICLY_EPOCH_0RTT;
+ default:
+ assert(!"FIXME");
+ }
+}
+
+static void set_cid(quicly_cid_t *dest, ptls_iovec_t src)
+{
+ memcpy(dest->cid, src.base, src.len);
+ dest->len = src.len;
+}
+
+static ptls_aead_context_t *create_retry_aead(quicly_context_t *ctx, int is_enc)
+{
+ static const uint8_t secret[] = {0x65, 0x6e, 0x61, 0xe3, 0x36, 0xae, 0x94, 0x17, 0xf7, 0xf0, 0xed,
+ 0xd8, 0xd7, 0x8d, 0x46, 0x1e, 0x2a, 0xa7, 0x08, 0x4a, 0xba, 0x7a,
+ 0x14, 0xc1, 0xe9, 0xf7, 0x26, 0xd5, 0x57, 0x09, 0x16, 0x9a};
+ ptls_cipher_suite_t *algo = get_aes128gcmsha256(ctx);
+ ptls_aead_context_t *aead = ptls_aead_new(algo->aead, algo->hash, is_enc, secret, QUICLY_AEAD_BASE_LABEL);
+ assert(aead != NULL);
+ return aead;
+}
+
/* Releases both halves of a cipher pair: the payload AEAD and the header-protection context. */
static void dispose_cipher(struct st_quicly_cipher_context_t *ctx)
{
    ptls_aead_free(ctx->aead);
    ptls_cipher_free(ctx->header_protection);
}
+
/**
 * Parses the header of one QUIC packet out of a datagram, without decrypting anything.
 * On success fills `packet` and returns the number of octets the packet occupies (so
 * coalesced packets can be parsed back-to-back); returns SIZE_MAX on malformed input.
 */
size_t quicly_decode_packet(quicly_context_t *ctx, quicly_decoded_packet_t *packet, const uint8_t *src, size_t len)
{
    const uint8_t *src_end = src + len;

    /* need at least the first byte plus one more octet */
    if (len < 2)
        goto Error;

    packet->octets = ptls_iovec_init(src, len);
    packet->datagram_size = len;
    packet->token = ptls_iovec_init(NULL, 0);
    packet->decrypted.pn = UINT64_MAX; /* marks the packet as not yet decrypted */
    ++src;

    if (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0])) {
        /* long header */
        uint64_t rest_length;
        if (src_end - src < 5)
            goto Error; /* version (4 bytes) + DCID length (1 byte) */
        packet->version = quicly_decode32(&src);
        packet->cid.dest.encrypted.len = *src++;
        if (src_end - src < packet->cid.dest.encrypted.len + 1)
            goto Error;
        packet->cid.dest.encrypted.base = (uint8_t *)src;
        src += packet->cid.dest.encrypted.len;
        packet->cid.src.len = *src++;
        if (src_end - src < packet->cid.src.len)
            goto Error;
        packet->cid.src.base = (uint8_t *)src;
        src += packet->cid.src.len;
        /* recover the plaintext form of the DCID when a CID encryptor is in use */
        if (ctx->cid_encryptor != NULL) {
            ctx->cid_encryptor->decrypt_cid(ctx->cid_encryptor, &packet->cid.dest.plaintext, packet->cid.dest.encrypted.base,
                                            packet->cid.dest.encrypted.len);
        } else {
            packet->cid.dest.plaintext = (quicly_cid_plaintext_t){0};
        }
        /* only Initial and 0-RTT packets may carry a DCID chosen by the client */
        switch (packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) {
        case QUICLY_PACKET_TYPE_INITIAL:
        case QUICLY_PACKET_TYPE_0RTT:
            packet->cid.dest.might_be_client_generated = 1;
            break;
        default:
            packet->cid.dest.might_be_client_generated = 0;
            break;
        }
        if (packet->version != QUICLY_PROTOCOL_VERSION) {
            /* VN packet or packets of unknown version cannot be parsed. `encrypted_off` is set to the first byte after SCID. */
            packet->encrypted_off = src - packet->octets.base;
        } else if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_RETRY) {
            /* retry */
            if (src_end - src <= PTLS_AESGCM_TAG_SIZE)
                goto Error;
            /* everything up to the trailing AEAD tag is the retry token */
            packet->token = ptls_iovec_init(src, src_end - src - PTLS_AESGCM_TAG_SIZE);
            src += packet->token.len;
            packet->encrypted_off = src - packet->octets.base;
        } else {
            /* coalescible long header packet */
            if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_INITIAL) {
                /* initial has a token */
                uint64_t token_len;
                if ((token_len = quicly_decodev(&src, src_end)) == UINT64_MAX)
                    goto Error;
                if (src_end - src < token_len)
                    goto Error;
                packet->token = ptls_iovec_init(src, token_len);
                src += token_len;
            }
            /* the Length field delimits this packet within the datagram */
            if ((rest_length = quicly_decodev(&src, src_end)) == UINT64_MAX)
                goto Error;
            if (rest_length < 1)
                goto Error;
            if (src_end - src < rest_length)
                goto Error;
            packet->encrypted_off = src - packet->octets.base;
            packet->octets.len = packet->encrypted_off + rest_length;
        }
        packet->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET;
    } else {
        /* short header */
        if (ctx->cid_encryptor != NULL) {
            if (src_end - src < QUICLY_MAX_CID_LEN_V1)
                goto Error;
            size_t host_cidl = ctx->cid_encryptor->decrypt_cid(ctx->cid_encryptor, &packet->cid.dest.plaintext, src, 0);
            if (host_cidl == SIZE_MAX)
                goto Error;
            packet->cid.dest.encrypted = ptls_iovec_init(src, host_cidl);
            src += host_cidl;
        } else {
            packet->cid.dest.encrypted = ptls_iovec_init(NULL, 0);
            packet->cid.dest.plaintext = (quicly_cid_plaintext_t){0};
        }
        packet->cid.dest.might_be_client_generated = 0;
        packet->cid.src = ptls_iovec_init(NULL, 0);
        packet->version = 0;
        /* a short-header packet always extends to the end of the datagram */
        packet->encrypted_off = src - packet->octets.base;
        packet->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_MAYBE_STATELESS_RESET;
    }

    return packet->octets.len;

Error:
    return SIZE_MAX;
}
+
/**
 * Reconstructs a full packet number from its truncated wire encoding, following the
 * QUIC packet-number decoding algorithm: choose the candidate closest to `expected`.
 *
 * @param truncated  the low `num_bits` bits as received on the wire
 * @param num_bits   number of bits carried on the wire
 * @param expected   the packet number we expect to receive next
 * @return the reconstructed packet number
 */
uint64_t quicly_determine_packet_number(uint32_t truncated, size_t num_bits, uint64_t expected)
{
    const uint64_t window = (uint64_t)1 << num_bits;
    uint64_t pn = (expected & ~(window - 1)) | truncated;

    if (pn + window / 2 <= expected) {
        /* candidate lies more than half a window below the expectation: wrap up */
        pn += window;
    } else if (pn > expected + window / 2 && pn >= window) {
        /* more than half a window above, and subtraction cannot underflow: wrap down */
        pn -= window;
    }
    return pn;
}
+
/* Debug-build sanity checks tying the in-flight/probe state to the loss and ack timers. */
static void assert_consistency(quicly_conn_t *conn, int timer_must_be_in_future)
{
    if (conn->super.state >= QUICLY_STATE_CLOSING) {
        /* once closing/draining, only the send_ack_at timer remains relevant */
        assert(!timer_must_be_in_future || now < conn->egress.send_ack_at);
        return;
    }

    if (conn->egress.sentmap.bytes_in_flight != 0 || conn->super.peer.address_validation.send_probe) {
        /* something is outstanding (or a probe is due), so the loss alarm must be armed */
        assert(conn->egress.loss.alarm_at != INT64_MAX);
    } else {
        assert(conn->egress.loss.loss_time == INT64_MAX);
    }
    /* Allow timers not in the future when the peer is not yet validated, since we may not be able to send packets even when timers
     * fire. */
    if (timer_must_be_in_future && conn->super.peer.address_validation.validated)
        assert(now < conn->egress.loss.alarm_at);
}
+
+static int on_invalid_ack(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+ quicly_sentmap_event_t event)
+{
+ if (event == QUICLY_SENTMAP_EVENT_ACKED)
+ return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
+ return 0;
+}
+
+static uint64_t calc_next_pn_to_skip(ptls_context_t *tlsctx, uint64_t next_pn)
+{
+ static __thread struct {
+ uint16_t values[32];
+ size_t off;
+ } cached_rand;
+
+ if (cached_rand.off == 0) {
+ tlsctx->random_bytes(cached_rand.values, sizeof(cached_rand.values));
+ cached_rand.off = sizeof(cached_rand.values) / sizeof(cached_rand.values[0]);
+ }
+
+ /* on average, skip one PN per every 256 packets, by selecting one of the 511 packet numbers following next_pn */
+ return next_pn + 1 + (cached_rand.values[--cached_rand.off] & 0x1ff);
+}
+
/* Resets the egress MAX_STREAMS bookkeeping; the blocked sender starts at -1 (nothing announced yet -- TODO confirm). */
static void init_max_streams(struct st_quicly_max_streams_t *m)
{
    m->count = 0;
    quicly_maxsender_init(&m->blocked_sender, -1);
}
+
+static int update_max_streams(struct st_quicly_max_streams_t *m, uint64_t count)
+{
+ if (count > (uint64_t)1 << 60)
+ return QUICLY_TRANSPORT_ERROR_STREAM_LIMIT;
+
+ if (m->count < count) {
+ m->count = count;
+ if (m->blocked_sender.max_acked < count)
+ m->blocked_sender.max_acked = count;
+ }
+
+ return 0;
+}
+
+int quicly_connection_is_ready(quicly_conn_t *conn)
+{
+ return conn->application != NULL;
+}
+
+static int stream_is_destroyable(quicly_stream_t *stream)
+{
+ if (!quicly_recvstate_transfer_complete(&stream->recvstate))
+ return 0;
+ if (!quicly_sendstate_transfer_complete(&stream->sendstate))
+ return 0;
+ switch (stream->_send_aux.reset_stream.sender_state) {
+ case QUICLY_SENDER_STATE_NONE:
+ case QUICLY_SENDER_STATE_ACKED:
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+static void sched_stream_control(quicly_stream_t *stream)
+{
+ assert(stream->stream_id >= 0);
+
+ if (!quicly_linklist_is_linked(&stream->_send_aux.pending_link.control))
+ quicly_linklist_insert(stream->conn->egress.pending_streams.control.prev, &stream->_send_aux.pending_link.control);
+}
+
/* Updates "has pending send data" bookkeeping for a stream after its send state changed. */
static void resched_stream_data(quicly_stream_t *stream)
{
    if (stream->stream_id < 0) {
        /* crypto streams use ids -1..-4; map the id onto one of the low 4 bits of pending_flows */
        assert(-4 <= stream->stream_id);
        uint8_t mask = 1 << -(1 + stream->stream_id);
        if (stream->sendstate.pending.num_ranges != 0) {
            stream->conn->egress.pending_flows |= mask;
        } else {
            stream->conn->egress.pending_flows &= ~mask;
        }
        return;
    }

    /* do nothing if blocked */
    if (stream->streams_blocked)
        return;

    /* ordinary streams are handed to the pluggable stream scheduler */
    quicly_stream_scheduler_t *scheduler = stream->conn->super.ctx->stream_scheduler;
    scheduler->update_state(scheduler, stream);
}
+
+/* Returns non-zero when a connection-level MAX_DATA frame should be sent,
+ * delegating the window-threshold decision to the maxsender helper.
+ * NOTE(review): transport_params.max_data is 64-bit but is truncated to
+ * uint32_t here — looks wrong for windows >= 4 GiB; confirm against upstream
+ * quicly before relying on large connection flow-control windows. */
+static int should_send_max_data(quicly_conn_t *conn)
+{
+ return quicly_maxsender_should_send_max(&conn->ingress.max_data.sender, conn->ingress.max_data.bytes_consumed,
+ (uint32_t)conn->super.ctx->transport_params.max_data, 512);
+}
+
+/* Returns non-zero when a MAX_STREAM_DATA frame should be sent for the
+ * stream. Once the final offset is known (eos != UINT64_MAX) the window need
+ * not be extended any further. */
+static int should_send_max_stream_data(quicly_stream_t *stream)
+{
+ if (stream->recvstate.eos != UINT64_MAX)
+ return 0;
+ return quicly_maxsender_should_send_max(&stream->_send_aux.max_stream_data_sender, stream->recvstate.data_off,
+ stream->_recv_aux.window, 512);
+}
+
+/* Notifies the stack that the application has written data to (or otherwise
+ * touched) the stream's send buffer. When `activate` is non-zero the
+ * sendstate is activated first; the stream is then rescheduled for
+ * transmission. Returns 0 on success or the error from
+ * quicly_sendstate_activate. */
+int quicly_stream_sync_sendbuf(quicly_stream_t *stream, int activate)
+{
+ int ret;
+
+ if (activate) {
+ if ((ret = quicly_sendstate_activate(&stream->sendstate)) != 0)
+ return ret;
+ }
+
+ resched_stream_data(stream);
+ return 0;
+}
+
+/* Notifies the stack that the application has consumed `shift_amount` bytes
+ * from the receive buffer; advances the flow-control offset and, for
+ * application streams, schedules a MAX_STREAM_DATA update if warranted. */
+void quicly_stream_sync_recvbuf(quicly_stream_t *stream, size_t shift_amount)
+{
+ stream->recvstate.data_off += shift_amount;
+ if (stream->stream_id >= 0) {
+ if (should_send_max_stream_data(stream))
+ sched_stream_control(stream);
+ }
+}
+
+/* Queues a PATH_CHALLENGE (or PATH_RESPONSE, when is_response is non-zero)
+ * carrying the given QUICLY_PATH_CHALLENGE_DATA_LEN bytes, appending it to
+ * the connection's singly-linked challenge list. Returns
+ * PTLS_ERROR_NO_MEMORY on allocation failure, 0 otherwise. */
+static int schedule_path_challenge(quicly_conn_t *conn, int is_response, const uint8_t *data)
+{
+ struct st_quicly_pending_path_challenge_t *pending;
+
+ if ((pending = malloc(sizeof(struct st_quicly_pending_path_challenge_t))) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+
+ pending->next = NULL;
+ pending->is_response = is_response;
+ memcpy(pending->data, data, QUICLY_PATH_CHALLENGE_DATA_LEN);
+
+ /* append at tail via the tail-pointer indirection */
+ *conn->egress.path_challenge.tail_ref = pending;
+ conn->egress.path_challenge.tail_ref = &pending->next;
+ return 0;
+}
+
+/* Distributes handshake bytes produced by picotls across the per-epoch
+ * CRYPTO streams. epoch_offsets[] delimits, within tlsbuf, the bytes that
+ * belong to each of the four epochs; each non-empty slice is appended to the
+ * corresponding crypto stream (stream id == -(1 + epoch)). Returns 0 on
+ * success or the first streambuf write error. */
+static int write_crypto_data(quicly_conn_t *conn, ptls_buffer_t *tlsbuf, size_t epoch_offsets[5])
+{
+ size_t epoch;
+ int ret;
+
+ if (tlsbuf->off == 0)
+ return 0;
+
+ for (epoch = 0; epoch < 4; ++epoch) {
+ size_t len = epoch_offsets[epoch + 1] - epoch_offsets[epoch];
+ if (len == 0)
+ continue;
+ quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch));
+ assert(stream != NULL);
+ if ((ret = quicly_streambuf_egress_write(stream, tlsbuf->base + epoch_offsets[epoch], len)) != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* on_receive callback of the crypto streams: buffers the incoming CRYPTO
+ * bytes, feeds them to picotls, forwards any handshake response produced, and
+ * closes the connection on a handshake failure. The epoch of the stream is
+ * derived from its negative id. */
+void crypto_stream_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len)
+{
+ quicly_conn_t *conn = stream->conn;
+ size_t in_epoch = -(1 + stream->stream_id), epoch_offsets[5] = {0};
+ ptls_iovec_t input;
+ ptls_buffer_t output;
+
+ /* errors from ingress buffering are silently dropped here; presumably the
+ * failure surfaces later through the handshake — TODO confirm */
+ if (quicly_streambuf_ingress_receive(stream, off, src, len) != 0)
+ return;
+
+ ptls_buffer_init(&output, "", 0);
+
+ /* send handshake messages to picotls, and let it fill in the response */
+ while ((input = quicly_streambuf_ingress_get(stream)).len != 0) {
+ int handshake_result = ptls_handle_message(conn->crypto.tls, &output, epoch_offsets, in_epoch, input.base, input.len,
+ &conn->crypto.handshake_properties);
+ quicly_streambuf_ingress_shift(stream, input.len);
+ QUICLY_PROBE(CRYPTO_HANDSHAKE, conn, probe_now(), handshake_result);
+ switch (handshake_result) {
+ case 0:
+ case PTLS_ERROR_IN_PROGRESS:
+ break;
+ default:
+ /* fatal handshake error: convert TLS alerts, otherwise report an
+ * internal transport error, and start closing the connection */
+ initiate_close(conn,
+ PTLS_ERROR_GET_CLASS(handshake_result) == PTLS_ERROR_CLASS_SELF_ALERT ? handshake_result
+ : QUICLY_TRANSPORT_ERROR_INTERNAL,
+ QUICLY_FRAME_TYPE_CRYPTO, NULL);
+ goto Exit;
+ }
+ /* drop 0-RTT write key if 0-RTT is rejected by peer */
+ if (conn->application != NULL && !conn->application->one_rtt_writable &&
+ conn->application->cipher.egress.key.aead != NULL) {
+ assert(quicly_is_client(conn));
+ if (conn->crypto.handshake_properties.client.early_data_acceptance == PTLS_EARLY_DATA_REJECTED) {
+ dispose_cipher(&conn->application->cipher.egress.key);
+ conn->application->cipher.egress.key = (struct st_quicly_cipher_context_t){NULL};
+ discard_sentmap_by_epoch(
+ conn, 1u << QUICLY_EPOCH_1RTT); /* retire all packets with ack_epoch == 3; they are all 0-RTT packets */
+ }
+ }
+ }
+ write_crypto_data(conn, &output, epoch_offsets);
+
+Exit:
+ ptls_buffer_dispose(&output);
+}
+
+/* Initializes the send/receive state and the auxiliary send/receive
+ * properties of a freshly allocated stream. Sides that do not exist for the
+ * stream type (uni-directional) are initialized in the closed state. */
+static void init_stream_properties(quicly_stream_t *stream, uint32_t initial_max_stream_data_local,
+ uint64_t initial_max_stream_data_remote)
+{
+ int is_client = quicly_is_client(stream->conn);
+
+ if (quicly_stream_has_send_side(is_client, stream->stream_id)) {
+ quicly_sendstate_init(&stream->sendstate);
+ } else {
+ quicly_sendstate_init_closed(&stream->sendstate);
+ }
+ if (quicly_stream_has_receive_side(is_client, stream->stream_id)) {
+ quicly_recvstate_init(&stream->recvstate);
+ } else {
+ quicly_recvstate_init_closed(&stream->recvstate);
+ }
+ stream->streams_blocked = 0;
+
+ stream->_send_aux.max_stream_data = initial_max_stream_data_remote;
+ stream->_send_aux.stop_sending.sender_state = QUICLY_SENDER_STATE_NONE;
+ stream->_send_aux.stop_sending.error_code = 0;
+ stream->_send_aux.reset_stream.sender_state = QUICLY_SENDER_STATE_NONE;
+ stream->_send_aux.reset_stream.error_code = 0;
+ quicly_maxsender_init(&stream->_send_aux.max_stream_data_sender, initial_max_stream_data_local);
+ quicly_linklist_init(&stream->_send_aux.pending_link.control);
+ quicly_linklist_init(&stream->_send_aux.pending_link.default_scheduler);
+
+ stream->_recv_aux.window = initial_max_stream_data_local;
+
+ /* Set the number of max ranges to be capable of handling following case:
+ * * every one of the two packets being sent are lost
+ * * average size of a STREAM frame found in a packet is >= ~512 bytes
+ * See also: the doc-comment on `_recv_aux.max_ranges`.
+ */
+ if ((stream->_recv_aux.max_ranges = initial_max_stream_data_local / 1024) < 63)
+ stream->_recv_aux.max_ranges = 63;
+}
+
+/* Counterpart of init_stream_properties: releases the send/receive state and
+ * unlinks the stream from any pending lists it may be on. */
+static void dispose_stream_properties(quicly_stream_t *stream)
+{
+ quicly_sendstate_dispose(&stream->sendstate);
+ quicly_recvstate_dispose(&stream->recvstate);
+ quicly_maxsender_dispose(&stream->_send_aux.max_stream_data_sender);
+ quicly_linklist_unlink(&stream->_send_aux.pending_link.control);
+ quicly_linklist_unlink(&stream->_send_aux.pending_link.default_scheduler);
+}
+
+/* Allocates a stream object, registers it in the connection's stream hash
+ * table, and initializes its properties. Returns NULL if the stream object
+ * itself cannot be allocated.
+ * NOTE(review): the kh_put result code `r` is never inspected; an allocation
+ * failure inside khash would only be caught by the assert below — confirm
+ * this matches upstream's OOM policy. */
+static quicly_stream_t *open_stream(quicly_conn_t *conn, uint64_t stream_id, uint32_t initial_max_stream_data_local,
+ uint64_t initial_max_stream_data_remote)
+{
+ quicly_stream_t *stream;
+
+ if ((stream = malloc(sizeof(*stream))) == NULL)
+ return NULL;
+ stream->conn = conn;
+ stream->stream_id = stream_id;
+ stream->callbacks = NULL;
+ stream->data = NULL;
+
+ int r;
+ khiter_t iter = kh_put(quicly_stream_t, conn->streams, stream_id, &r);
+ assert(iter != kh_end(conn->streams));
+ kh_val(conn->streams, iter) = stream;
+
+ init_stream_properties(stream, initial_max_stream_data_local, initial_max_stream_data_remote);
+
+ return stream;
+}
+
+/* Returns the stream-group counters (host vs. peer, uni vs. bidi) that the
+ * given stream id belongs to, based on who initiated the stream. */
+static struct st_quicly_conn_streamgroup_state_t *get_streamgroup_state(quicly_conn_t *conn, quicly_stream_id_t stream_id)
+{
+ if (quicly_is_client(conn) == quicly_stream_is_client_initiated(stream_id)) {
+ return quicly_stream_is_unidirectional(stream_id) ? &conn->super.host.uni : &conn->super.host.bidi;
+ } else {
+ return quicly_stream_is_unidirectional(stream_id) ? &conn->super.peer.uni : &conn->super.peer.bidi;
+ }
+}
+
+/* Returns non-zero when a MAX_STREAMS frame (uni or bidi, per `uni`) should
+ * be sent to the peer. Returns 0 when the corresponding maxsender does not
+ * exist, i.e. we never advertise a limit for that stream type. */
+static int should_send_max_streams(quicly_conn_t *conn, int uni)
+{
+ quicly_maxsender_t *maxsender;
+ if ((maxsender = uni ? conn->ingress.max_streams.uni : conn->ingress.max_streams.bidi) == NULL)
+ return 0;
+
+ /* next_stream_id / 4 converts the id space to a stream count */
+ struct st_quicly_conn_streamgroup_state_t *group = uni ? &conn->super.peer.uni : &conn->super.peer.bidi;
+ if (!quicly_maxsender_should_send_max(maxsender, group->next_stream_id / 4, group->num_streams, 768))
+ return 0;
+
+ return 1;
+}
+
+/* Tears down a stream: fires the on_destroy callback, removes the stream from
+ * the hash table, updates per-group / per-epoch bookkeeping, releases the
+ * stream state, possibly triggers a MAX_STREAMS update, and frees the
+ * object. `err` is passed through to the application callback. */
+static void destroy_stream(quicly_stream_t *stream, int err)
+{
+ quicly_conn_t *conn = stream->conn;
+
+ if (stream->callbacks != NULL)
+ stream->callbacks->on_destroy(stream, err);
+
+ khiter_t iter = kh_get(quicly_stream_t, conn->streams, stream->stream_id);
+ assert(iter != kh_end(conn->streams));
+ kh_del(quicly_stream_t, conn->streams, iter);
+
+ if (stream->stream_id < 0) {
+ /* crypto stream: clear its pending-flows bit */
+ size_t epoch = -(1 + stream->stream_id);
+ stream->conn->egress.pending_flows &= ~(uint8_t)(1 << epoch);
+ } else {
+ struct st_quicly_conn_streamgroup_state_t *group = get_streamgroup_state(conn, stream->stream_id);
+ --group->num_streams;
+ }
+
+ dispose_stream_properties(stream);
+
+ if (conn->application != NULL) {
+ /* The function is normally invoked when receiving a packet, therefore just setting send_ack_at to zero is sufficient to
+ * trigger the emission of the MAX_STREAMS frame. FWIW, the only case the function is invoked when not receiving a packet is
+ * when the connection is being closed. In such case, the change will not have any bad side effects.
+ */
+ if (should_send_max_streams(conn, quicly_stream_is_unidirectional(stream->stream_id)))
+ conn->egress.send_ack_at = 0;
+ }
+
+ free(stream);
+}
+
+/* Destroys every stream of the connection; crypto streams (negative ids) are
+ * included only when including_crypto_streams is non-zero. */
+static void destroy_all_streams(quicly_conn_t *conn, int err, int including_crypto_streams)
+{
+ quicly_stream_t *stream;
+ kh_foreach_value(conn->streams, stream, {
+ /* TODO do we need to send reset signals to open streams? */
+ if (including_crypto_streams || stream->stream_id >= 0)
+ destroy_stream(stream, err);
+ });
+}
+
+/* Looks up a stream by id in the connection's hash table; returns NULL when
+ * the stream does not exist. */
+quicly_stream_t *quicly_get_stream(quicly_conn_t *conn, quicly_stream_id_t stream_id)
+{
+ khiter_t iter = kh_get(quicly_stream_t, conn->streams, stream_id);
+ if (iter != kh_end(conn->streams))
+ return kh_val(conn->streams, iter);
+ return NULL;
+}
+
+/* Accessor: returns the picotls object backing the connection. */
+ptls_t *quicly_get_tls(quicly_conn_t *conn)
+{
+ return conn->crypto.tls;
+}
+
+/* Fills `stats` with a snapshot of the connection statistics: the pre-built
+ * counters are copied wholesale, then the live RTT and congestion-controller
+ * state are added. Always returns 0. */
+int quicly_get_stats(quicly_conn_t *conn, quicly_stats_t *stats)
+{
+ /* copy the pre-built stats fields */
+ memcpy(stats, &conn->super.stats, sizeof(conn->super.stats));
+
+ /* set or generate the non-pre-built stats fields here */
+ stats->rtt = conn->egress.loss.rtt;
+ stats->cc = conn->egress.cc;
+
+ return 0;
+}
+
+/* Returns the stream limit we have most recently advertised to the peer for
+ * the given direction. NOTE(review): unlike should_send_max_streams, this
+ * dereferences the maxsender without a NULL check — presumably callers only
+ * use it for stream types that are enabled; confirm. */
+quicly_stream_id_t quicly_get_ingress_max_streams(quicly_conn_t *conn, int uni)
+{
+ quicly_maxsender_t *maxsender = uni ? conn->ingress.max_streams.uni : conn->ingress.max_streams.bidi;
+ return maxsender->max_committed;
+}
+
+/* Reports connection-level flow control state. Each out-parameter may be
+ * NULL if the caller is not interested in that value: bytes the peer permits
+ * us to send, bytes we have sent, and bytes we have consumed on ingress. */
+void quicly_get_max_data(quicly_conn_t *conn, uint64_t *send_permitted, uint64_t *sent, uint64_t *consumed)
+{
+ if (send_permitted != NULL)
+ *send_permitted = conn->egress.max_data.permitted;
+ if (sent != NULL)
+ *sent = conn->egress.max_data.sent;
+ if (consumed != NULL)
+ *consumed = conn->ingress.max_data.bytes_consumed;
+}
+
+/* Re-arms the idle timeout. The effective timeout is the minimum of our own
+ * and the peer's max_idle_timeout (the peer's value is honored only after
+ * the handshake spaces are gone), floored at 3 PTOs. On receive the timer is
+ * armed to fire-on-send; on send it is re-armed only if flagged. */
+static void update_idle_timeout(quicly_conn_t *conn, int is_in_receive)
+{
+ if (!is_in_receive && !conn->idle_timeout.should_rearm_on_send)
+ return;
+
+ /* calculate the minimum of the two max_idle_timeout */
+ int64_t idle_msec = INT64_MAX;
+ if (conn->initial == NULL && conn->handshake == NULL && conn->super.peer.transport_params.max_idle_timeout != 0)
+ idle_msec = conn->super.peer.transport_params.max_idle_timeout;
+ if (conn->super.ctx->transport_params.max_idle_timeout != 0 && conn->super.ctx->transport_params.max_idle_timeout < idle_msec)
+ idle_msec = conn->super.ctx->transport_params.max_idle_timeout;
+
+ /* both sides disabled the idle timeout */
+ if (idle_msec == INT64_MAX)
+ return;
+
+ uint32_t three_pto = 3 * quicly_rtt_get_pto(&conn->egress.loss.rtt, conn->super.ctx->transport_params.max_ack_delay,
+ conn->egress.loss.conf->min_pto);
+ conn->idle_timeout.at = now + (idle_msec > three_pto ? idle_msec : three_pto);
+ conn->idle_timeout.should_rearm_on_send = is_in_receive;
+}
+
+/* Asks the stream scheduler whether any stream payload could be emitted,
+ * informing it whether connection-level flow control is saturated. */
+static int scheduler_can_send(quicly_conn_t *conn)
+{
+ /* scheduler would never have data to send, until application keys become available */
+ if (conn->application == NULL)
+ return 0;
+ int conn_is_saturated = !(conn->egress.max_data.sent < conn->egress.max_data.permitted);
+ return conn->super.ctx->stream_scheduler->can_send(conn->super.ctx->stream_scheduler, conn, conn_is_saturated);
+}
+
+/* Re-arms the loss-recovery timer based on whether there are bytes in flight
+ * (or an address-validation probe to send), whether the handshake is still
+ * in progress, and whether the scheduler has data ready. */
+static void update_loss_alarm(quicly_conn_t *conn)
+{
+ int has_outstanding = conn->egress.sentmap.bytes_in_flight != 0 || conn->super.peer.address_validation.send_probe,
+ handshake_is_in_progress = conn->initial != NULL || conn->handshake != NULL;
+ quicly_loss_update_alarm(&conn->egress.loss, now, conn->egress.last_retransmittable_sent_at, has_outstanding,
+ scheduler_can_send(conn), handshake_is_in_progress, conn->egress.max_data.sent);
+}
+
+/* Creates the CRYPTO stream for the given epoch (stream id -(1 + epoch))
+ * with a fixed 64 KiB window in both directions, attaches a streambuf and
+ * the crypto stream callbacks. Returns 0 on success or a PTLS/streambuf
+ * error. */
+static int create_handshake_flow(quicly_conn_t *conn, size_t epoch)
+{
+ quicly_stream_t *stream;
+ int ret;
+
+ if ((stream = open_stream(conn, -(quicly_stream_id_t)(1 + epoch), 65536, 65536)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ if ((ret = quicly_streambuf_create(stream, sizeof(quicly_streambuf_t))) != 0) {
+ destroy_stream(stream, ret);
+ return ret;
+ }
+ stream->callbacks = &crypto_stream_callbacks;
+
+ return 0;
+}
+
+/* Destroys the CRYPTO stream of the given epoch, if it still exists. */
+static void destroy_handshake_flow(quicly_conn_t *conn, size_t epoch)
+{
+ quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch));
+ if (stream != NULL)
+ destroy_stream(stream, 0);
+}
+
+/* Allocates and initializes a packet-number space of `sz` bytes; `sz` may be
+ * larger than the base struct so that derived spaces (initial / handshake /
+ * application) can embed extra state, which is zero-filled. Returns NULL on
+ * allocation failure. */
+static struct st_quicly_pn_space_t *alloc_pn_space(size_t sz)
+{
+ struct st_quicly_pn_space_t *space;
+
+ if ((space = malloc(sz)) == NULL)
+ return NULL;
+
+ quicly_ranges_init(&space->ack_queue);
+ space->largest_pn_received_at = INT64_MAX;
+ space->next_expected_packet_number = 0;
+ space->unacked_count = 0;
+ /* zero the derived-struct tail, if any */
+ if (sz != sizeof(*space))
+ memset((uint8_t *)space + sizeof(*space), 0, sz - sizeof(*space));
+
+ return space;
+}
+
+/* Releases the ack queue of a packet-number space and the space itself. */
+static void do_free_pn_space(struct st_quicly_pn_space_t *space)
+{
+ quicly_ranges_clear(&space->ack_queue);
+ free(space);
+}
+
+/* Records receipt of packet number `pn` into the ack ranges. Sets
+ * *is_out_of_order when the packet does not extend the newest range by one.
+ * Returns 0 on success or the error from quicly_ranges_add. */
+static int record_pn(quicly_ranges_t *ranges, uint64_t pn, int *is_out_of_order)
+{
+ int ret;
+
+ *is_out_of_order = 0;
+
+ if (ranges->num_ranges != 0) {
+ /* fast path that is taken when we receive a packet in-order */
+ if (ranges->ranges[ranges->num_ranges - 1].end == pn) {
+ ranges->ranges[ranges->num_ranges - 1].end = pn + 1;
+ return 0;
+ }
+ *is_out_of_order = 1;
+ }
+
+ /* slow path; we add, then remove the oldest ranges when the number of ranges exceed the maximum */
+ if ((ret = quicly_ranges_add(ranges, pn, pn + 1)) != 0)
+ return ret;
+ if (ranges->num_ranges > QUICLY_MAX_ACK_BLOCKS)
+ quicly_ranges_drop_by_range_indices(ranges, ranges->num_ranges - QUICLY_MAX_ACK_BLOCKS, ranges->num_ranges);
+
+ return 0;
+}
+
+/* Registers receipt of packet `pn` in the given PN space and decides when the
+ * next ACK should be sent: immediately for out-of-order ack-eliciting
+ * packets, for Initial/Handshake packets, or after enough unacked packets;
+ * otherwise arms the delayed-ack timer. */
+static int record_receipt(quicly_conn_t *conn, struct st_quicly_pn_space_t *space, uint64_t pn, int is_ack_only, size_t epoch)
+{
+ int ret, ack_now, is_out_of_order;
+
+ if ((ret = record_pn(&space->ack_queue, pn, &is_out_of_order)) != 0)
+ goto Exit;
+
+ ack_now = is_out_of_order && !is_ack_only;
+
+ /* update largest_pn_received_at (TODO implement deduplication at an earlier moment?) */
+ if (space->ack_queue.ranges[space->ack_queue.num_ranges - 1].end == pn + 1)
+ space->largest_pn_received_at = now;
+
+ /* if the received packet is ack-eliciting, update / schedule transmission of ACK */
+ if (!is_ack_only) {
+ space->unacked_count++;
+ /* Ack after QUICLY_NUM_PACKETS_BEFORE_ACK packets or after the delayed ack timeout */
+ if (space->unacked_count >= QUICLY_NUM_PACKETS_BEFORE_ACK || epoch == QUICLY_EPOCH_INITIAL ||
+ epoch == QUICLY_EPOCH_HANDSHAKE)
+ ack_now = 1;
+ }
+
+ if (ack_now) {
+ conn->egress.send_ack_at = now;
+ } else if (conn->egress.send_ack_at == INT64_MAX) {
+ conn->egress.send_ack_at = now + QUICLY_DELAYED_ACK_TIMEOUT;
+ }
+
+ ret = 0;
+Exit:
+ return ret;
+}
+
+/* Frees an Initial or Handshake packet-number space, disposing of its ingress
+ * and egress ciphers first; sets *space to NULL. No-op when already NULL. */
+static void free_handshake_space(struct st_quicly_handshake_space_t **space)
+{
+ if (*space != NULL) {
+ if ((*space)->cipher.ingress.aead != NULL)
+ dispose_cipher(&(*space)->cipher.ingress);
+ if ((*space)->cipher.egress.aead != NULL)
+ dispose_cipher(&(*space)->cipher.egress);
+ do_free_pn_space(&(*space)->super);
+ *space = NULL;
+ }
+}
+
+/* Builds the header-protection and AEAD contexts for the given epoch and
+ * direction via the configured crypto engine. */
+static int setup_cipher(quicly_conn_t *conn, size_t epoch, int is_enc, ptls_cipher_context_t **hp_ctx,
+ ptls_aead_context_t **aead_ctx, ptls_aead_algorithm_t *aead, ptls_hash_algorithm_t *hash,
+ const void *secret)
+{
+ /* quicly_accept builds cipher before instantiating a connection. In such case, we use the default crypto engine */
+ quicly_crypto_engine_t *engine = conn != NULL ? conn->super.ctx->crypto_engine : &quicly_default_crypto_engine;
+
+ return engine->setup_cipher(engine, conn, epoch, is_enc, hp_ctx, aead_ctx, aead, hash, secret);
+}
+
+/* Allocates the Initial or Handshake packet-number space (per `epoch`) and
+ * creates its CRYPTO stream. Returns PTLS_ERROR_NO_MEMORY on allocation
+ * failure. */
+static int setup_handshake_space_and_flow(quicly_conn_t *conn, size_t epoch)
+{
+ struct st_quicly_handshake_space_t **space = epoch == QUICLY_EPOCH_INITIAL ? &conn->initial : &conn->handshake;
+ if ((*space = (void *)alloc_pn_space(sizeof(struct st_quicly_handshake_space_t))) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ return create_handshake_flow(conn, epoch);
+}
+
+/* Frees the application (0-RTT/1-RTT) packet-number space: releases both
+ * ingress header-protection contexts and AEADs, the egress key, and wipes the
+ * egress traffic secret; sets *space to NULL.
+ * NOTE(review): the secret is wiped with plain memset right before free — a
+ * compiler may elide it; ptls_clear_memory is used elsewhere for this
+ * purpose. Confirm against upstream. */
+static void free_application_space(struct st_quicly_application_space_t **space)
+{
+ if (*space != NULL) {
+#define DISPOSE_INGRESS(label, func) \
+ if ((*space)->cipher.ingress.label != NULL) \
+ func((*space)->cipher.ingress.label)
+ DISPOSE_INGRESS(header_protection.zero_rtt, ptls_cipher_free);
+ DISPOSE_INGRESS(header_protection.one_rtt, ptls_cipher_free);
+ DISPOSE_INGRESS(aead[0], ptls_aead_free);
+ DISPOSE_INGRESS(aead[1], ptls_aead_free);
+#undef DISPOSE_INGRESS
+ if ((*space)->cipher.egress.key.aead != NULL)
+ dispose_cipher(&(*space)->cipher.egress.key);
+ memset((*space)->cipher.egress.secret, 0, sizeof((*space)->cipher.egress.secret));
+ do_free_pn_space(&(*space)->super);
+ *space = NULL;
+ }
+}
+
+/* Allocates the application packet-number space and its 1-RTT CRYPTO flow. */
+static int setup_application_space(quicly_conn_t *conn)
+{
+ if ((conn->application = (void *)alloc_pn_space(sizeof(struct st_quicly_application_space_t))) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+
+ /* prohibit key-update until receiving an ACK for an 1-RTT packet */
+ conn->application->cipher.egress.key_update_pn.last = 0;
+ conn->application->cipher.egress.key_update_pn.next = UINT64_MAX;
+
+ return create_handshake_flow(conn, QUICLY_EPOCH_1RTT);
+}
+
+/* Discards all state tied to the Initial or Handshake epoch once it is no
+ * longer needed: retires the corresponding sentmap entries, destroys the
+ * epoch's CRYPTO stream, and frees the packet-number space. */
+static int discard_handshake_context(quicly_conn_t *conn, size_t epoch)
+{
+ int ret;
+
+ assert(epoch == QUICLY_EPOCH_INITIAL || epoch == QUICLY_EPOCH_HANDSHAKE);
+
+ if ((ret = discard_sentmap_by_epoch(conn, 1u << epoch)) != 0)
+ return ret;
+ destroy_handshake_flow(conn, epoch);
+ free_handshake_space(epoch == QUICLY_EPOCH_INITIAL ? &conn->initial : &conn->handshake);
+
+ return 0;
+}
+
+/* Applies the peer's transport parameters to our egress limits: the
+ * connection-level flow-control window and the uni/bidi stream limits. */
+static int apply_peer_transport_params(quicly_conn_t *conn)
+{
+ int ret;
+
+ conn->egress.max_data.permitted = conn->super.peer.transport_params.max_data;
+ if ((ret = update_max_streams(&conn->egress.max_streams.uni, conn->super.peer.transport_params.max_streams_uni)) != 0)
+ return ret;
+ if ((ret = update_max_streams(&conn->egress.max_streams.bidi, conn->super.peer.transport_params.max_streams_bidi)) != 0)
+ return ret;
+
+ return 0;
+}
+
+/* Performs one QUIC key update step for one direction: derives the next
+ * traffic secret with the "quic ku" HKDF label, builds the new AEAD, and —
+ * only on success — swaps it in and overwrites `secret` in place. The
+ * temporary secret is always wiped before returning. */
+static int update_1rtt_key(quicly_conn_t *conn, ptls_cipher_suite_t *cipher, int is_enc, ptls_aead_context_t **aead,
+ uint8_t *secret)
+{
+ uint8_t new_secret[PTLS_MAX_DIGEST_SIZE];
+ ptls_aead_context_t *new_aead = NULL;
+ int ret;
+
+ /* generate next AEAD key */
+ if ((ret = ptls_hkdf_expand_label(cipher->hash, new_secret, cipher->hash->digest_size,
+ ptls_iovec_init(secret, cipher->hash->digest_size), "quic ku", ptls_iovec_init(NULL, 0),
+ NULL)) != 0)
+ goto Exit;
+ if ((ret = setup_cipher(conn, QUICLY_EPOCH_1RTT, is_enc, NULL, &new_aead, cipher->aead, cipher->hash, new_secret)) != 0)
+ goto Exit;
+
+ /* success! update AEAD and secret */
+ if (*aead != NULL)
+ ptls_aead_free(*aead);
+ *aead = new_aead;
+ new_aead = NULL;
+ memcpy(secret, new_secret, cipher->hash->digest_size);
+
+ ret = 0;
+Exit:
+ if (new_aead != NULL)
+ ptls_aead_free(new_aead);
+ ptls_clear_memory(new_secret, cipher->hash->digest_size);
+ return ret;
+}
+
+/* Advances our own (egress) 1-RTT key phase: derives the next key, bumps the
+ * phase counter, and records the packet number from which the update takes
+ * effect so that further updates are blocked until it is acknowledged. */
+static int update_1rtt_egress_key(quicly_conn_t *conn)
+{
+ struct st_quicly_application_space_t *space = conn->application;
+ ptls_cipher_suite_t *cipher = ptls_get_cipher(conn->crypto.tls);
+ int ret;
+
+ /* generate next AEAD key, and increment key phase if it succeeds */
+ if ((ret = update_1rtt_key(conn, cipher, 1, &space->cipher.egress.key.aead, space->cipher.egress.secret)) != 0)
+ return ret;
+ ++space->cipher.egress.key_phase;
+
+ /* signal that we are waiting for an ACK */
+ space->cipher.egress.key_update_pn.last = conn->egress.packet_number;
+ space->cipher.egress.key_update_pn.next = UINT64_MAX;
+
+ QUICLY_PROBE(CRYPTO_SEND_KEY_UPDATE, conn, probe_now(), space->cipher.egress.key_phase,
+ QUICLY_PROBE_HEXDUMP(space->cipher.egress.secret, cipher->hash->digest_size));
+
+ return 0;
+}
+
+/* Called when a packet was decrypted under a newer ingress key phase.
+ * Records the phase and, if the peer is now ahead of us, follows suit by
+ * updating our egress key as required by the key-update mechanism. */
+static int received_key_update(quicly_conn_t *conn, uint64_t newly_decrypted_key_phase)
+{
+ struct st_quicly_application_space_t *space = conn->application;
+
+ assert(space->cipher.ingress.key_phase.decrypted < newly_decrypted_key_phase);
+ assert(newly_decrypted_key_phase <= space->cipher.ingress.key_phase.prepared);
+
+ space->cipher.ingress.key_phase.decrypted = newly_decrypted_key_phase;
+
+ QUICLY_PROBE(CRYPTO_RECEIVE_KEY_UPDATE, conn, probe_now(), space->cipher.ingress.key_phase.decrypted,
+ QUICLY_PROBE_HEXDUMP(space->cipher.ingress.secret, ptls_get_cipher(conn->crypto.tls)->hash->digest_size));
+
+ if (space->cipher.egress.key_phase < space->cipher.ingress.key_phase.decrypted) {
+ return update_1rtt_egress_key(conn);
+ } else {
+ return 0;
+ }
+}
+
+/* Releases every resource owned by the connection: streams (crypto ones
+ * included), flow-control senders, queued path challenges, the sentmap, the
+ * stream hash table, all packet-number spaces, the TLS object, the token,
+ * and finally the connection itself. */
+void quicly_free(quicly_conn_t *conn)
+{
+ QUICLY_PROBE(FREE, conn, probe_now());
+
+ destroy_all_streams(conn, 0, 1);
+
+ quicly_maxsender_dispose(&conn->ingress.max_data.sender);
+ if (conn->ingress.max_streams.uni != NULL)
+ quicly_maxsender_dispose(conn->ingress.max_streams.uni);
+ if (conn->ingress.max_streams.bidi != NULL)
+ quicly_maxsender_dispose(conn->ingress.max_streams.bidi);
+ /* drain the pending path-challenge list */
+ while (conn->egress.path_challenge.head != NULL) {
+ struct st_quicly_pending_path_challenge_t *pending = conn->egress.path_challenge.head;
+ conn->egress.path_challenge.head = pending->next;
+ free(pending);
+ }
+ quicly_sentmap_dispose(&conn->egress.sentmap);
+
+ kh_destroy(quicly_stream_t, conn->streams);
+
+ /* by now, no stream may remain on any scheduling list */
+ assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.uni));
+ assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.blocked.bidi));
+ assert(!quicly_linklist_is_linked(&conn->egress.pending_streams.control));
+ assert(!quicly_linklist_is_linked(&conn->super._default_scheduler.active));
+ assert(!quicly_linklist_is_linked(&conn->super._default_scheduler.blocked));
+
+ free_handshake_space(&conn->initial);
+ free_handshake_space(&conn->handshake);
+ free_application_space(&conn->application);
+
+ ptls_buffer_dispose(&conn->crypto.transport_params.buf);
+ ptls_free(conn->crypto.tls);
+
+ free(conn->token.base);
+ free(conn);
+}
+
+/* Derives one directional Initial secret from the master secret using the
+ * given HKDF label ("client in" / "server in") and builds the cipher
+ * contexts for it. The intermediate secret is wiped before returning. */
+static int setup_initial_key(struct st_quicly_cipher_context_t *ctx, ptls_cipher_suite_t *cs, const void *master_secret,
+ const char *label, int is_enc, quicly_conn_t *conn)
+{
+ uint8_t aead_secret[PTLS_MAX_DIGEST_SIZE];
+ int ret;
+
+ if ((ret = ptls_hkdf_expand_label(cs->hash, aead_secret, cs->hash->digest_size,
+ ptls_iovec_init(master_secret, cs->hash->digest_size), label, ptls_iovec_init(NULL, 0),
+ NULL)) != 0)
+ goto Exit;
+ if ((ret = setup_cipher(conn, QUICLY_EPOCH_INITIAL, is_enc, &ctx->header_protection, &ctx->aead, cs->aead, cs->hash,
+ aead_secret)) != 0)
+ goto Exit;
+
+Exit:
+ ptls_clear_memory(aead_secret, sizeof(aead_secret));
+ return ret;
+}
+
+/**
+ * Derives the Initial-packet protection keys from the client's destination
+ * connection id and builds the requested cipher contexts (either of ingress /
+ * egress may be NULL). On egress setup failure an already-created ingress
+ * context is disposed of, so the caller never inherits half-built state.
+ * NOTE(review): the hard-coded salt is version-specific (draft-era QUIC, not
+ * the RFC 9001 value) — confirm which wire version this vendored copy
+ * targets.
+ * @param conn maybe NULL when called by quicly_accept
+ */
+static int setup_initial_encryption(ptls_cipher_suite_t *cs, struct st_quicly_cipher_context_t *ingress,
+ struct st_quicly_cipher_context_t *egress, ptls_iovec_t cid, int is_client, quicly_conn_t *conn)
+{
+ static const uint8_t salt[] = {0xc3, 0xee, 0xf7, 0x12, 0xc7, 0x2e, 0xbb, 0x5a, 0x11, 0xa7,
+ 0xd2, 0x43, 0x2b, 0xb4, 0x63, 0x65, 0xbe, 0xf9, 0xf5, 0x02};
+ static const char *labels[2] = {"client in", "server in"};
+ uint8_t secret[PTLS_MAX_DIGEST_SIZE];
+ int ret;
+
+ /* extract master secret */
+ if ((ret = ptls_hkdf_extract(cs->hash, secret, ptls_iovec_init(salt, sizeof(salt)), cid)) != 0)
+ goto Exit;
+
+ /* create aead contexts */
+ if (ingress != NULL && (ret = setup_initial_key(ingress, cs, secret, labels[is_client], 0, conn)) != 0)
+ goto Exit;
+ if (egress != NULL && (ret = setup_initial_key(egress, cs, secret, labels[!is_client], 1, conn)) != 0) {
+ if (ingress != NULL)
+ dispose_cipher(ingress);
+ goto Exit;
+ }
+
+Exit:
+ ptls_clear_memory(secret, sizeof(secret));
+ return ret;
+}
+
+/* Applies a received STREAM (or CRYPTO) frame to the stream: enforces
+ * stream- and connection-level flow control (or the crypto-byte limit for
+ * crypto streams), merges the data into the receive state, and delivers the
+ * newly contiguous portion to the application via on_receive. May destroy
+ * the stream when its transfer becomes complete. Returns 0 or a transport
+ * error code. */
+static int apply_stream_frame(quicly_stream_t *stream, quicly_stream_frame_t *frame)
+{
+ int ret;
+
+ QUICLY_PROBE(STREAM_RECEIVE, stream->conn, probe_now(), stream, frame->offset, frame->data.len);
+
+ /* ignore frames arriving after the transfer has completed */
+ if (quicly_recvstate_transfer_complete(&stream->recvstate))
+ return 0;
+
+ /* flow control */
+ if (stream->stream_id >= 0) {
+ /* STREAMs */
+ uint64_t max_stream_data = frame->offset + frame->data.len;
+ if ((int64_t)stream->_recv_aux.window < (int64_t)max_stream_data - (int64_t)stream->recvstate.data_off)
+ return QUICLY_TRANSPORT_ERROR_FLOW_CONTROL;
+ if (stream->recvstate.received.ranges[stream->recvstate.received.num_ranges - 1].end < max_stream_data) {
+ /* bytes beyond what has been seen so far count against the connection-level window */
+ uint64_t newly_received =
+ max_stream_data - stream->recvstate.received.ranges[stream->recvstate.received.num_ranges - 1].end;
+ if (stream->conn->ingress.max_data.bytes_consumed + newly_received >
+ stream->conn->ingress.max_data.sender.max_committed)
+ return QUICLY_TRANSPORT_ERROR_FLOW_CONTROL;
+ stream->conn->ingress.max_data.bytes_consumed += newly_received;
+ /* FIXME send MAX_DATA if necessary */
+ }
+ } else {
+ /* CRYPTO streams; maybe add different limit for 1-RTT CRYPTO? */
+ if (frame->offset + frame->data.len > stream->conn->super.ctx->max_crypto_bytes)
+ return QUICLY_TRANSPORT_ERROR_CRYPTO_BUFFER_EXCEEDED;
+ }
+
+ /* update recvbuf */
+ size_t apply_len = frame->data.len;
+ if ((ret = quicly_recvstate_update(&stream->recvstate, frame->offset, &apply_len, frame->is_fin,
+ stream->_recv_aux.max_ranges)) != 0)
+ return ret;
+
+ /* deliver the applied (non-duplicate) tail of the frame to the application */
+ if (apply_len != 0 || quicly_recvstate_transfer_complete(&stream->recvstate)) {
+ uint64_t buf_offset = frame->offset + frame->data.len - apply_len - stream->recvstate.data_off;
+ stream->callbacks->on_receive(stream, (size_t)buf_offset, frame->data.base + frame->data.len - apply_len, apply_len);
+ if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
+ return QUICLY_ERROR_IS_CLOSING;
+ }
+
+ if (should_send_max_stream_data(stream))
+ sched_stream_control(stream);
+
+ if (stream_is_destroyable(stream))
+ destroy_stream(stream, 0);
+
+ return 0;
+}
+
+/* Serializes the QUIC transport parameters into `buf`. Zero-valued optional
+ * parameters are omitted. Servers may additionally emit the original
+ * connection id and the stateless reset token (clients must pass NULL for
+ * both). When `expand` is set, a greasing parameter (id 31*100+27, following
+ * the reserved-id pattern 31*N+27) of one MTU is appended so the ClientHello
+ * spans multiple packets. Returns 0 or a ptls buffer error. */
+int quicly_encode_transport_parameter_list(ptls_buffer_t *buf, int is_client, const quicly_transport_parameters_t *params,
+ const quicly_cid_t *odcid, const void *stateless_reset_token, int expand)
+{
+ int ret;
+
+/* emit one parameter: varint id followed by a length-prefixed value block */
+#define PUSH_TP(buf, id, block) \
+ do { \
+ ptls_buffer_push_quicint((buf), (id)); \
+ ptls_buffer_push_block((buf), -1, block); \
+ } while (0)
+
+ if (params->max_stream_data.bidi_local != 0)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
+ { ptls_buffer_push_quicint(buf, params->max_stream_data.bidi_local); });
+ if (params->max_stream_data.bidi_remote != 0)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE,
+ { ptls_buffer_push_quicint(buf, params->max_stream_data.bidi_remote); });
+ if (params->max_stream_data.uni != 0)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI,
+ { ptls_buffer_push_quicint(buf, params->max_stream_data.uni); });
+ if (params->max_data != 0)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA, { ptls_buffer_push_quicint(buf, params->max_data); });
+ if (params->max_idle_timeout != 0)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT, { ptls_buffer_push_quicint(buf, params->max_idle_timeout); });
+ if (is_client) {
+ assert(odcid == NULL && stateless_reset_token == NULL);
+ } else {
+ if (odcid != NULL)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID, { ptls_buffer_pushv(buf, odcid->cid, odcid->len); });
+ if (stateless_reset_token != NULL)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN,
+ { ptls_buffer_pushv(buf, stateless_reset_token, QUICLY_STATELESS_RESET_TOKEN_LEN); });
+ }
+ if (params->max_streams_bidi != 0)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI,
+ { ptls_buffer_push_quicint(buf, params->max_streams_bidi); });
+ if (params->max_streams_uni != 0)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI,
+ { ptls_buffer_push_quicint(buf, params->max_streams_uni); });
+ if (QUICLY_LOCAL_ACK_DELAY_EXPONENT != QUICLY_DEFAULT_ACK_DELAY_EXPONENT)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT,
+ { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_ACK_DELAY_EXPONENT); });
+ if (QUICLY_LOCAL_MAX_ACK_DELAY != QUICLY_DEFAULT_MAX_ACK_DELAY)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY, { ptls_buffer_push_quicint(buf, QUICLY_LOCAL_MAX_ACK_DELAY); });
+ if (params->disable_active_migration)
+ PUSH_TP(buf, QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION, {});
+ /* if requested, add a greasing TP of 1 MTU size so that CH spans across multiple packets */
+ if (expand) {
+ PUSH_TP(buf, 31 * 100 + 27, {
+ if ((ret = ptls_buffer_reserve(buf, QUICLY_MAX_PACKET_SIZE)) != 0)
+ goto Exit;
+ memset(buf->base + buf->off, 0, QUICLY_MAX_PACKET_SIZE);
+ buf->off += QUICLY_MAX_PACKET_SIZE;
+ });
+ }
+
+#undef PUSH_TP
+
+ ret = 0;
+Exit:
+ return ret;
+}
+
+int quicly_decode_transport_parameter_list(quicly_transport_parameters_t *params, quicly_cid_t *odcid, void *stateless_reset_token,
+ int is_client, const uint8_t *src, const uint8_t *end)
+{
+/* When non-negative, ext_index contains the literal position within the list of extensions recognized by this function. That index
+ * is being used to find duplicates using a 64-bit bitmap (found_ext_bits). When the extension is being processed, ext_index is set
+ * to -1. */
+#define DECODE_ONE_EXTENSION(_id, block) \
+ do { \
+ if (ext_index >= 0) { \
+ if (id == (_id)) { \
+ if ((found_ext_bits & ((uint64_t)1 << ext_index)) != 0) { \
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER; \
+ goto Exit; \
+ } \
+ found_ext_bits |= (uint64_t)1 << ext_index; \
+ {block} ext_index = -1; \
+ } else { \
+ ++ext_index; \
+ } \
+ } \
+ } while (0)
+
+ uint64_t found_ext_bits = 0;
+ int ret;
+
+ /* set parameters to their default values */
+ *params = default_transport_params;
+ if (odcid != NULL)
+ odcid->len = 0;
+ if (stateless_reset_token != NULL)
+ memset(stateless_reset_token, 0, QUICLY_STATELESS_RESET_TOKEN_LEN);
+
+ /* decode the parameters block */
+ while (src != end) {
+ uint64_t id;
+ if ((id = quicly_decodev(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ int ext_index = 0;
+ ptls_decode_open_block(src, end, -1, {
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_ORIGINAL_CONNECTION_ID, {
+ size_t cidlen = end - src;
+ if (!(is_client && cidlen <= QUICLY_MAX_CID_LEN_V1)) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ if (odcid != NULL)
+ set_cid(odcid, ptls_iovec_init(src, cidlen));
+ src = end;
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, {
+ if ((params->max_stream_data.bidi_local = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, {
+ if ((params->max_stream_data.bidi_remote = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAM_DATA_UNI, {
+ if ((params->max_stream_data.uni = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_DATA, {
+ if ((params->max_data = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_STATELESS_RESET_TOKEN, {
+ if (!(is_client && end - src == QUICLY_STATELESS_RESET_TOKEN_LEN)) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ memcpy(stateless_reset_token, src, QUICLY_STATELESS_RESET_TOKEN_LEN);
+ src = end;
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_MAX_IDLE_TIMEOUT, {
+ if ((params->max_idle_timeout = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_BIDI, {
+ if ((params->max_streams_bidi = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_INITIAL_MAX_STREAMS_UNI, {
+ if ((params->max_streams_uni = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_ACK_DELAY_EXPONENT, {
+ uint64_t v;
+ if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ if (v > 20) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ params->ack_delay_exponent = (uint8_t)v;
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_MAX_ACK_DELAY, {
+ uint64_t v;
+ if ((v = ptls_decode_quicint(&src, end)) == UINT64_MAX) {
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ if (v >= 16384) { /* "values of 2^14 or greater are invalid" */
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ goto Exit;
+ }
+ params->max_ack_delay = (uint16_t)v;
+ });
+ DECODE_ONE_EXTENSION(QUICLY_TRANSPORT_PARAMETER_ID_DISABLE_ACTIVE_MIGRATION, { params->disable_active_migration = 1; });
+ /* skip unknown extension */
+ if (ext_index >= 0)
+ src = end;
+ });
+ }
+
+ ret = 0;
+Exit:
+ if (ret == PTLS_ALERT_DECODE_ERROR)
+ ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+ return ret;
+
+#undef DECODE_ONE_EXTENSION
+}
+
+/* ptls collect_extension callback; quicly is interested only in the QUIC transport parameters TLS extension */
+static int collect_transport_parameters(ptls_t *tls, struct st_ptls_handshake_properties_t *properties, uint16_t type)
+{
+    if (type == QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS)
+        return 1;
+    return 0;
+}
+
+/* Allocates and initializes a connection object; a client connection when `server_name` is non-NULL,
+ * a server connection otherwise. The connection is over-allocated so that the optional MAX_STREAMS
+ * maxsenders live in the same allocation as the connection itself. On success, ownership of the
+ * newly created `ptls_t` is transferred to the connection; returns NULL on TLS setup or allocation
+ * failure. */
+static quicly_conn_t *create_connection(quicly_context_t *ctx, const char *server_name, struct sockaddr *remote_addr,
+                                        struct sockaddr *local_addr, const quicly_cid_plaintext_t *new_cid,
+                                        ptls_handshake_properties_t *handshake_properties)
+{
+    ptls_t *tls = NULL;
+    struct {
+        quicly_conn_t _;
+        quicly_maxsender_t max_streams_bidi;
+        quicly_maxsender_t max_streams_uni;
+    } * conn;
+
+    assert(remote_addr != NULL && remote_addr->sa_family != AF_UNSPEC);
+
+    /* create the TLS context first (client mode iff server_name != NULL) so that nothing needs to be
+     * rolled back if it fails */
+    if ((tls = ptls_new(ctx->tls, server_name == NULL)) == NULL)
+        return NULL;
+    if (server_name != NULL && ptls_set_server_name(tls, server_name, strlen(server_name)) != 0) {
+        ptls_free(tls);
+        return NULL;
+    }
+    if ((conn = malloc(sizeof(*conn))) == NULL) {
+        ptls_free(tls);
+        return NULL;
+    }
+
+    memset(conn, 0, sizeof(*conn));
+    conn->_.super.ctx = ctx;
+    conn->_.super.master_id = *new_cid;
+    set_address(&conn->_.super.host.address, local_addr);
+    set_address(&conn->_.super.peer.address, remote_addr);
+    if (ctx->cid_encryptor != NULL) {
+        /* derive the first local CID (path_id 0) immediately; path_id 1 is the next one to be issued */
+        conn->_.super.master_id.path_id = 0;
+        ctx->cid_encryptor->encrypt_cid(ctx->cid_encryptor, &conn->_.super.host.src_cid, &conn->_.super.host.stateless_reset_token,
+                                        &conn->_.super.master_id);
+        conn->_.super.master_id.path_id = 1;
+    } else {
+        conn->_.super.master_id.path_id = QUICLY_MAX_PATH_ID;
+    }
+    conn->_.super.state = QUICLY_STATE_FIRSTFLIGHT;
+    if (server_name != NULL) {
+        /* client: the Initial DCID is chosen at random; stream IDs are assigned so that the two peers
+         * use disjoint ID groups (0-3) for host/peer x bidi/uni */
+        ctx->tls->random_bytes(conn->_.super.peer.cid.cid, QUICLY_MIN_INITIAL_DCID_LEN);
+        conn->_.super.peer.cid.len = QUICLY_MIN_INITIAL_DCID_LEN;
+        conn->_.super.host.bidi.next_stream_id = 0;
+        conn->_.super.host.uni.next_stream_id = 2;
+        conn->_.super.peer.bidi.next_stream_id = 1;
+        conn->_.super.peer.uni.next_stream_id = 3;
+    } else {
+        conn->_.super.host.bidi.next_stream_id = 1;
+        conn->_.super.host.uni.next_stream_id = 3;
+        conn->_.super.peer.bidi.next_stream_id = 0;
+        conn->_.super.peer.uni.next_stream_id = 2;
+    }
+    /* peer's parameters are set to the protocol defaults until the real ones arrive in the handshake */
+    conn->_.super.peer.transport_params = default_transport_params;
+    if (server_name != NULL && ctx->enforce_version_negotiation) {
+        /* advertise a greased version of the form 0x?a?a?a?a to force the server into version negotiation */
+        ctx->tls->random_bytes(&conn->_.super.version, sizeof(conn->_.super.version));
+        conn->_.super.version = (conn->_.super.version & 0xf0f0f0f0) | 0x0a0a0a0a;
+    } else {
+        conn->_.super.version = QUICLY_PROTOCOL_VERSION;
+    }
+    quicly_linklist_init(&conn->_.super._default_scheduler.active);
+    quicly_linklist_init(&conn->_.super._default_scheduler.blocked);
+    conn->_.streams = kh_init(quicly_stream_t);
+    quicly_maxsender_init(&conn->_.ingress.max_data.sender, conn->_.super.ctx->transport_params.max_data);
+    /* the MAX_STREAMS maxsenders are instantiated only when the corresponding local limit is non-zero */
+    if (conn->_.super.ctx->transport_params.max_streams_uni != 0) {
+        conn->_.ingress.max_streams.uni = &conn->max_streams_uni;
+        quicly_maxsender_init(conn->_.ingress.max_streams.uni, conn->_.super.ctx->transport_params.max_streams_uni);
+    }
+    if (conn->_.super.ctx->transport_params.max_streams_bidi != 0) {
+        conn->_.ingress.max_streams.bidi = &conn->max_streams_bidi;
+        quicly_maxsender_init(conn->_.ingress.max_streams.bidi, conn->_.super.ctx->transport_params.max_streams_bidi);
+    }
+    quicly_sentmap_init(&conn->_.egress.sentmap);
+    quicly_loss_init(&conn->_.egress.loss, &conn->_.super.ctx->loss,
+                     conn->_.super.ctx->loss.default_initial_rtt /* FIXME remember initial_rtt in session ticket */,
+                     &conn->_.super.peer.transport_params.max_ack_delay, &conn->_.super.peer.transport_params.ack_delay_exponent);
+    conn->_.egress.next_pn_to_skip = calc_next_pn_to_skip(conn->_.super.ctx->tls, 0);
+    init_max_streams(&conn->_.egress.max_streams.uni);
+    init_max_streams(&conn->_.egress.max_streams.bidi);
+    conn->_.egress.path_challenge.tail_ref = &conn->_.egress.path_challenge.head;
+    conn->_.egress.send_ack_at = INT64_MAX;
+    quicly_cc_init(&conn->_.egress.cc);
+    quicly_linklist_init(&conn->_.egress.pending_streams.blocked.uni);
+    quicly_linklist_init(&conn->_.egress.pending_streams.blocked.bidi);
+    quicly_linklist_init(&conn->_.egress.pending_streams.control);
+    conn->_.crypto.tls = tls;
+    /* the extension hooks are owned by quicly; callers may supply other handshake properties but not
+     * their own extension callbacks */
+    if (handshake_properties != NULL) {
+        assert(handshake_properties->additional_extensions == NULL);
+        assert(handshake_properties->collect_extension == NULL);
+        assert(handshake_properties->collected_extensions == NULL);
+        conn->_.crypto.handshake_properties = *handshake_properties;
+    } else {
+        conn->_.crypto.handshake_properties = (ptls_handshake_properties_t){{{{NULL}}}};
+    }
+    conn->_.crypto.handshake_properties.collect_extension = collect_transport_parameters;
+    conn->_.idle_timeout.at = INT64_MAX;
+    conn->_.idle_timeout.should_rearm_on_send = 1;
+
+    /* link the TLS object back to the connection so that TLS callbacks can recover it */
+    *ptls_get_data_ptr(tls) = &conn->_;
+
+    return &conn->_;
+}
+
+/* ptls collected_extensions callback (client side).
+ *
+ * Decodes and validates the transport parameters received from the server, then stores them into
+ * conn->super.peer. Validation covers: presence of the extension, that the echoed original
+ * connection ID matches the one used before a Retry (empty when there was none), and — when 0-RTT
+ * data has been accepted — that none of the limits shrank below the values assumed while sending
+ * early data. Returns 0 on success; negative error codes are used to carry QUIC errors through
+ * picotls (see the note at the end). */
+static int client_collected_extensions(ptls_t *tls, ptls_handshake_properties_t *properties, ptls_raw_extension_t *slots)
+{
+    /* recover the connection from the embedded handshake_properties (container-of pattern) */
+    quicly_conn_t *conn = (void *)((char *)properties - offsetof(quicly_conn_t, crypto.handshake_properties));
+    int ret;
+
+    assert(properties->client.early_data_acceptance != PTLS_EARLY_DATA_ACCEPTANCE_UNKNOWN);
+
+    if (slots[0].type == UINT16_MAX) {
+        ret = PTLS_ALERT_MISSING_EXTENSION;
+        goto Exit;
+    }
+    assert(slots[0].type == QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS);
+    assert(slots[1].type == UINT16_MAX);
+
+    const uint8_t *src = slots[0].data.base, *end = src + slots[0].data.len;
+    quicly_transport_parameters_t params;
+    quicly_cid_t odcid;
+
+    /* decode and validate (fix: `&params` was garbled to `¶ms` by an HTML-entity mis-decode) */
+    if ((ret = quicly_decode_transport_parameter_list(&params, &odcid, conn->super.peer.stateless_reset._buf, 1, src, end)) != 0)
+        goto Exit;
+    if (odcid.len != conn->retry_odcid.len || memcmp(odcid.cid, conn->retry_odcid.cid, odcid.len) != 0) {
+        ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;
+        goto Exit;
+    }
+    if (properties->client.early_data_acceptance == PTLS_EARLY_DATA_ACCEPTED) {
+/* 0-RTT data has been sent assuming the remembered limits; the server must not shrink them */
+#define ZERORTT_VALIDATE(x)                                                                                                        \
+    if (params.x < conn->super.peer.transport_params.x) {                                                                          \
+        ret = QUICLY_TRANSPORT_ERROR_TRANSPORT_PARAMETER;                                                                          \
+        goto Exit;                                                                                                                 \
+    }
+        ZERORTT_VALIDATE(max_data);
+        ZERORTT_VALIDATE(max_stream_data.bidi_local);
+        ZERORTT_VALIDATE(max_stream_data.bidi_remote);
+        ZERORTT_VALIDATE(max_stream_data.uni);
+        ZERORTT_VALIDATE(max_streams_bidi);
+        ZERORTT_VALIDATE(max_streams_uni);
+#undef ZERORTT_VALIDATE
+    }
+
+    /* store the results */
+    conn->super.peer.stateless_reset.token = conn->super.peer.stateless_reset._buf;
+    conn->super.peer.transport_params = params;
+
+Exit:
+    return ret; /* negative error codes used to transmit QUIC errors through picotls */
+}
+
+/* Creates a client connection and feeds the first TLS flight into the crypto streams, so that a
+ * subsequent send call can emit the Initial packet(s).
+ *
+ * @param _conn                    [out] receives the new connection on success
+ * @param address_token            optional token obtained from the server previously (copied)
+ * @param resumed_transport_params when non-NULL, 0-RTT is attempted; the remembered peer transport
+ *                                 parameters are applied if picotls reports early data can be sent
+ * Returns 0 on success; on failure any partially constructed connection is freed. */
+int quicly_connect(quicly_conn_t **_conn, quicly_context_t *ctx, const char *server_name, struct sockaddr *dest_addr,
+                   struct sockaddr *src_addr, const quicly_cid_plaintext_t *new_cid, ptls_iovec_t address_token,
+                   ptls_handshake_properties_t *handshake_properties, const quicly_transport_parameters_t *resumed_transport_params)
+{
+    quicly_conn_t *conn = NULL;
+    const quicly_cid_t *server_cid;
+    ptls_buffer_t buf;
+    size_t epoch_offsets[5] = {0};
+    size_t max_early_data_size = 0;
+    int ret;
+
+    update_now(ctx);
+
+    if ((conn = create_connection(ctx, server_name, dest_addr, src_addr, new_cid, handshake_properties)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Exit;
+    }
+    /* as the client, the server's address is considered validated from the start */
+    conn->super.peer.address_validation.validated = 1;
+    conn->super.peer.address_validation.send_probe = 1;
+    if (address_token.len != 0) {
+        /* keep a private copy of the token; it is sent in the Initial packets */
+        if ((conn->token.base = malloc(address_token.len)) == NULL) {
+            ret = PTLS_ERROR_NO_MEMORY;
+            goto Exit;
+        }
+        memcpy(conn->token.base, address_token.base, address_token.len);
+        conn->token.len = address_token.len;
+    }
+    server_cid = quicly_get_peer_cid(conn);
+
+    QUICLY_PROBE(CONNECT, conn, probe_now(), conn->super.version);
+
+    /* Initial keys are derived from the (randomly chosen) destination CID */
+    if ((ret = setup_handshake_space_and_flow(conn, QUICLY_EPOCH_INITIAL)) != 0)
+        goto Exit;
+    if ((ret = setup_initial_encryption(get_aes128gcmsha256(ctx), &conn->initial->cipher.ingress, &conn->initial->cipher.egress,
+                                        ptls_iovec_init(server_cid->cid, server_cid->len), 1, conn)) != 0)
+        goto Exit;
+
+    /* handshake: encode our transport parameters and register them as a raw TLS extension */
+    ptls_buffer_init(&conn->crypto.transport_params.buf, "", 0);
+    if ((ret = quicly_encode_transport_parameter_list(&conn->crypto.transport_params.buf, 1, &conn->super.ctx->transport_params,
+                                                      NULL, NULL, conn->super.ctx->expand_client_hello)) != 0)
+        goto Exit;
+    conn->crypto.transport_params.ext[0] =
+        (ptls_raw_extension_t){QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS,
+                               {conn->crypto.transport_params.buf.base, conn->crypto.transport_params.buf.off}};
+    conn->crypto.transport_params.ext[1] = (ptls_raw_extension_t){UINT16_MAX};
+    conn->crypto.handshake_properties.additional_extensions = conn->crypto.transport_params.ext;
+    conn->crypto.handshake_properties.collected_extensions = client_collected_extensions;
+
+    /* run the TLS handshake to produce the first flight (expected result: IN_PROGRESS) */
+    ptls_buffer_init(&buf, "", 0);
+    if (resumed_transport_params != NULL)
+        conn->crypto.handshake_properties.client.max_early_data_size = &max_early_data_size;
+    ret = ptls_handle_message(conn->crypto.tls, &buf, epoch_offsets, 0, NULL, 0, &conn->crypto.handshake_properties);
+    conn->crypto.handshake_properties.client.max_early_data_size = NULL;
+    if (ret != PTLS_ERROR_IN_PROGRESS) {
+        assert(ret > 0); /* no QUIC errors */
+        goto Exit;
+    }
+    write_crypto_data(conn, &buf, epoch_offsets);
+    ptls_buffer_dispose(&buf);
+
+    /* non-zero max_early_data_size means 0-RTT can be sent; adopt the remembered peer parameters */
+    if (max_early_data_size != 0) {
+        conn->super.peer.transport_params = *resumed_transport_params;
+        if ((ret = apply_peer_transport_params(conn)) != 0)
+            goto Exit;
+    }
+
+    *_conn = conn;
+    ret = 0;
+
+Exit:
+    if (ret != 0) {
+        if (conn != NULL)
+            quicly_free(conn);
+    }
+    return ret;
+}
+
+/* ptls collected_extensions callback (server side).
+ *
+ * Decodes the transport parameters found in the ClientHello directly into conn->super.peer, then
+ * encodes our own parameters (including the original DCID after a Retry, and the stateless reset
+ * token when a CID encryptor is in use) and registers them to be sent in EncryptedExtensions. */
+static int server_collected_extensions(ptls_t *tls, ptls_handshake_properties_t *properties, ptls_raw_extension_t *slots)
+{
+    /* recover the connection from the embedded handshake_properties (container-of pattern) */
+    quicly_conn_t *conn = (void *)((char *)properties - offsetof(quicly_conn_t, crypto.handshake_properties));
+    int ret;
+
+    if (slots[0].type == UINT16_MAX) {
+        ret = PTLS_ALERT_MISSING_EXTENSION;
+        goto Exit;
+    }
+    assert(slots[0].type == QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS);
+    assert(slots[1].type == UINT16_MAX);
+
+    { /* decode transport_parameters extension */
+        const uint8_t *src = slots[0].data.base, *end = src + slots[0].data.len;
+        if ((ret = quicly_decode_transport_parameter_list(&conn->super.peer.transport_params, NULL, NULL, 0, src, end)) != 0)
+            goto Exit;
+    }
+
+    /* set transport_parameters extension to be sent in EE */
+    assert(properties->additional_extensions == NULL);
+    ptls_buffer_init(&conn->crypto.transport_params.buf, "", 0);
+    if ((ret = quicly_encode_transport_parameter_list(
+             &conn->crypto.transport_params.buf, 0, &conn->super.ctx->transport_params,
+             conn->retry_odcid.len != 0 ? &conn->retry_odcid : NULL,
+             conn->super.ctx->cid_encryptor != NULL ? conn->super.host.stateless_reset_token : NULL, 0)) != 0)
+        goto Exit;
+    properties->additional_extensions = conn->crypto.transport_params.ext;
+    conn->crypto.transport_params.ext[0] =
+        (ptls_raw_extension_t){QUICLY_TLS_EXTENSION_TYPE_TRANSPORT_PARAMETERS,
+                               {conn->crypto.transport_params.buf.base, conn->crypto.transport_params.buf.off}};
+    conn->crypto.transport_params.ext[1] = (ptls_raw_extension_t){UINT16_MAX};
+    conn->crypto.handshake_properties.additional_extensions = conn->crypto.transport_params.ext;
+
+    ret = 0;
+
+Exit:
+    return ret;
+}
+
+/* Decrypts the packet payload in place, using the bytes that precede the payload as AAD.
+ * Returns the plaintext length, or SIZE_MAX if authentication fails. */
+static size_t aead_decrypt_core(ptls_aead_context_t *aead, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off)
+{
+    uint8_t *payload = packet->octets.base + aead_off;
+    size_t payload_len = packet->octets.len - aead_off;
+
+    return ptls_aead_decrypt(aead, payload, payload, payload_len, pn, packet->octets.base, aead_off);
+}
+
+/* AEAD-decryption callback used when the key does not change over the lifetime of the epoch
+ * (i.e., no key-phase handling); `ctx` is the AEAD context itself */
+static int aead_decrypt_fixed_key(void *ctx, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off, size_t *ptlen)
+{
+    ptls_aead_context_t *aead = ctx;
+
+    *ptlen = aead_decrypt_core(aead, pn, packet, aead_off);
+    if (*ptlen == SIZE_MAX)
+        return QUICLY_ERROR_PACKET_IGNORED;
+    return 0;
+}
+
+/* Decrypts a 1-RTT (short header) packet, implementing the receive side of the QUIC key update
+ * mechanism. The packet's key-phase bit selects one of the two ingress AEAD slots; the key for that
+ * slot is derived lazily. If decryption fails while a retry with a newer key generation is still
+ * possible, the payload is reverted to ciphertext and decryption is retried once with the next key.
+ * A successful decryption under a newly prepared key promotes it via received_key_update. */
+static int aead_decrypt_1rtt(void *ctx, uint64_t pn, quicly_decoded_packet_t *packet, size_t aead_off, size_t *ptlen)
+{
+    quicly_conn_t *conn = ctx;
+    struct st_quicly_application_space_t *space = conn->application;
+    size_t aead_index = (packet->octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;
+    int ret;
+
+    /* prepare key, when not available (yet) */
+    if (space->cipher.ingress.aead[aead_index] == NULL) {
+    Retry_1RTT : {
+        /* Replace the AEAD key at the alternative slot (note: decryption key slots are shared by 0-RTT and 1-RTT), at the same time
+         * dropping 0-RTT header protection key. */
+        if (conn->application->cipher.ingress.header_protection.zero_rtt != NULL) {
+            ptls_cipher_free(conn->application->cipher.ingress.header_protection.zero_rtt);
+            conn->application->cipher.ingress.header_protection.zero_rtt = NULL;
+        }
+        ptls_cipher_suite_t *cipher = ptls_get_cipher(conn->crypto.tls);
+        if ((ret = update_1rtt_key(conn, cipher, 0, &space->cipher.ingress.aead[aead_index], space->cipher.ingress.secret)) != 0)
+            return ret;
+        ++space->cipher.ingress.key_phase.prepared;
+        QUICLY_PROBE(CRYPTO_RECEIVE_KEY_UPDATE_PREPARE, conn, probe_now(), space->cipher.ingress.key_phase.prepared,
+                     QUICLY_PROBE_HEXDUMP(space->cipher.ingress.secret, cipher->hash->digest_size));
+    }
+    }
+
+    /* decrypt */
+    ptls_aead_context_t *aead = space->cipher.ingress.aead[aead_index];
+    if ((*ptlen = aead_decrypt_core(aead, pn, packet, aead_off)) == SIZE_MAX) {
+        /* retry with a new key, if possible; possible only when no newer generation is already
+         * prepared and the packet's phase bit differs from the currently confirmed phase */
+        if (space->cipher.ingress.key_phase.decrypted == space->cipher.ingress.key_phase.prepared &&
+            space->cipher.ingress.key_phase.decrypted % 2 != aead_index) {
+            /* reapply AEAD to revert payload to the encrypted form. This assumes that the cipher used in AEAD is CTR. */
+            aead_decrypt_core(aead, pn, packet, aead_off);
+            goto Retry_1RTT;
+        }
+        /* otherwise return failure */
+        return QUICLY_ERROR_PACKET_IGNORED;
+    }
+
+    /* update the confirmed key phase and also the egress key phase, if necessary */
+    if (space->cipher.ingress.key_phase.prepared != space->cipher.ingress.key_phase.decrypted &&
+        space->cipher.ingress.key_phase.prepared % 2 == aead_index) {
+        if ((ret = received_key_update(conn, space->cipher.ingress.key_phase.prepared)) != 0)
+            return ret;
+    }
+
+    return 0;
+}
+
+/* Removes header protection and AEAD-decrypts one QUIC packet in place.
+ *
+ * The header-protection mask is derived from the ciphertext sample that starts at
+ * encrypted_off + QUICLY_MAX_PN_SIZE. The mask unprotects the low bits of the first byte (4 bits for
+ * long headers, 5 for short) and the packet-number bytes; the truncated packet number is then
+ * expanded against *next_expected_pn before invoking the AEAD through `aead_cb`. On success *pn and
+ * *payload are filled in and *next_expected_pn is advanced past the received packet number.
+ * Returns 0 or a QUICLY_ERROR_* code (the packet is ignored on authentication failure). */
+static int do_decrypt_packet(ptls_cipher_context_t *header_protection,
+                             int (*aead_cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *), void *aead_ctx,
+                             uint64_t *next_expected_pn, quicly_decoded_packet_t *packet, uint64_t *pn, ptls_iovec_t *payload)
+{
+    size_t encrypted_len = packet->octets.len - packet->encrypted_off;
+    uint8_t hpmask[5] = {0};
+    uint32_t pnbits = 0;
+    size_t pnlen, ptlen, i;
+    int ret;
+
+    /* decipher the header protection, as well as obtaining pnbits, pnlen */
+    if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE) {
+        *pn = UINT64_MAX;
+        return QUICLY_ERROR_PACKET_IGNORED;
+    }
+    ptls_cipher_init(header_protection, packet->octets.base + packet->encrypted_off + QUICLY_MAX_PN_SIZE);
+    ptls_cipher_encrypt(header_protection, hpmask, hpmask, sizeof(hpmask));
+    packet->octets.base[0] ^= hpmask[0] & (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0]) ? 0xf : 0x1f);
+    /* the unprotected low 2 bits of the first byte encode the packet-number length minus one */
+    pnlen = (packet->octets.base[0] & 0x3) + 1;
+    for (i = 0; i != pnlen; ++i) {
+        packet->octets.base[packet->encrypted_off + i] ^= hpmask[i + 1];
+        pnbits = (pnbits << 8) | packet->octets.base[packet->encrypted_off + i];
+    }
+
+    size_t aead_off = packet->encrypted_off + pnlen;
+    *pn = quicly_determine_packet_number(pnbits, pnlen * 8, *next_expected_pn);
+
+    /* AEAD decryption */
+    if ((ret = (*aead_cb)(aead_ctx, *pn, packet, aead_off, &ptlen)) != 0) {
+        if (QUICLY_DEBUG)
+            fprintf(stderr, "%s: aead decryption failure (pn: %" PRIu64 ",code:%d)\n", __FUNCTION__, *pn, ret);
+        return ret;
+    }
+    if (*next_expected_pn <= *pn)
+        *next_expected_pn = *pn + 1;
+
+    *payload = ptls_iovec_init(packet->octets.base + aead_off, ptlen);
+    return 0;
+}
+
+/* Decrypts a packet, or adopts one that was already decrypted by another node, and validates it.
+ *
+ * When packet->decrypted.pn == UINT64_MAX the packet is deciphered here through do_decrypt_packet;
+ * otherwise the pre-decrypted payload is used as-is, replaying any key-phase bump observed by the
+ * decrypting node through received_key_update. In both cases the reserved header bits (visible only
+ * after decryption) are required to be zero and the payload must be non-empty.
+ * Returns 0 on success, or a QUICLY_ERROR_* / transport error code. */
+static int decrypt_packet(ptls_cipher_context_t *header_protection,
+                          int (*aead_cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *), void *aead_ctx,
+                          uint64_t *next_expected_pn, quicly_decoded_packet_t *packet, uint64_t *pn, ptls_iovec_t *payload)
+{
+    int ret;
+
+    /* decrypt ourselves, or use the pre-decrypted input */
+    if (packet->decrypted.pn == UINT64_MAX) {
+        if ((ret = do_decrypt_packet(header_protection, aead_cb, aead_ctx, next_expected_pn, packet, pn, payload)) != 0)
+            return ret;
+    } else {
+        *payload = ptls_iovec_init(packet->octets.base + packet->encrypted_off, packet->octets.len - packet->encrypted_off);
+        *pn = packet->decrypted.pn;
+        if (aead_cb == aead_decrypt_1rtt) {
+            quicly_conn_t *conn = aead_ctx;
+            if (conn->application->cipher.ingress.key_phase.decrypted < packet->decrypted.key_phase) {
+                if ((ret = received_key_update(conn, packet->decrypted.key_phase)) != 0)
+                    return ret;
+            }
+        }
+        /* advance past the received packet number; `<=` (not `<`) so that receiving exactly the
+         * expected packet number also advances the counter, mirroring do_decrypt_packet */
+        if (*next_expected_pn <= *pn)
+            *next_expected_pn = *pn + 1;
+    }
+
+    /* check reserved bits after AEAD decryption */
+    if ((packet->octets.base[0] & (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0]) ? QUICLY_LONG_HEADER_RESERVED_BITS
+                                                                                        : QUICLY_SHORT_HEADER_RESERVED_BITS)) !=
+        0) {
+        if (QUICLY_DEBUG)
+            fprintf(stderr, "%s: non-zero reserved bits (pn: %" PRIu64 ")\n", __FUNCTION__, *pn);
+        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
+    }
+    if (payload->len == 0) {
+        if (QUICLY_DEBUG)
+            fprintf(stderr, "%s: payload length is zero (pn: %" PRIu64 ")\n", __FUNCTION__, *pn);
+        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
+    }
+
+    if (QUICLY_DEBUG) {
+        char *payload_hex = quicly_hexdump(payload->base, payload->len, 4);
+        fprintf(stderr, "%s: AEAD payload:\n%s", __FUNCTION__, payload_hex);
+        free(payload_hex);
+    }
+
+    return 0;
+}
+
+/* sentmap callback for ACK frames ("ACK of ACK").
+ *
+ * Once an ACK frame we sent is acknowledged, the ranges it carried no longer need to be reported,
+ * so they are removed from the corresponding packet-number space's ack_queue. LOST / EXPIRED events
+ * are ignored: the ranges simply remain queued and will be carried by a future ACK frame. */
+static int on_ack_ack(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent, quicly_sentmap_event_t event)
+{
+    /* TODO log */
+
+    if (event == QUICLY_SENTMAP_EVENT_ACKED) {
+        /* find the pn space */
+        struct st_quicly_pn_space_t *space;
+        switch (packet->ack_epoch) {
+        case QUICLY_EPOCH_INITIAL:
+            space = &conn->initial->super;
+            break;
+        case QUICLY_EPOCH_HANDSHAKE:
+            space = &conn->handshake->super;
+            break;
+        case QUICLY_EPOCH_1RTT:
+            space = &conn->application->super;
+            break;
+        default:
+            assert(!"FIXME");
+            return QUICLY_TRANSPORT_ERROR_INTERNAL;
+        }
+        /* subtract given ACK range, then make adjustments */
+        int ret;
+        if ((ret = quicly_ranges_subtract(&space->ack_queue, sent->data.ack.range.start, sent->data.ack.range.end)) != 0)
+            return ret;
+        if (space->ack_queue.num_ranges == 0) {
+            /* nothing left to acknowledge; reset the delayed-ACK bookkeeping */
+            space->largest_pn_received_at = INT64_MAX;
+            space->unacked_count = 0;
+        } else if (space->ack_queue.num_ranges > QUICLY_MAX_ACK_BLOCKS) {
+            /* cap the number of ranges by dropping the oldest (lowest-numbered) ones */
+            quicly_ranges_drop_by_range_indices(&space->ack_queue, space->ack_queue.num_ranges - QUICLY_MAX_ACK_BLOCKS,
+                                                space->ack_queue.num_ranges);
+        }
+    }
+
+    return 0;
+}
+
+/* sentmap callback for STREAM frames.
+ *
+ * On ACK: marks the byte range as delivered, notifies the application of any newly releasable
+ * prefix via on_send_shift, and destroys the stream once it becomes destroyable. On LOST: marks the
+ * range for retransmission and reschedules the stream. Frames that refer to streams which no longer
+ * exist are silently ignored. */
+static int on_ack_stream(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent, quicly_sentmap_event_t event)
+{
+    quicly_stream_t *stream;
+    int ret;
+
+    if (event == QUICLY_SENTMAP_EVENT_EXPIRED)
+        return 0;
+
+    if (event == QUICLY_SENTMAP_EVENT_ACKED) {
+        QUICLY_PROBE(STREAM_ACKED, conn, probe_now(), sent->data.stream.stream_id, sent->data.stream.args.start,
+                     sent->data.stream.args.end - sent->data.stream.args.start);
+    } else {
+        QUICLY_PROBE(STREAM_LOST, conn, probe_now(), sent->data.stream.stream_id, sent->data.stream.args.start,
+                     sent->data.stream.args.end - sent->data.stream.args.start);
+    }
+
+    /* TODO cache pointer to stream (using a generation counter?) */
+    if ((stream = quicly_get_stream(conn, sent->data.stream.stream_id)) == NULL)
+        return 0;
+
+    if (event == QUICLY_SENTMAP_EVENT_ACKED) {
+        size_t bytes_to_shift;
+        /* NOTE(review): `packet->bytes_in_flight != 0` appears to distinguish a "live" acknowledgement
+         * from one for a packet already retired from the sentmap — confirm against quicly_sendstate_acked */
+        if ((ret = quicly_sendstate_acked(&stream->sendstate, &sent->data.stream.args, packet->bytes_in_flight != 0,
+                                          &bytes_to_shift)) != 0)
+            return ret;
+        if (bytes_to_shift != 0)
+            stream->callbacks->on_send_shift(stream, bytes_to_shift);
+        if (stream_is_destroyable(stream)) {
+            destroy_stream(stream, 0);
+        } else if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE) {
+            resched_stream_data(stream);
+        }
+    } else {
+        /* FIXME handle rto error */
+        if ((ret = quicly_sendstate_lost(&stream->sendstate, &sent->data.stream.args)) != 0)
+            return ret;
+        if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE)
+            resched_stream_data(stream);
+    }
+
+    return 0;
+}
+
+/* sentmap callback for MAX_STREAM_DATA frames; relays the outcome to the per-stream maxsender and,
+ * on loss, reschedules the control frame when an update is still warranted */
+static int on_ack_max_stream_data(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                                  quicly_sentmap_event_t event)
+{
+    quicly_stream_t *stream;
+
+    if (event == QUICLY_SENTMAP_EVENT_EXPIRED)
+        return 0;
+
+    /* TODO cache pointer to stream (using a generation counter?) */
+    if ((stream = quicly_get_stream(conn, sent->data.stream.stream_id)) == NULL)
+        return 0;
+
+    if (event == QUICLY_SENTMAP_EVENT_ACKED) {
+        quicly_maxsender_acked(&stream->_send_aux.max_stream_data_sender, &sent->data.max_stream_data.args);
+    } else if (event == QUICLY_SENTMAP_EVENT_LOST) {
+        quicly_maxsender_lost(&stream->_send_aux.max_stream_data_sender, &sent->data.max_stream_data.args);
+        if (should_send_max_stream_data(stream))
+            sched_stream_control(stream);
+    }
+
+    return 0;
+}
+
+/* sentmap callback for MAX_DATA frames; notifies the connection-level maxsender of the outcome */
+static int on_ack_max_data(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                           quicly_sentmap_event_t event)
+{
+    if (event == QUICLY_SENTMAP_EVENT_ACKED) {
+        quicly_maxsender_acked(&conn->ingress.max_data.sender, &sent->data.max_data.args);
+    } else if (event == QUICLY_SENTMAP_EVENT_LOST) {
+        quicly_maxsender_lost(&conn->ingress.max_data.sender, &sent->data.max_data.args);
+    }
+
+    return 0;
+}
+
+/* sentmap callback for MAX_STREAMS frames; relays the outcome to the uni / bidi maxsender */
+static int on_ack_max_streams(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                              quicly_sentmap_event_t event)
+{
+    quicly_maxsender_t *maxsender;
+
+    maxsender = sent->data.max_streams.uni ? conn->ingress.max_streams.uni : conn->ingress.max_streams.bidi;
+    assert(maxsender != NULL); /* we would only receive an ACK if we have sent the frame */
+
+    if (event == QUICLY_SENTMAP_EVENT_ACKED) {
+        quicly_maxsender_acked(maxsender, &sent->data.max_streams.args);
+    } else if (event == QUICLY_SENTMAP_EVENT_LOST) {
+        quicly_maxsender_lost(maxsender, &sent->data.max_streams.args);
+    }
+
+    return 0;
+}
+
+/* Transitions a stream-state sender: to ACKED when the frame was acknowledged, otherwise back to
+ * SEND so that it gets retransmitted. */
+static void on_ack_stream_state_sender(quicly_sender_state_t *sender_state, int acked)
+{
+    if (acked) {
+        *sender_state = QUICLY_SENDER_STATE_ACKED;
+    } else {
+        *sender_state = QUICLY_SENDER_STATE_SEND;
+    }
+}
+
+/* sentmap callback for RESET_STREAM frames; destroys the stream once the frame is acknowledged and
+ * the stream has become destroyable */
+static int on_ack_reset_stream(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                               quicly_sentmap_event_t event)
+{
+    quicly_stream_t *stream;
+
+    if (event == QUICLY_SENTMAP_EVENT_EXPIRED)
+        return 0;
+    if ((stream = quicly_get_stream(conn, sent->data.stream_state_sender.stream_id)) == NULL)
+        return 0;
+
+    on_ack_stream_state_sender(&stream->_send_aux.reset_stream.sender_state, event == QUICLY_SENTMAP_EVENT_ACKED);
+    if (stream_is_destroyable(stream))
+        destroy_stream(stream, 0);
+
+    return 0;
+}
+
+/* sentmap callback for STOP_SENDING frames; reschedules the control frame unless it has been
+ * acknowledged */
+static int on_ack_stop_sending(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                               quicly_sentmap_event_t event)
+{
+    quicly_stream_t *stream;
+
+    if (event == QUICLY_SENTMAP_EVENT_EXPIRED)
+        return 0;
+    if ((stream = quicly_get_stream(conn, sent->data.stream_state_sender.stream_id)) == NULL)
+        return 0;
+
+    on_ack_stream_state_sender(&stream->_send_aux.stop_sending.sender_state, event == QUICLY_SENTMAP_EVENT_ACKED);
+    if (stream->_send_aux.stop_sending.sender_state != QUICLY_SENDER_STATE_ACKED)
+        sched_stream_control(stream);
+
+    return 0;
+}
+
+/* sentmap callback for STREAMS_BLOCKED frames (uni or bidi, per the flag stored in the sent entry) */
+static int on_ack_streams_blocked(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                                  quicly_sentmap_event_t event)
+{
+    struct st_quicly_max_streams_t *max_streams;
+
+    max_streams = sent->data.streams_blocked.uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi;
+
+    if (event == QUICLY_SENTMAP_EVENT_ACKED) {
+        quicly_maxsender_acked(&max_streams->blocked_sender, &sent->data.streams_blocked.args);
+    } else if (event == QUICLY_SENTMAP_EVENT_LOST) {
+        quicly_maxsender_lost(&max_streams->blocked_sender, &sent->data.streams_blocked.args);
+    }
+
+    return 0;
+}
+
+/* sentmap callback for HANDSHAKE_DONE; schedules retransmission of the frame when it is deemed lost */
+static int on_ack_handshake_done(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                                 quicly_sentmap_event_t event)
+{
+    switch (event) {
+    case QUICLY_SENTMAP_EVENT_LOST:
+        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT;
+        break;
+    default:
+        break;
+    }
+    return 0;
+}
+
+/* sentmap callback for NEW_TOKEN frames; tracks the highest acknowledged generation and schedules a
+ * retransmission when the latest generation has not been acknowledged and nothing remains inflight */
+static int on_ack_new_token(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                            quicly_sentmap_event_t event)
+{
+    /* the frame is retired from the inflight count regardless of the event type */
+    if (sent->data.new_token.is_inflight) {
+        sent->data.new_token.is_inflight = 0;
+        --conn->egress.new_token.num_inflight;
+    }
+
+    if (event == QUICLY_SENTMAP_EVENT_ACKED) {
+        QUICLY_PROBE(NEW_TOKEN_ACKED, conn, probe_now(), sent->data.new_token.generation);
+        if (conn->egress.new_token.max_acked < sent->data.new_token.generation)
+            conn->egress.new_token.max_acked = sent->data.new_token.generation;
+    }
+
+    if (conn->egress.new_token.num_inflight == 0 && conn->egress.new_token.max_acked < conn->egress.new_token.generation)
+        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_NEW_TOKEN_BIT;
+
+    return 0;
+}
+
+/* Rounds the send window: anything below MIN_SEND_WINDOW is not worth sending (returns 0), and a
+ * window between MIN_SEND_WINDOW and 2 * MIN_SEND_WINDOW is bumped up to 2 * MIN_SEND_WINDOW. */
+static ssize_t round_send_window(ssize_t window)
+{
+    if (window < MIN_SEND_WINDOW)
+        return 0;
+    if (window < MIN_SEND_WINDOW * 2)
+        return MIN_SEND_WINDOW * 2;
+    return window;
+}
+
+/* Helper function to compute send window based on:
+ * * state of peer validation (unvalidated peers are limited to 3x the bytes received, the
+ *   amplification limit),
+ * * current cwnd,
+ * * minimum send requirements in |min_bytes_to_send|, and
+ * * if sending is to be restricted to the minimum, indicated in |restrict_sending|
+ */
+static size_t calc_send_window(quicly_conn_t *conn, size_t min_bytes_to_send, int restrict_sending)
+{
+    /* If address is unvalidated, limit sending to 3x bytes received */
+    if (!conn->super.peer.address_validation.validated) {
+        uint64_t total = conn->super.stats.num_bytes.received * 3;
+        /* only worth sending if at least MIN_SEND_WINDOW bytes of budget remain */
+        if (conn->super.stats.num_bytes.sent + MIN_SEND_WINDOW <= total)
+            return total - conn->super.stats.num_bytes.sent;
+        return 0;
+    }
+
+    /* Validated address. Ensure there's enough window to send minimum number of packets */
+    uint64_t window = 0;
+    if (!restrict_sending && conn->egress.cc.cwnd > conn->egress.sentmap.bytes_in_flight + min_bytes_to_send)
+        window = conn->egress.cc.cwnd - conn->egress.sentmap.bytes_in_flight;
+    if (window < MIN_SEND_WINDOW)
+        window = 0;
+    /* the minimum requirement (e.g. a probe that must go out) overrides the congestion window */
+    if (window < min_bytes_to_send)
+        window = min_bytes_to_send;
+    return window;
+}
+
+/* Returns the timestamp at which quicly_send() should next be invoked for this connection.
+ *
+ * Returns 0 ("now") when there is room in the send window and pending frames or stream data exist.
+ * Once the connection has reached the closing states, only the ACK timer is consulted. Otherwise
+ * the earliest of the loss-recovery alarm, the delayed-ACK deadline, and the idle timeout is
+ * returned. */
+int64_t quicly_get_first_timeout(quicly_conn_t *conn)
+{
+    if (conn->super.state >= QUICLY_STATE_CLOSING)
+        return conn->egress.send_ack_at;
+
+    if (calc_send_window(conn, 0, 0) > 0) {
+        if (conn->egress.pending_flows != 0)
+            return 0;
+        if (quicly_linklist_is_linked(&conn->egress.pending_streams.control))
+            return 0;
+        if (scheduler_can_send(conn))
+            return 0;
+    } else if (!conn->super.peer.address_validation.validated) {
+        /* amplification-limited: nothing can be sent until more bytes arrive; wait for idle timeout */
+        return conn->idle_timeout.at;
+    }
+
+    int64_t at = conn->egress.loss.alarm_at;
+    if (conn->egress.send_ack_at < at)
+        at = conn->egress.send_ack_at;
+    if (conn->idle_timeout.at < at)
+        at = conn->idle_timeout.at;
+
+    return at;
+}
+
+/* Returns the next packet number expected in the application (1-RTT) space, or UINT64_MAX when that
+ * space has not been instantiated yet. */
+uint64_t quicly_get_next_expected_packet_number(quicly_conn_t *conn)
+{
+    if (conn->application != NULL)
+        return conn->application->super.next_expected_packet_number;
+    return UINT64_MAX;
+}
+
+/* data structure that is used during one call through quicly_send()
+ */
+struct st_quicly_send_context_t {
+    /* current encryption context */
+    struct {
+        struct st_quicly_cipher_context_t *cipher;
+        uint8_t first_byte;
+    } current;
+
+    /* packet under construction */
+    struct {
+        quicly_datagram_t *packet;
+        struct st_quicly_cipher_context_t *cipher;
+        /**
+         * points to the first byte of the target QUIC packet. It will not point to packet->octets.base[0] when the datagram
+         * contains multiple QUIC packet.
+         */
+        uint8_t *first_byte_at;
+        /* set when the packet contains an ack-eliciting frame; such packets count against the send
+         * window and are registered as bytes-in-flight when committed */
+        uint8_t ack_eliciting : 1;
+    } target;
+
+    /* output buffer into which list of datagrams is written */
+    quicly_datagram_t **packets;
+    /* max number of datagrams that can be stored in |packets| */
+    size_t max_packets;
+    /* number of datagrams currently stored in |packets| */
+    size_t num_packets;
+    /* the currently available window for sending (in bytes) */
+    ssize_t send_window;
+    /* location where next frame should be written */
+    uint8_t *dst;
+    /* end of the payload area, beyond which frames cannot be written */
+    uint8_t *dst_end;
+    /* address at which payload starts */
+    uint8_t *dst_payload_from;
+};
+
+/**
+ * Closes out the packet currently under construction: pads the payload to the
+ * minimum size, writes the length / packet-number / key-phase fields, applies
+ * AEAD protection, updates congestion-control accounting and the sentmap, and
+ * (unless `coalesced` is set) pushes the finished datagram onto s->packets.
+ * Returns 0 on success or a quicly/picotls error code.
+ */
+static int commit_send_packet(quicly_conn_t *conn, quicly_send_context_t *s, int coalesced)
+{
+    size_t packet_bytes_in_flight;
+
+    assert(s->target.cipher->aead != NULL);
+
+    assert(s->dst != s->dst_payload_from);
+
+    /* pad so that the pn + payload would be at least 4 bytes */
+    while (s->dst - s->dst_payload_from < QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE)
+        *s->dst++ = QUICLY_FRAME_TYPE_PADDING;
+
+    /* the last packet of first-flight datagrams is padded to become 1280 bytes */
+    if (!coalesced && quicly_is_client(conn) &&
+        (s->target.packet->data.base[0] & QUICLY_PACKET_TYPE_BITMASK) == QUICLY_PACKET_TYPE_INITIAL) {
+        const size_t max_size = QUICLY_MAX_PACKET_SIZE - QUICLY_AEAD_TAG_SIZE;
+        assert(quicly_is_client(conn));
+        assert(s->dst - s->target.packet->data.base <= max_size);
+        memset(s->dst, QUICLY_FRAME_TYPE_PADDING, s->target.packet->data.base + max_size - s->dst);
+        s->dst = s->target.packet->data.base + max_size;
+    }
+
+    /* encode packet size, packet number, key-phase */
+    if (QUICLY_PACKET_IS_LONG_HEADER(*s->target.first_byte_at)) {
+        uint16_t length = s->dst - s->dst_payload_from + s->target.cipher->aead->algo->tag_size + QUICLY_SEND_PN_SIZE;
+        /* length is always 2 bytes, see _do_prepare_packet */
+        length |= 0x4000;
+        quicly_encode16(s->dst_payload_from - QUICLY_SEND_PN_SIZE - 2, length);
+    } else {
+        /* short header: rotate the 1-RTT egress key first if this PN has reached the scheduled key-update point */
+        if (conn->egress.packet_number >= conn->application->cipher.egress.key_update_pn.next) {
+            int ret;
+            if ((ret = update_1rtt_egress_key(conn)) != 0)
+                return ret;
+        }
+        if ((conn->application->cipher.egress.key_phase & 1) != 0)
+            *s->target.first_byte_at |= QUICLY_KEY_PHASE_BIT;
+    }
+    quicly_encode16(s->dst_payload_from - QUICLY_SEND_PN_SIZE, (uint16_t)conn->egress.packet_number);
+
+    /* AEAD protection */
+    s->dst = s->dst_payload_from + ptls_aead_encrypt(s->target.cipher->aead, s->dst_payload_from, s->dst_payload_from,
+                                                     s->dst - s->dst_payload_from, conn->egress.packet_number,
+                                                     s->target.first_byte_at, s->dst_payload_from - s->target.first_byte_at);
+    s->target.packet->data.len = s->dst - s->target.packet->data.base;
+    assert(s->target.packet->data.len <= conn->super.ctx->max_packet_size);
+
+    /* apply header protection and let the crypto engine finish the datagram */
+    conn->super.ctx->crypto_engine->finalize_send_packet(
+        conn->super.ctx->crypto_engine, conn, s->target.cipher->header_protection, s->target.cipher->aead, s->target.packet,
+        s->target.first_byte_at - s->target.packet->data.base, s->dst_payload_from - s->target.packet->data.base, coalesced);
+
+    /* update CC, commit sentmap */
+    if (s->target.ack_eliciting) {
+        packet_bytes_in_flight = s->dst - s->target.first_byte_at;
+        s->send_window -= packet_bytes_in_flight;
+    } else {
+        /* non-ack-eliciting packets do not count towards bytes in flight */
+        packet_bytes_in_flight = 0;
+    }
+    if (quicly_sentmap_is_open(&conn->egress.sentmap))
+        quicly_sentmap_commit(&conn->egress.sentmap, (uint16_t)packet_bytes_in_flight);
+
+    QUICLY_PROBE(PACKET_COMMIT, conn, probe_now(), conn->egress.packet_number, s->dst - s->target.first_byte_at,
+                 !s->target.ack_eliciting);
+    QUICLY_PROBE(QUICTRACE_SENT, conn, probe_now(), conn->egress.packet_number, s->dst - s->target.first_byte_at,
+                 get_epoch(*s->target.first_byte_at));
+
+    ++conn->egress.packet_number;
+    ++conn->super.stats.num_packets.sent;
+
+    if (!coalesced) {
+        /* the datagram is complete; hand it to the caller and reset the construction state */
+        conn->super.stats.num_bytes.sent += s->target.packet->data.len;
+        s->packets[s->num_packets++] = s->target.packet;
+        s->target.packet = NULL;
+        s->target.cipher = NULL;
+        s->target.first_byte_at = NULL;
+    }
+
+    /* insert PN gap if necessary, registering the PN to the ack queue so that we'd close the connection in the event of receiving
+     * an ACK for that gap. */
+    if (conn->egress.packet_number >= conn->egress.next_pn_to_skip && !QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte) &&
+        conn->super.state < QUICLY_STATE_CLOSING) {
+        int ret;
+        if ((ret = quicly_sentmap_prepare(&conn->egress.sentmap, conn->egress.packet_number, now, QUICLY_EPOCH_1RTT)) != 0)
+            return ret;
+        if (quicly_sentmap_allocate(&conn->egress.sentmap, on_invalid_ack) == NULL)
+            return PTLS_ERROR_NO_MEMORY;
+        quicly_sentmap_commit(&conn->egress.sentmap, 0);
+        ++conn->egress.packet_number;
+        conn->egress.next_pn_to_skip = calc_next_pn_to_skip(conn->super.ctx->tls, conn->egress.packet_number);
+    }
+
+    return 0;
+}
+
+/* Appends the bytes of a connection ID (if non-empty) at `dst`, returning the
+ * advanced write pointer. */
+static inline uint8_t *emit_cid(uint8_t *dst, const quicly_cid_t *cid)
+{
+    size_t cidlen = cid->len;
+
+    if (cidlen == 0)
+        return dst;
+    memcpy(dst, cid->cid, cidlen);
+    return dst + cidlen;
+}
+
+/**
+ * Ensures that a packet with room for at least `min_space` payload bytes is
+ * under construction, committing the previous packet and/or starting a new one
+ * (possibly coalesced into the same datagram) as needed, and emits the packet
+ * header. When `ack_eliciting` is set the packet is marked as such and the
+ * send window is checked. Returns 0, QUICLY_ERROR_SENDBUF_FULL, or another
+ * quicly/picotls error code.
+ */
+static int _do_allocate_frame(quicly_conn_t *conn, quicly_send_context_t *s, size_t min_space, int ack_eliciting)
+{
+    int coalescible, ret;
+
+    assert((s->current.first_byte & QUICLY_QUIC_BIT) != 0);
+
+    /* allocate and setup the new packet if necessary */
+    if (s->dst_end - s->dst < min_space || s->target.first_byte_at == NULL) {
+        coalescible = 0;
+    } else if (((*s->target.first_byte_at ^ s->current.first_byte) & QUICLY_PACKET_TYPE_BITMASK) != 0) {
+        /* packet type changes; a long-header packet already under construction may be coalesced into the same datagram */
+        coalescible = QUICLY_PACKET_IS_LONG_HEADER(*s->target.first_byte_at);
+    } else if (s->dst_end - s->dst < min_space) {
+        /* NOTE(review): this condition appears unreachable (already checked by the first branch above); kept as in upstream */
+        coalescible = 0;
+    } else {
+        /* use the existing packet */
+        goto TargetReady;
+    }
+
+    /* commit at the same time determining if we will coalesce the packets */
+    if (s->target.packet != NULL) {
+        if (coalescible) {
+            /* coalesce only if the new packet's header plus its minimum payload still fits in the datagram */
+            size_t overhead =
+                1 /* type */ + conn->super.peer.cid.len + QUICLY_SEND_PN_SIZE + s->current.cipher->aead->algo->tag_size;
+            if (QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte))
+                overhead += 4 /* version */ + 1 /* cidl */ + conn->super.peer.cid.len + conn->super.host.src_cid.len +
+                            (s->current.first_byte == QUICLY_PACKET_TYPE_INITIAL) /* token_length == 0 */ + 2 /* length */;
+            size_t packet_min_space = QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE;
+            if (packet_min_space < min_space)
+                packet_min_space = min_space;
+            if (overhead + packet_min_space > s->dst_end - s->dst)
+                coalescible = 0;
+        }
+        /* close out packet under construction */
+        if ((ret = commit_send_packet(conn, s, coalescible)) != 0)
+            return ret;
+    } else {
+        coalescible = 0;
+    }
+
+    /* allocate packet */
+    if (coalescible) {
+        s->dst_end += s->target.cipher->aead->algo->tag_size; /* restore the AEAD tag size (tag size can differ bet. epochs) */
+        s->target.cipher = s->current.cipher;
+    } else {
+        if (s->num_packets >= s->max_packets)
+            return QUICLY_ERROR_SENDBUF_FULL;
+        s->send_window = round_send_window(s->send_window);
+        if (ack_eliciting && s->send_window < (ssize_t)min_space)
+            return QUICLY_ERROR_SENDBUF_FULL;
+        if ((s->target.packet = conn->super.ctx->packet_allocator->alloc_packet(conn->super.ctx->packet_allocator,
+                                                                               conn->super.ctx->max_packet_size)) == NULL)
+            return PTLS_ERROR_NO_MEMORY;
+        s->target.packet->dest = conn->super.peer.address;
+        s->target.packet->src = conn->super.host.address;
+        s->target.cipher = s->current.cipher;
+        s->dst = s->target.packet->data.base;
+        s->dst_end = s->target.packet->data.base + conn->super.ctx->max_packet_size;
+    }
+    s->target.ack_eliciting = 0;
+
+    QUICLY_PROBE(PACKET_PREPARE, conn, probe_now(), s->current.first_byte,
+                 QUICLY_PROBE_HEXDUMP(conn->super.peer.cid.cid, conn->super.peer.cid.len));
+
+    /* emit header */
+    s->target.first_byte_at = s->dst;
+    *s->dst++ = s->current.first_byte | 0x1 /* pnlen == 2 */;
+    if (QUICLY_PACKET_IS_LONG_HEADER(s->current.first_byte)) {
+        s->dst = quicly_encode32(s->dst, conn->super.version);
+        *s->dst++ = conn->super.peer.cid.len;
+        s->dst = emit_cid(s->dst, &conn->super.peer.cid);
+        *s->dst++ = conn->super.host.src_cid.len;
+        s->dst = emit_cid(s->dst, &conn->super.host.src_cid);
+        /* token */
+        if (s->current.first_byte == QUICLY_PACKET_TYPE_INITIAL) {
+            s->dst = quicly_encodev(s->dst, conn->token.len);
+            assert(s->dst_end - s->dst > conn->token.len);
+            memcpy(s->dst, conn->token.base, conn->token.len);
+            s->dst += conn->token.len;
+        }
+        /* payload length is filled in later (see commit_send_packet) */
+        *s->dst++ = 0;
+        *s->dst++ = 0;
+    } else {
+        s->dst = emit_cid(s->dst, &conn->super.peer.cid);
+    }
+    s->dst += QUICLY_SEND_PN_SIZE; /* space for PN bits, filled in at commit time */
+    s->dst_payload_from = s->dst;
+    assert(s->target.cipher->aead != NULL);
+    s->dst_end -= s->target.cipher->aead->algo->tag_size;
+    assert(s->dst_end - s->dst >= QUICLY_MAX_PN_SIZE - QUICLY_SEND_PN_SIZE);
+
+    /* register to sentmap */
+    if (conn->super.state < QUICLY_STATE_CLOSING) {
+        uint8_t ack_epoch = get_epoch(s->current.first_byte);
+        if (ack_epoch == QUICLY_EPOCH_0RTT)
+            ack_epoch = QUICLY_EPOCH_1RTT;
+        if ((ret = quicly_sentmap_prepare(&conn->egress.sentmap, conn->egress.packet_number, now, ack_epoch)) != 0)
+            return ret;
+    }
+
+TargetReady:
+    if (ack_eliciting) {
+        s->target.ack_eliciting = 1;
+        conn->egress.last_retransmittable_sent_at = now;
+    }
+    return 0;
+}
+
+/* Reserves space for a non-ack-eliciting frame; thin wrapper around
+ * _do_allocate_frame. */
+static int allocate_frame(quicly_conn_t *conn, quicly_send_context_t *s, size_t min_space)
+{
+    int ret = _do_allocate_frame(conn, s, min_space, 0);
+    return ret;
+}
+
+/* Reserves space for an ack-eliciting frame and allocates the sentmap entry
+ * (returned via `sent`) that will invoke `acked` on loss or acknowledgement. */
+static int allocate_ack_eliciting_frame(quicly_conn_t *conn, quicly_send_context_t *s, size_t min_space, quicly_sent_t **sent,
+                                        quicly_sent_acked_cb acked)
+{
+    int ret = _do_allocate_frame(conn, s, min_space, 1);
+
+    if (ret != 0)
+        return ret;
+    *sent = quicly_sentmap_allocate(&conn->egress.sentmap, acked);
+    if (*sent == NULL)
+        return PTLS_ERROR_NO_MEMORY;
+
+    /* TODO return the remaining window that the sender can use */
+    return 0;
+}
+
+/**
+ * Sends an ACK frame covering the ranges accumulated in `space->ack_queue`,
+ * occasionally bundling a PING to elicit an ACK-of-ACK when many gaps exist,
+ * and registers the advertised ranges in the sentmap so they can be dropped
+ * from the queue once the ACK frame itself is acknowledged.
+ */
+static int send_ack(quicly_conn_t *conn, struct st_quicly_pn_space_t *space, quicly_send_context_t *s)
+{
+    uint64_t ack_delay;
+    int ret;
+
+    if (space->ack_queue.num_ranges == 0)
+        return 0;
+
+    /* calc ack_delay */
+    if (space->largest_pn_received_at < now) {
+        /* We underreport ack_delay up to 1 milliseconds assuming that QUICLY_LOCAL_ACK_DELAY_EXPONENT is 10. It's considered a
+         * non-issue because our time measurement is at millisecond granularity anyways. */
+        ack_delay = ((now - space->largest_pn_received_at) * 1000) >> QUICLY_LOCAL_ACK_DELAY_EXPONENT;
+    } else {
+        ack_delay = 0;
+    }
+
+Emit: /* emit an ACK frame */
+    if ((ret = allocate_frame(conn, s, QUICLY_ACK_FRAME_CAPACITY)) != 0)
+        return ret;
+    uint8_t *dst = s->dst;
+    dst = quicly_encode_ack_frame(dst, s->dst_end, &space->ack_queue, ack_delay);
+
+    /* when there's no space, retry with a new MTU-sized packet */
+    if (dst == NULL) {
+        /* [rare case] A coalesced packet might not have enough space to hold only an ACK. If so, pad it, as that's easier than
+         * rolling back. */
+        if (s->dst == s->dst_payload_from) {
+            assert(s->target.first_byte_at != s->target.packet->data.base);
+            *s->dst++ = QUICLY_FRAME_TYPE_PADDING;
+        }
+        if ((ret = commit_send_packet(conn, s, 0)) != 0)
+            return ret;
+        goto Emit;
+    }
+
+    /* when there are no less than QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK (8) gaps, bundle PING once every 4 packets being sent */
+    if (space->ack_queue.num_ranges >= QUICLY_NUM_ACK_BLOCKS_TO_INDUCE_ACKACK && conn->egress.packet_number % 4 == 0 &&
+        dst < s->dst_end)
+        *dst++ = QUICLY_FRAME_TYPE_PING;
+
+    s->dst = dst;
+
+    { /* save what's inflight */
+        size_t i;
+        for (i = 0; i != space->ack_queue.num_ranges; ++i) {
+            quicly_sent_t *sent;
+            if ((sent = quicly_sentmap_allocate(&conn->egress.sentmap, on_ack_ack)) == NULL)
+                return PTLS_ERROR_NO_MEMORY;
+            sent->data.ack.range = space->ack_queue.ranges[i];
+        }
+    }
+
+    space->unacked_count = 0;
+
+    return ret;
+}
+
+/* Reserves an ack-eliciting frame for a stream-state frame (RESET_STREAM /
+ * STOP_SENDING), records the stream id in the sentmap entry, and moves the
+ * sender state to UNACKED. */
+static int prepare_stream_state_sender(quicly_stream_t *stream, quicly_sender_state_t *sender, quicly_send_context_t *s,
+                                       size_t min_space, quicly_sent_acked_cb ack_cb)
+{
+    quicly_sent_t *sent;
+    int ret = allocate_ack_eliciting_frame(stream->conn, s, min_space, &sent, ack_cb);
+
+    if (ret != 0)
+        return ret;
+    sent->data.stream_state_sender.stream_id = stream->stream_id;
+    *sender = QUICLY_SENDER_STATE_UNACKED;
+
+    return 0;
+}
+
+/**
+ * Emits the control frames pending on `stream`: STOP_SENDING, MAX_STREAM_DATA
+ * and RESET_STREAM, in that order, each only when its trigger condition holds.
+ */
+static int send_stream_control_frames(quicly_stream_t *stream, quicly_send_context_t *s)
+{
+    int ret;
+
+    /* send STOP_SENDING if necessary */
+    if (stream->_send_aux.stop_sending.sender_state == QUICLY_SENDER_STATE_SEND) {
+        /* FIXME also send an empty STREAM frame */
+        if ((ret = prepare_stream_state_sender(stream, &stream->_send_aux.stop_sending.sender_state, s,
+                                               QUICLY_STOP_SENDING_FRAME_CAPACITY, on_ack_stop_sending)) != 0)
+            return ret;
+        s->dst = quicly_encode_stop_sending_frame(s->dst, stream->stream_id, stream->_send_aux.stop_sending.error_code);
+    }
+
+    /* send MAX_STREAM_DATA if necessary */
+    if (should_send_max_stream_data(stream)) {
+        uint64_t new_value = stream->recvstate.data_off + stream->_recv_aux.window;
+        quicly_sent_t *sent;
+        /* prepare */
+        if ((ret = allocate_ack_eliciting_frame(stream->conn, s, QUICLY_MAX_STREAM_DATA_FRAME_CAPACITY, &sent,
+                                                on_ack_max_stream_data)) != 0)
+            return ret;
+        /* send */
+        s->dst = quicly_encode_max_stream_data_frame(s->dst, stream->stream_id, new_value);
+        /* register ack */
+        sent->data.max_stream_data.stream_id = stream->stream_id;
+        quicly_maxsender_record(&stream->_send_aux.max_stream_data_sender, new_value, &sent->data.max_stream_data.args);
+        QUICLY_PROBE(MAX_STREAM_DATA_SEND, stream->conn, probe_now(), stream, new_value);
+    }
+
+    /* send RESET_STREAM if necessary */
+    if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_SEND) {
+        if ((ret = prepare_stream_state_sender(stream, &stream->_send_aux.reset_stream.sender_state, s, QUICLY_RST_FRAME_CAPACITY,
+                                               on_ack_reset_stream)) != 0)
+            return ret;
+        s->dst = quicly_encode_reset_stream_frame(s->dst, stream->stream_id, stream->_send_aux.reset_stream.error_code,
+                                                  stream->sendstate.size_inflight);
+    }
+
+    return 0;
+}
+
+/* Returns true (non-zero) iff connection-level flow control (MAX_DATA) leaves
+ * no room for sending additional stream payload. */
+int quicly_is_flow_capped(quicly_conn_t *conn)
+{
+    return conn->egress.max_data.sent >= conn->egress.max_data.permitted;
+}
+
+/* Returns true while the datagram output vector passed by the caller still has
+ * room for more packets. */
+int quicly_can_send_stream_data(quicly_conn_t *conn, quicly_send_context_t *s)
+{
+    return s->num_packets < s->max_packets ? 1 : 0;
+}
+
+/**
+ * Emits a single CRYPTO or STREAM frame carrying pending payload of `stream`
+ * (crypto streams are identified by negative stream ids). The amount sent is
+ * capped by the packet's remaining space, stream- and connection-level flow
+ * control, and the first pending range; sendstate and the connection-wide
+ * MAX_DATA counter are updated, and the sent range registered in the sentmap.
+ */
+int quicly_send_stream(quicly_stream_t *stream, quicly_send_context_t *s)
+{
+    uint64_t off = stream->sendstate.pending.ranges[0].start, end_off;
+    quicly_sent_t *sent;
+    uint8_t *frame_type_at;
+    size_t capacity, len;
+    int ret, wrote_all, is_fin;
+
+    /* write frame type, stream_id and offset, calculate capacity */
+    if (stream->stream_id < 0) {
+        if ((ret = allocate_ack_eliciting_frame(stream->conn, s,
+                                                1 + quicly_encodev_capacity(off) + 2 /* type + len + offset + 1-byte payload */,
+                                                &sent, on_ack_stream)) != 0)
+            return ret;
+        frame_type_at = NULL;
+        *s->dst++ = QUICLY_FRAME_TYPE_CRYPTO;
+        s->dst = quicly_encodev(s->dst, off);
+        capacity = s->dst_end - s->dst;
+    } else {
+        uint8_t header[18], *hp = header + 1;
+        hp = quicly_encodev(hp, stream->stream_id);
+        if (off != 0) {
+            header[0] = QUICLY_FRAME_TYPE_STREAM_BASE | QUICLY_FRAME_TYPE_STREAM_BIT_OFF;
+            hp = quicly_encodev(hp, off);
+        } else {
+            header[0] = QUICLY_FRAME_TYPE_STREAM_BASE;
+        }
+        if (!quicly_sendstate_is_open(&stream->sendstate) && off == stream->sendstate.final_size) {
+            /* special case for emitting FIN only */
+            header[0] |= QUICLY_FRAME_TYPE_STREAM_BIT_FIN;
+            if ((ret = allocate_ack_eliciting_frame(stream->conn, s, hp - header, &sent, on_ack_stream)) != 0)
+                return ret;
+            if (hp - header != s->dst_end - s->dst) {
+                header[0] |= QUICLY_FRAME_TYPE_STREAM_BIT_LEN;
+                *hp++ = 0; /* empty length */
+            }
+            memcpy(s->dst, header, hp - header);
+            s->dst += hp - header;
+            end_off = off;
+            wrote_all = 1;
+            is_fin = 1;
+            goto UpdateState;
+        }
+        if ((ret = allocate_ack_eliciting_frame(stream->conn, s, hp - header + 1, &sent, on_ack_stream)) != 0)
+            return ret;
+        frame_type_at = s->dst;
+        memcpy(s->dst, header, hp - header);
+        s->dst += hp - header;
+        capacity = s->dst_end - s->dst;
+        /* cap by max_stream_data */
+        if (off + capacity > stream->_send_aux.max_stream_data)
+            capacity = stream->_send_aux.max_stream_data - off;
+        /* cap by max_data */
+        if (off + capacity > stream->sendstate.size_inflight) {
+            uint64_t new_bytes = off + capacity - stream->sendstate.size_inflight;
+            if (new_bytes > stream->conn->egress.max_data.permitted - stream->conn->egress.max_data.sent) {
+                size_t max_stream_data =
+                    stream->sendstate.size_inflight + stream->conn->egress.max_data.permitted - stream->conn->egress.max_data.sent;
+                capacity = max_stream_data - off;
+            }
+        }
+    }
+    { /* cap the capacity to the current range */
+        uint64_t range_capacity = stream->sendstate.pending.ranges[0].end - off;
+        if (!quicly_sendstate_is_open(&stream->sendstate) && off + range_capacity > stream->sendstate.final_size) {
+            assert(range_capacity > 1); /* see the special case above */
+            range_capacity -= 1;
+        }
+        if (capacity > range_capacity)
+            capacity = range_capacity;
+    }
+
+    /* write payload */
+    assert(capacity != 0);
+    len = capacity;
+    stream->callbacks->on_send_emit(stream, (size_t)(off - stream->sendstate.acked.ranges[0].end), s->dst, &len, &wrote_all);
+    /* the callback may have closed the connection or reset the stream; bail out in either case */
+    if (stream->conn->super.state >= QUICLY_STATE_CLOSING) {
+        return QUICLY_ERROR_IS_CLOSING;
+    } else if (stream->_send_aux.reset_stream.sender_state != QUICLY_SENDER_STATE_NONE) {
+        return 0;
+    }
+    assert(len <= capacity);
+    assert(len != 0);
+
+    /* update s->dst, insert length if necessary */
+    if (frame_type_at == NULL || len < s->dst_end - s->dst) {
+        if (frame_type_at != NULL)
+            *frame_type_at |= QUICLY_FRAME_TYPE_STREAM_BIT_LEN;
+        size_t len_of_len = quicly_encodev_capacity(len);
+        if (len_of_len + len > s->dst_end - s->dst) {
+            len = s->dst_end - s->dst - len_of_len;
+            wrote_all = 0;
+        }
+        memmove(s->dst + len_of_len, s->dst, len);
+        s->dst = quicly_encodev(s->dst, len);
+    }
+    s->dst += len;
+    end_off = off + len;
+
+    /* determine if the frame incorporates FIN */
+    if (!quicly_sendstate_is_open(&stream->sendstate) && end_off == stream->sendstate.final_size) {
+        assert(end_off + 1 == stream->sendstate.pending.ranges[stream->sendstate.pending.num_ranges - 1].end);
+        assert(frame_type_at != NULL);
+        is_fin = 1;
+        *frame_type_at |= QUICLY_FRAME_TYPE_STREAM_BIT_FIN;
+    } else {
+        is_fin = 0;
+    }
+
+UpdateState:
+    QUICLY_PROBE(STREAM_SEND, stream->conn, probe_now(), stream, off, end_off - off, is_fin);
+    QUICLY_PROBE(QUICTRACE_SEND_STREAM, stream->conn, probe_now(), stream, off, end_off - off, is_fin);
+    /* update sendstate (and also MAX_DATA counter) */
+    if (stream->sendstate.size_inflight < end_off) {
+        if (stream->stream_id >= 0)
+            stream->conn->egress.max_data.sent += end_off - stream->sendstate.size_inflight;
+        stream->sendstate.size_inflight = end_off;
+    }
+    if ((ret = quicly_ranges_subtract(&stream->sendstate.pending, off, end_off + is_fin)) != 0)
+        return ret;
+    if (wrote_all) {
+        if ((ret = quicly_ranges_subtract(&stream->sendstate.pending, stream->sendstate.size_inflight, UINT64_MAX)) != 0)
+            return ret;
+    }
+
+    /* setup sentmap */
+    sent->data.stream.stream_id = stream->stream_id;
+    sent->data.stream.args.start = off;
+    sent->data.stream.args.end = end_off + is_fin;
+
+    return 0;
+}
+
+/**
+ * Returns the timeout for sentmap entries. This timeout is also used as the duration of CLOSING / DRAINING state, and therefore
+ * must be longer than 3PTO. At the moment, the value is 4PTO.
+ */
+static int64_t get_sentmap_expiration_time(quicly_conn_t *conn)
+{
+    return quicly_rtt_get_pto(&conn->egress.loss.rtt, conn->super.peer.transport_params.max_ack_delay,
+                              conn->egress.loss.conf->min_pto) *
+           4;
+}
+
+/* Initializes a sentmap iterator, evicting entries that are old enough (and no
+ * longer counted as in flight) as EXPIRED while doing so. */
+static void init_acks_iter(quicly_conn_t *conn, quicly_sentmap_iter_t *iter)
+{
+    /* TODO find a better threshold */
+    const int64_t retire_cutoff = now - get_sentmap_expiration_time(conn);
+
+    quicly_sentmap_init_iter(&conn->egress.sentmap, iter);
+
+    for (;;) {
+        const quicly_sent_packet_t *entry = quicly_sentmap_get(iter);
+        if (!(entry->sent_at <= retire_cutoff && entry->bytes_in_flight == 0))
+            break;
+        quicly_sentmap_update(&conn->egress.sentmap, iter, QUICLY_SENTMAP_EVENT_EXPIRED, conn);
+    }
+}
+
+/* Discards every sentmap entry whose ack_epoch is contained in the
+ * `ack_epochs` bitmask, firing the EXPIRED event for each; other entries are
+ * skipped. Returns 0 on success or the first error reported by
+ * quicly_sentmap_update. */
+int discard_sentmap_by_epoch(quicly_conn_t *conn, unsigned ack_epochs)
+{
+    quicly_sentmap_iter_t iter;
+
+    init_acks_iter(conn, &iter);
+
+    for (;;) {
+        const quicly_sent_packet_t *entry = quicly_sentmap_get(&iter);
+        if (entry->packet_number == UINT64_MAX)
+            break;
+        if ((ack_epochs & (1u << entry->ack_epoch)) == 0) {
+            quicly_sentmap_skip(&iter);
+            continue;
+        }
+        int ret = quicly_sentmap_update(&conn->egress.sentmap, &iter, QUICLY_SENTMAP_EVENT_EXPIRED, conn);
+        if (ret != 0)
+            return ret;
+    }
+
+    return 0;
+}
+
+/**
+ * Determine frames to be retransmitted on crypto timeout or PTO: starting at
+ * the oldest packet not yet marked, packets are marked as lost until `count`
+ * in-flight packets have been processed, so their frames get rescheduled.
+ */
+static int mark_packets_as_lost(quicly_conn_t *conn, size_t count)
+{
+    quicly_sentmap_iter_t iter;
+    int ret;
+
+    assert(count != 0);
+
+    init_acks_iter(conn, &iter);
+
+    /* skip packets that have already been marked as lost */
+    while (quicly_sentmap_get(&iter)->packet_number < conn->egress.max_lost_pn)
+        quicly_sentmap_skip(&iter);
+
+    do {
+        const quicly_sent_packet_t *sent = quicly_sentmap_get(&iter);
+        uint64_t pn;
+        if ((pn = sent->packet_number) == UINT64_MAX) {
+            assert(conn->egress.sentmap.bytes_in_flight == 0);
+            break;
+        }
+        /* only packets still counted as in flight contribute towards `count` */
+        if (sent->bytes_in_flight != 0)
+            --count;
+        if ((ret = quicly_sentmap_update(&conn->egress.sentmap, &iter, QUICLY_SENTMAP_EVENT_LOST, conn)) != 0)
+            return ret;
+        conn->egress.max_lost_pn = pn + 1;
+    } while (count != 0);
+
+    return 0;
+}
+
+/* this function ensures that the value returned in loss_time is when the next
+ * application timer should be set for loss detection. if no timer is required,
+ * loss_time is set to INT64_MAX.
+ */
+static int do_detect_loss(quicly_loss_t *ld, uint64_t largest_acked, uint32_t delay_until_lost, int64_t *loss_time)
+{
+    /* container-of: recover the connection from the embedded loss-recovery state */
+    quicly_conn_t *conn = (void *)((char *)ld - offsetof(quicly_conn_t, egress.loss));
+    quicly_sentmap_iter_t iter;
+    const quicly_sent_packet_t *sent;
+    uint64_t largest_newly_lost_pn = UINT64_MAX;
+    int ret;
+
+    *loss_time = INT64_MAX;
+
+    init_acks_iter(conn, &iter);
+
+    /* Mark packets as lost if they are smaller than the largest_acked and outside either time-threshold or packet-threshold
+     * windows.
+     */
+    while ((sent = quicly_sentmap_get(&iter))->packet_number < largest_acked &&
+           (sent->sent_at <= now - delay_until_lost || /* time threshold */
+            (largest_acked >= QUICLY_LOSS_DEFAULT_PACKET_THRESHOLD &&
+             sent->packet_number <= largest_acked - QUICLY_LOSS_DEFAULT_PACKET_THRESHOLD))) { /* packet threshold */
+        if (sent->bytes_in_flight != 0 && conn->egress.max_lost_pn <= sent->packet_number) {
+            if (sent->packet_number != largest_newly_lost_pn) {
+                ++conn->super.stats.num_packets.lost;
+                largest_newly_lost_pn = sent->packet_number;
+                quicly_cc_on_lost(&conn->egress.cc, sent->bytes_in_flight, sent->packet_number, conn->egress.packet_number);
+                QUICLY_PROBE(PACKET_LOST, conn, probe_now(), largest_newly_lost_pn);
+                QUICLY_PROBE(QUICTRACE_LOST, conn, probe_now(), largest_newly_lost_pn);
+            }
+            if ((ret = quicly_sentmap_update(&conn->egress.sentmap, &iter, QUICLY_SENTMAP_EVENT_LOST, conn)) != 0)
+                return ret;
+        } else {
+            quicly_sentmap_skip(&iter);
+        }
+    }
+    if (largest_newly_lost_pn != UINT64_MAX) {
+        conn->egress.max_lost_pn = largest_newly_lost_pn + 1;
+        QUICLY_PROBE(CC_CONGESTION, conn, probe_now(), conn->egress.max_lost_pn, conn->egress.sentmap.bytes_in_flight,
+                     conn->egress.cc.cwnd);
+        QUICLY_PROBE(QUICTRACE_CC_LOST, conn, probe_now(), &conn->egress.loss.rtt, conn->egress.cc.cwnd,
+                     conn->egress.sentmap.bytes_in_flight);
+    }
+
+    /* schedule time-threshold alarm if there is a packet outstanding that is smaller than largest_acked */
+    while (sent->packet_number < largest_acked && sent->sent_at != INT64_MAX) {
+        if (sent->bytes_in_flight != 0) {
+            *loss_time = sent->sent_at + delay_until_lost;
+            break;
+        }
+        quicly_sentmap_skip(&iter);
+        sent = quicly_sentmap_get(&iter);
+    }
+
+    return 0;
+}
+
+/**
+ * Emits a MAX_STREAMS frame (uni- or bidirectional per `uni`) when the limit
+ * should be raised, recording the newly advertised count in the maxsender so
+ * that acknowledgement / loss can be tracked.
+ */
+static int send_max_streams(quicly_conn_t *conn, int uni, quicly_send_context_t *s)
+{
+    if (!should_send_max_streams(conn, uni))
+        return 0;
+
+    quicly_maxsender_t *maxsender = uni ? conn->ingress.max_streams.uni : conn->ingress.max_streams.bidi;
+    struct st_quicly_conn_streamgroup_state_t *group = uni ? &conn->super.peer.uni : &conn->super.peer.bidi;
+    int ret;
+
+    /* advertise a limit that lets the peer keep the configured number of streams open concurrently */
+    uint64_t new_count =
+        group->next_stream_id / 4 +
+        (uni ? conn->super.ctx->transport_params.max_streams_uni : conn->super.ctx->transport_params.max_streams_bidi) -
+        group->num_streams;
+
+    quicly_sent_t *sent;
+    if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_MAX_STREAMS_FRAME_CAPACITY, &sent, on_ack_max_streams)) != 0)
+        return ret;
+    s->dst = quicly_encode_max_streams_frame(s->dst, uni, new_count);
+    sent->data.max_streams.uni = uni;
+    quicly_maxsender_record(maxsender, new_count, &sent->data.max_streams.args);
+
+    QUICLY_PROBE(MAX_STREAMS_SEND, conn, probe_now(), new_count, uni);
+
+    return 0;
+}
+
+/**
+ * Emits a STREAMS_BLOCKED frame when there are streams waiting for the peer to
+ * raise the stream-count limit and the blocked state has not yet been reported.
+ */
+static int send_streams_blocked(quicly_conn_t *conn, int uni, quicly_send_context_t *s)
+{
+    quicly_linklist_t *blocked_list = uni ? &conn->egress.pending_streams.blocked.uni : &conn->egress.pending_streams.blocked.bidi;
+    int ret;
+
+    if (!quicly_linklist_is_linked(blocked_list))
+        return 0;
+
+    struct st_quicly_max_streams_t *max_streams = uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi;
+    /* container-of: recover the stream that has been blocked the longest */
+    quicly_stream_t *oldest_blocked_stream =
+        (void *)((char *)blocked_list->next - offsetof(quicly_stream_t, _send_aux.pending_link.control));
+    assert(max_streams->count == oldest_blocked_stream->stream_id / 4);
+
+    if (!quicly_maxsender_should_send_blocked(&max_streams->blocked_sender, max_streams->count))
+        return 0;
+
+    quicly_sent_t *sent;
+    if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_STREAMS_BLOCKED_FRAME_CAPACITY, &sent, on_ack_streams_blocked)) != 0)
+        return ret;
+    s->dst = quicly_encode_streams_blocked_frame(s->dst, uni, max_streams->count);
+    sent->data.streams_blocked.uni = uni;
+    quicly_maxsender_record(&max_streams->blocked_sender, max_streams->count, &sent->data.streams_blocked.args);
+
+    QUICLY_PROBE(STREAMS_BLOCKED_SEND, conn, probe_now(), max_streams->count, uni);
+
+    return 0;
+}
+
+/**
+ * Unblocks streams that have become eligible for sending after the peer raised
+ * the MAX_STREAMS limit, initializing their flow-control window and scheduling
+ * them for transmission.
+ */
+static void open_blocked_streams(quicly_conn_t *conn, int uni)
+{
+    uint64_t count;
+    quicly_linklist_t *anchor;
+
+    if (uni) {
+        count = conn->egress.max_streams.uni.count;
+        anchor = &conn->egress.pending_streams.blocked.uni;
+    } else {
+        count = conn->egress.max_streams.bidi.count;
+        anchor = &conn->egress.pending_streams.blocked.bidi;
+    }
+
+    while (quicly_linklist_is_linked(anchor)) {
+        /* container-of: the blocked list links streams through _send_aux.pending_link.control */
+        quicly_stream_t *stream = (void *)((char *)anchor->next - offsetof(quicly_stream_t, _send_aux.pending_link.control));
+        if (stream->stream_id / 4 >= count)
+            break;
+        assert(stream->streams_blocked);
+        quicly_linklist_unlink(&stream->_send_aux.pending_link.control);
+        stream->streams_blocked = 0;
+        stream->_send_aux.max_stream_data = quicly_stream_is_unidirectional(stream->stream_id)
+                                                ? conn->super.peer.transport_params.max_stream_data.uni
+                                                : conn->super.peer.transport_params.max_stream_data.bidi_remote;
+        /* TODO retain separate flags for stream states so that we do not always need to sched for both control and data */
+        sched_stream_control(stream);
+        resched_stream_data(stream);
+    }
+}
+
+/* Emits a HANDSHAKE_DONE frame and clears the corresponding pending-flow bit. */
+static int send_handshake_done(quicly_conn_t *conn, quicly_send_context_t *s)
+{
+    quicly_sent_t *sent;
+    int ret = allocate_ack_eliciting_frame(conn, s, 1, &sent, on_ack_handshake_done);
+
+    if (ret != 0)
+        return ret;
+    *s->dst++ = QUICLY_FRAME_TYPE_HANDSHAKE_DONE;
+    conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT;
+    QUICLY_PROBE(HANDSHAKE_DONE_SEND, conn, probe_now());
+
+    return 0;
+}
+
+/**
+ * Builds an address-validation token for future connections from the same
+ * client, encrypts it using the application-supplied callback, and emits it in
+ * a NEW_TOKEN frame; the token buffer is always disposed before returning.
+ */
+static int send_resumption_token(quicly_conn_t *conn, quicly_send_context_t *s)
+{
+    quicly_address_token_plaintext_t token;
+    ptls_buffer_t tokenbuf;
+    uint8_t tokenbuf_small[128];
+    quicly_sent_t *sent;
+    int ret;
+
+    ptls_buffer_init(&tokenbuf, tokenbuf_small, sizeof(tokenbuf_small));
+
+    /* build token */
+    token =
+        (quicly_address_token_plaintext_t){QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION, conn->super.ctx->now->cb(conn->super.ctx->now)};
+    token.remote = conn->super.peer.address;
+    /* TODO fill token.resumption */
+
+    /* encrypt */
+    if ((ret = conn->super.ctx->generate_resumption_token->cb(conn->super.ctx->generate_resumption_token, conn, &tokenbuf,
+                                                              &token)) != 0)
+        goto Exit;
+
+    /* emit frame */
+    if ((ret = allocate_ack_eliciting_frame(conn, s, quicly_new_token_frame_capacity(ptls_iovec_init(tokenbuf.base, tokenbuf.off)),
+                                            &sent, on_ack_new_token)) != 0)
+        goto Exit;
+    sent->data.new_token.generation = conn->egress.new_token.generation;
+    s->dst = quicly_encode_new_token_frame(s->dst, ptls_iovec_init(tokenbuf.base, tokenbuf.off));
+    conn->egress.pending_flows &= ~QUICLY_PENDING_FLOW_NEW_TOKEN_BIT;
+
+    QUICLY_PROBE(NEW_TOKEN_SEND, conn, probe_now(), tokenbuf.base, tokenbuf.off, sent->data.new_token.generation);
+    ret = 0;
+Exit:
+    ptls_buffer_dispose(&tokenbuf);
+    return ret;
+}
+
+/* Builds a Version Negotiation packet addressed to the given peer, echoing the
+ * received connection IDs. Returns the allocated datagram, or NULL when the
+ * packet allocator fails. */
+quicly_datagram_t *quicly_send_version_negotiation(quicly_context_t *ctx, struct sockaddr *dest_addr, ptls_iovec_t dest_cid,
+                                                  struct sockaddr *src_addr, ptls_iovec_t src_cid)
+{
+    quicly_datagram_t *packet = ctx->packet_allocator->alloc_packet(ctx->packet_allocator, ctx->max_packet_size);
+    uint8_t *p;
+
+    if (packet == NULL)
+        return NULL;
+    set_address(&packet->dest, dest_addr);
+    set_address(&packet->src, src_addr);
+    p = packet->data.base;
+
+    /* first byte is random, with the long-header bit forced on */
+    ctx->tls->random_bytes(p, 1);
+    *p |= QUICLY_LONG_HEADER_BIT;
+    ++p;
+    /* a version field of zero identifies a Version Negotiation packet */
+    p = quicly_encode32(p, 0);
+    /* echo the connection IDs, each preceded by its length */
+    *p++ = dest_cid.len;
+    if (dest_cid.len != 0) {
+        memcpy(p, dest_cid.base, dest_cid.len);
+        p += dest_cid.len;
+    }
+    *p++ = src_cid.len;
+    if (src_cid.len != 0) {
+        memcpy(p, src_cid.base, src_cid.len);
+        p += src_cid.len;
+    }
+    /* the list of versions we support (a single entry) */
+    p = quicly_encode32(p, QUICLY_PROTOCOL_VERSION);
+
+    packet->data.len = p - packet->data.base;
+
+    return packet;
+}
+
+/* Computes a 64-bit hash binding the client / server CID pair together, by
+ * hashing their length-prefixed concatenation with SHA-256 and taking the
+ * first 8 bytes of the digest. Returns 0 on success or a picotls error. */
+int quicly_retry_calc_cidpair_hash(ptls_hash_algorithm_t *sha256, ptls_iovec_t client_cid, ptls_iovec_t server_cid, uint64_t *value)
+{
+    uint8_t digest[PTLS_SHA256_DIGEST_SIZE], input[(QUICLY_MAX_CID_LEN_V1 + 1) * 2];
+    uint8_t *p = input;
+    int ret;
+
+    /* serialize the pair as <len, bytes> x 2 */
+    *p++ = (uint8_t)client_cid.len;
+    memcpy(p, client_cid.base, client_cid.len);
+    p += client_cid.len;
+    *p++ = (uint8_t)server_cid.len;
+    memcpy(p, server_cid.base, server_cid.len);
+    p += server_cid.len;
+
+    if ((ret = ptls_calc_hash(sha256, digest, input, p - input)) != 0)
+        return ret;
+
+    /* use the first 64 bits of the digest */
+    p = digest;
+    *value = quicly_decode64((void *)&p);
+
+    return 0;
+}
+
+/**
+ * Builds a Retry packet carrying an encrypted address-validation token. The
+ * token embeds the original DCID and a hash of the CID pair so that the
+ * client's subsequent Initial can be validated. `retry_aead_cache`, when
+ * non-NULL, caches the Retry AEAD context across invocations. Returns the
+ * datagram, or NULL on failure.
+ */
+quicly_datagram_t *quicly_send_retry(quicly_context_t *ctx, ptls_aead_context_t *token_encrypt_ctx, struct sockaddr *dest_addr,
+                                     ptls_iovec_t dest_cid, struct sockaddr *src_addr, ptls_iovec_t src_cid, ptls_iovec_t odcid,
+                                     ptls_iovec_t token_prefix, ptls_iovec_t appdata, ptls_aead_context_t **retry_aead_cache)
+{
+    quicly_address_token_plaintext_t token;
+    quicly_datagram_t *packet = NULL;
+    ptls_buffer_t buf;
+    int ret;
+
+    assert(!(src_cid.len == odcid.len && memcmp(src_cid.base, odcid.base, src_cid.len) == 0));
+
+    /* build token as plaintext */
+    token = (quicly_address_token_plaintext_t){QUICLY_ADDRESS_TOKEN_TYPE_RETRY, ctx->now->cb(ctx->now)};
+    set_address(&token.remote, dest_addr);
+    set_address(&token.local, src_addr);
+
+    set_cid(&token.retry.odcid, odcid);
+    if ((ret = quicly_retry_calc_cidpair_hash(get_aes128gcmsha256(ctx)->hash, dest_cid, src_cid, &token.retry.cidpair_hash)) != 0)
+        goto Exit;
+    if (appdata.len != 0) {
+        assert(appdata.len <= sizeof(token.appdata.bytes));
+        memcpy(token.appdata.bytes, appdata.base, appdata.len);
+        token.appdata.len = appdata.len;
+    }
+
+    /* start building the packet */
+    if ((packet = ctx->packet_allocator->alloc_packet(ctx->packet_allocator, ctx->max_packet_size)) == NULL)
+        goto Exit;
+    set_address(&packet->dest, dest_addr);
+    set_address(&packet->src, src_addr);
+    ptls_buffer_init(&buf, packet->data.base, ctx->max_packet_size);
+
+    /* first generate a pseudo packet */
+    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, odcid.base, odcid.len); });
+    ctx->tls->random_bytes(buf.base + buf.off, 1);
+    buf.base[buf.off] = QUICLY_PACKET_TYPE_RETRY | (buf.base[buf.off] & 0x0f);
+    ++buf.off;
+    ptls_buffer_push32(&buf, QUICLY_PROTOCOL_VERSION);
+    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, dest_cid.base, dest_cid.len); });
+    ptls_buffer_push_block(&buf, 1, { ptls_buffer_pushv(&buf, src_cid.base, src_cid.len); });
+    if (token_prefix.len != 0) {
+        assert(token_prefix.len <= buf.capacity - buf.off);
+        memcpy(buf.base + buf.off, token_prefix.base, token_prefix.len);
+        buf.off += token_prefix.len;
+    }
+    if ((ret = quicly_encrypt_address_token(ctx->tls->random_bytes, token_encrypt_ctx, &buf, buf.off - token_prefix.len, &token)) !=
+        0)
+        goto Exit;
+
+    /* append AEAD tag */
+    ret = ptls_buffer_reserve(&buf, PTLS_AESGCM_TAG_SIZE);
+    assert(ret == 0); /* expected to never fail: the fixed-size buffer is large enough (see the is_allocated assert below) */
+    assert(!buf.is_allocated);
+    {
+        ptls_aead_context_t *aead =
+            retry_aead_cache != NULL && *retry_aead_cache != NULL ? *retry_aead_cache : create_retry_aead(ctx, 1);
+        ptls_aead_encrypt(aead, buf.base + buf.off, "", 0, 0, buf.base, buf.off);
+        if (retry_aead_cache != NULL) {
+            *retry_aead_cache = aead;
+        } else {
+            ptls_aead_free(aead);
+        }
+    }
+    buf.off += PTLS_AESGCM_TAG_SIZE;
+
+    /* convert the image to a Retry packet, by stripping the ODCID field */
+    memmove(buf.base, buf.base + odcid.len + 1, buf.off - (odcid.len + 1));
+    buf.off -= odcid.len + 1;
+
+    packet->data.len = buf.off;
+    ret = 0;
+
+Exit:
+    if (ret != 0) {
+        if (packet != NULL)
+            ctx->packet_allocator->free_packet(ctx->packet_allocator, packet);
+    }
+    return packet;
+}
+
+/**
+ * Sends the pending frames of one handshake epoch (Initial or Handshake): the
+ * ACK frame first, then CRYPTO data unless `ack_only` is set, and finally a
+ * PING probe when requested and no data was sent.
+ */
+static int send_handshake_flow(quicly_conn_t *conn, size_t epoch, quicly_send_context_t *s, int ack_only, int send_probe)
+{
+    struct st_quicly_pn_space_t *ack_space = NULL;
+    int ret = 0;
+
+    switch (epoch) {
+    case QUICLY_EPOCH_INITIAL:
+        if (conn->initial == NULL || (s->current.cipher = &conn->initial->cipher.egress)->aead == NULL)
+            return 0;
+        s->current.first_byte = QUICLY_PACKET_TYPE_INITIAL;
+        ack_space = &conn->initial->super;
+        break;
+    case QUICLY_EPOCH_HANDSHAKE:
+        if (conn->handshake == NULL || (s->current.cipher = &conn->handshake->cipher.egress)->aead == NULL)
+            return 0;
+        s->current.first_byte = QUICLY_PACKET_TYPE_HANDSHAKE;
+        ack_space = &conn->handshake->super;
+        break;
+    default:
+        assert(!"logic flaw");
+        return 0;
+    }
+
+    /* send ACK */
+    if (ack_space != NULL && ack_space->unacked_count != 0)
+        if ((ret = send_ack(conn, ack_space, s)) != 0)
+            goto Exit;
+
+    if (!ack_only) {
+        /* send data */
+        while ((conn->egress.pending_flows & (uint8_t)(1 << epoch)) != 0) {
+            /* the crypto stream of epoch N is registered under the negative stream id -(N+1) */
+            quicly_stream_t *stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + epoch));
+            assert(stream != NULL);
+            if ((ret = quicly_send_stream(stream, s)) != 0)
+                goto Exit;
+            resched_stream_data(stream);
+            send_probe = 0;
+        }
+
+        /* send probe if requested */
+        if (send_probe) {
+            if ((ret = _do_allocate_frame(conn, s, 1, 1)) != 0)
+                goto Exit;
+            *s->dst++ = QUICLY_FRAME_TYPE_PING;
+            conn->egress.last_retransmittable_sent_at = now;
+        }
+    }
+
+Exit:
+    return ret;
+}
+
+/* Encodes a CONNECTION_CLOSE frame from the state stashed in
+ * conn->egress.connection_close and bumps the sent counter. A frame_type of
+ * UINT64_MAX marks the application-level (0x1d) variant, anything else the
+ * transport-level (0x1c) variant — the distinction is also used to pick the
+ * probe that is fired. Returns 0 on success or an allocation error. */
+static int send_connection_close(quicly_conn_t *conn, quicly_send_context_t *s)
+{
+    int ret;
+
+    /* write frame */
+    if ((ret = allocate_frame(conn, s,
+                              quicly_close_frame_capacity(conn->egress.connection_close.error_code,
+                                                          conn->egress.connection_close.frame_type,
+                                                          conn->egress.connection_close.reason_phrase))) != 0)
+        return ret;
+    s->dst = quicly_encode_close_frame(s->dst, conn->egress.connection_close.error_code, conn->egress.connection_close.frame_type,
+                                       conn->egress.connection_close.reason_phrase);
+
+    /* update counter */
+    ++conn->egress.connection_close.num_sent;
+
+    /* probe */
+    if (conn->egress.connection_close.frame_type != UINT64_MAX) {
+        QUICLY_PROBE(TRANSPORT_CLOSE_SEND, conn, probe_now(), conn->egress.connection_close.error_code,
+                     conn->egress.connection_close.frame_type, conn->egress.connection_close.reason_phrase);
+    } else {
+        QUICLY_PROBE(APPLICATION_CLOSE_SEND, conn, probe_now(), conn->egress.connection_close.error_code,
+                     conn->egress.connection_close.reason_phrase);
+    }
+
+    return 0;
+}
+
+/* picotls callback invoked whenever TLS derives a new traffic secret. Installs
+ * header-protection and AEAD contexts into the slot matching (epoch, is_enc),
+ * lazily allocating the handshake/application packet-number spaces. For 1-RTT
+ * keys it also stores the raw secret (needed later for key updates), applies
+ * the peer's transport parameters on the egress side, and — once the 1-RTT
+ * write key exists — unblocks streams and schedules a resumption token.
+ * Returns 0 on success or a ptls/quicly error code. */
+static int update_traffic_key_cb(ptls_update_traffic_key_t *self, ptls_t *tls, int is_enc, size_t epoch, const void *secret)
+{
+    quicly_conn_t *conn = *ptls_get_data_ptr(tls);
+    ptls_context_t *tlsctx = ptls_get_context(tls);
+    ptls_cipher_suite_t *cipher = ptls_get_cipher(tls);
+    ptls_cipher_context_t **hp_slot;
+    ptls_aead_context_t **aead_slot;
+    int ret;
+    /* SSLKEYLOGFILE-style labels, indexed by [is-peer-secret][epoch] */
+    static const char *log_labels[2][4] = {
+        {NULL, "QUIC_CLIENT_EARLY_TRAFFIC_SECRET", "QUIC_CLIENT_HANDSHAKE_TRAFFIC_SECRET", "QUIC_CLIENT_TRAFFIC_SECRET_0"},
+        {NULL, NULL, "QUIC_SERVER_HANDSHAKE_TRAFFIC_SECRET", "QUIC_SERVER_TRAFFIC_SECRET_0"}};
+    const char *log_label = log_labels[ptls_is_server(tls) == is_enc][epoch];
+
+    QUICLY_PROBE(CRYPTO_UPDATE_SECRET, conn, probe_now(), is_enc, epoch, log_label,
+                 QUICLY_PROBE_HEXDUMP(secret, cipher->hash->digest_size));
+
+    if (tlsctx->log_event != NULL) {
+        char hexbuf[PTLS_MAX_DIGEST_SIZE * 2 + 1];
+        ptls_hexdump(hexbuf, secret, cipher->hash->digest_size);
+        tlsctx->log_event->cb(tlsctx->log_event, tls, log_label, "%s", hexbuf);
+    }
+
+/* point hp_slot/aead_slot at the cipher-context pair inside structure `p` */
+#define SELECT_CIPHER_CONTEXT(p)                                                                                                   \
+    do {                                                                                                                           \
+        hp_slot = &(p)->header_protection;                                                                                         \
+        aead_slot = &(p)->aead;                                                                                                    \
+    } while (0)
+
+    switch (epoch) {
+    case QUICLY_EPOCH_0RTT:
+        assert(is_enc == quicly_is_client(conn));
+        if (conn->application == NULL && (ret = setup_application_space(conn)) != 0)
+            return ret;
+        if (is_enc) {
+            SELECT_CIPHER_CONTEXT(&conn->application->cipher.egress.key);
+        } else {
+            /* server ingress keeps a dedicated 0-RTT slot next to the 1-RTT one */
+            hp_slot = &conn->application->cipher.ingress.header_protection.zero_rtt;
+            aead_slot = &conn->application->cipher.ingress.aead[1];
+        }
+        break;
+    case QUICLY_EPOCH_HANDSHAKE:
+        if (conn->handshake == NULL && (ret = setup_handshake_space_and_flow(conn, QUICLY_EPOCH_HANDSHAKE)) != 0)
+            return ret;
+        SELECT_CIPHER_CONTEXT(is_enc ? &conn->handshake->cipher.egress : &conn->handshake->cipher.ingress);
+        break;
+    case QUICLY_EPOCH_1RTT: {
+        /* availability of the 1-RTT egress key implies the peer's transport params have arrived; apply them first */
+        if (is_enc)
+            if ((ret = apply_peer_transport_params(conn)) != 0)
+                return ret;
+        if (conn->application == NULL && (ret = setup_application_space(conn)) != 0)
+            return ret;
+        uint8_t *secret_store;
+        if (is_enc) {
+            /* an existing egress key (from 0-RTT) is superseded by the 1-RTT key */
+            if (conn->application->cipher.egress.key.aead != NULL)
+                dispose_cipher(&conn->application->cipher.egress.key);
+            SELECT_CIPHER_CONTEXT(&conn->application->cipher.egress.key);
+            secret_store = conn->application->cipher.egress.secret;
+        } else {
+            hp_slot = &conn->application->cipher.ingress.header_protection.one_rtt;
+            aead_slot = &conn->application->cipher.ingress.aead[0];
+            secret_store = conn->application->cipher.ingress.secret;
+        }
+        /* retain the secret so that future key updates can be derived from it */
+        memcpy(secret_store, secret, cipher->hash->digest_size);
+    } break;
+    default:
+        assert(!"logic flaw");
+        break;
+    }
+
+#undef SELECT_CIPHER_CONTEXT
+
+    if ((ret = setup_cipher(conn, epoch, is_enc, hp_slot, aead_slot, cipher->aead, cipher->hash, secret)) != 0)
+        return ret;
+
+    if (epoch == QUICLY_EPOCH_1RTT && is_enc) {
+        /* update states now that we have 1-RTT write key */
+        conn->application->one_rtt_writable = 1;
+        open_blocked_streams(conn, 1);
+        open_blocked_streams(conn, 0);
+        /* send the first resumption token using the 0.5 RTT window */
+        if (!quicly_is_client(conn) && conn->super.ctx->generate_resumption_token != NULL) {
+            ret = quicly_send_resumption_token(conn);
+            assert(ret == 0);
+        }
+    }
+
+    return 0;
+}
+
+/* Core of the send path: services loss/PTO and idle timers, computes the
+ * congestion-limited send window, then fills packets with handshake flows,
+ * ACKs, control frames and STREAM data in priority order. Returns 0 on
+ * success, QUICLY_ERROR_FREE_CONNECTION on idle timeout, or a quicly/ptls
+ * error. SENDBUF_FULL from inner senders is absorbed (it just means "out of
+ * room in this batch"). */
+static int do_send(quicly_conn_t *conn, quicly_send_context_t *s)
+{
+    int restrict_sending = 0, ack_only = 0, ret;
+    size_t min_packets_to_send = 0;
+
+    /* handle timeouts */
+    if (conn->egress.loss.alarm_at <= now) {
+        if ((ret = quicly_loss_on_alarm(&conn->egress.loss, conn->egress.packet_number - 1,
+                                        conn->egress.loss.largest_acked_packet_plus1 - 1, do_detect_loss, &min_packets_to_send,
+                                        &restrict_sending)) != 0)
+            goto Exit;
+        assert(min_packets_to_send > 0);
+        assert(min_packets_to_send <= s->max_packets);
+
+        if (restrict_sending) {
+            /* PTO (try to send new data when handshake is done, otherwise retire oldest handshake packets and retransmit) */
+            QUICLY_PROBE(PTO, conn, probe_now(), conn->egress.sentmap.bytes_in_flight, conn->egress.cc.cwnd,
+                         conn->egress.loss.pto_count);
+            if (ptls_handshake_is_complete(conn->crypto.tls) && scheduler_can_send(conn)) {
+                /* we have something to send (TODO we might want to make sure that we emit something even when the stream scheduler
+                 * in fact sends nothing) */
+            } else {
+                /* mark something inflight as lost */
+                if ((ret = mark_packets_as_lost(conn, min_packets_to_send)) != 0)
+                    goto Exit;
+            }
+        }
+    } else if (conn->idle_timeout.at <= now) {
+        /* idle timeout: silently drop the connection without emitting anything */
+        QUICLY_PROBE(IDLE_TIMEOUT, conn, probe_now());
+        conn->super.state = QUICLY_STATE_DRAINING;
+        destroy_all_streams(conn, 0, 0);
+        return QUICLY_ERROR_FREE_CONNECTION;
+    }
+
+    /* a zero send window degrades this round to ACK-only output */
+    s->send_window = calc_send_window(conn, min_packets_to_send * conn->super.ctx->max_packet_size, restrict_sending);
+    if (s->send_window == 0)
+        ack_only = 1;
+
+    /* send handshake flows; probe Initial only while the Handshake space does not exist yet */
+    if ((ret = send_handshake_flow(conn, QUICLY_EPOCH_INITIAL, s, ack_only,
+                                   restrict_sending ||
+                                       (conn->super.peer.address_validation.send_probe && conn->handshake == NULL))) != 0)
+        goto Exit;
+    if ((ret = send_handshake_flow(conn, QUICLY_EPOCH_HANDSHAKE, s, ack_only,
+                                   restrict_sending || conn->super.peer.address_validation.send_probe)) != 0)
+        goto Exit;
+
+    /* send encrypted frames (0-RTT until the 1-RTT key becomes writable, short-header packets afterwards) */
+    if (conn->application != NULL && (s->current.cipher = &conn->application->cipher.egress.key)->header_protection != NULL) {
+        s->current.first_byte = conn->application->one_rtt_writable ? QUICLY_QUIC_BIT : QUICLY_PACKET_TYPE_0RTT;
+        /* acks */
+        if (conn->application->one_rtt_writable && conn->egress.send_ack_at <= now && conn->application->super.unacked_count != 0) {
+            if ((ret = send_ack(conn, &conn->application->super, s)) != 0)
+                goto Exit;
+        }
+        if (!ack_only) {
+            /* PTO, always send PING. This is the easiest thing to do in terms of timer control. */
+            if (restrict_sending) {
+                if ((ret = _do_allocate_frame(conn, s, 1, 1)) != 0)
+                    goto Exit;
+                *s->dst++ = QUICLY_FRAME_TYPE_PING;
+            }
+            /* take actions only permitted for short header packets */
+            if (conn->application->one_rtt_writable) {
+                /* send HANDSHAKE_DONE */
+                if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT) != 0 &&
+                    (ret = send_handshake_done(conn, s)) != 0)
+                    goto Exit;
+                /* post-handshake messages */
+                if ((conn->egress.pending_flows & (uint8_t)(1 << QUICLY_EPOCH_1RTT)) != 0) {
+                    quicly_stream_t *stream = quicly_get_stream(conn, -(1 + QUICLY_EPOCH_1RTT));
+                    assert(stream != NULL);
+                    if ((ret = quicly_send_stream(stream, s)) != 0)
+                        goto Exit;
+                    resched_stream_data(stream);
+                }
+                /* respond to all pending received PATH_CHALLENGE frames */
+                if (conn->egress.path_challenge.head != NULL) {
+                    do {
+                        struct st_quicly_pending_path_challenge_t *c = conn->egress.path_challenge.head;
+                        if ((ret = allocate_frame(conn, s, QUICLY_PATH_CHALLENGE_FRAME_CAPACITY)) != 0)
+                            goto Exit;
+                        s->dst = quicly_encode_path_challenge_frame(s->dst, c->is_response, c->data);
+                        conn->egress.path_challenge.head = c->next;
+                        free(c);
+                    } while (conn->egress.path_challenge.head != NULL);
+                    conn->egress.path_challenge.tail_ref = &conn->egress.path_challenge.head;
+                }
+                /* send max_streams frames */
+                if ((ret = send_max_streams(conn, 1, s)) != 0)
+                    goto Exit;
+                if ((ret = send_max_streams(conn, 0, s)) != 0)
+                    goto Exit;
+                /* send connection-level flow control frame */
+                if (should_send_max_data(conn)) {
+                    quicly_sent_t *sent;
+                    if ((ret = allocate_ack_eliciting_frame(conn, s, QUICLY_MAX_DATA_FRAME_CAPACITY, &sent, on_ack_max_data)) != 0)
+                        goto Exit;
+                    uint64_t new_value = conn->ingress.max_data.bytes_consumed + conn->super.ctx->transport_params.max_data;
+                    s->dst = quicly_encode_max_data_frame(s->dst, new_value);
+                    quicly_maxsender_record(&conn->ingress.max_data.sender, new_value, &sent->data.max_data.args);
+                    QUICLY_PROBE(MAX_DATA_SEND, conn, probe_now(), new_value);
+                }
+                /* send streams_blocked frames */
+                if ((ret = send_streams_blocked(conn, 1, s)) != 0)
+                    goto Exit;
+                if ((ret = send_streams_blocked(conn, 0, s)) != 0)
+                    goto Exit;
+                /* send NEW_TOKEN */
+                if ((conn->egress.pending_flows & QUICLY_PENDING_FLOW_NEW_TOKEN_BIT) != 0 &&
+                    (ret = send_resumption_token(conn, s)) != 0)
+                    goto Exit;
+            }
+            /* send stream-level control frames */
+            while (s->num_packets != s->max_packets && quicly_linklist_is_linked(&conn->egress.pending_streams.control)) {
+                quicly_stream_t *stream = (void *)((char *)conn->egress.pending_streams.control.next -
+                                                   offsetof(quicly_stream_t, _send_aux.pending_link.control));
+                if ((ret = send_stream_control_frames(stream, s)) != 0)
+                    goto Exit;
+                quicly_linklist_unlink(&stream->_send_aux.pending_link.control);
+            }
+            /* send STREAM frames */
+            if ((ret = conn->super.ctx->stream_scheduler->do_send(conn->super.ctx->stream_scheduler, conn, s)) != 0)
+                goto Exit;
+        }
+    }
+
+Exit:
+    if (ret == QUICLY_ERROR_SENDBUF_FULL)
+        ret = 0;
+    if (ret == 0 && s->target.packet != NULL)
+        commit_send_packet(conn, s, 0);
+    if (ret == 0) {
+        if (conn->application == NULL || conn->application->super.unacked_count == 0)
+            conn->egress.send_ack_at = INT64_MAX; /* we have sent ACKs for every epoch (or before address validation) */
+        update_loss_alarm(conn);
+        if (s->num_packets != 0)
+            update_idle_timeout(conn, 0);
+    }
+    return ret;
+}
+
+/* Public entry point for emitting datagrams. On input *num_packets is the
+ * capacity of `packets`; on output it is the number filled. In the
+ * CLOSING/DRAINING states only CONNECTION_CLOSE retransmission and the
+ * 3*PTO teardown timer are serviced; otherwise the work is delegated to
+ * do_send(). Returns 0, QUICLY_ERROR_FREE_CONNECTION when the connection may
+ * be discarded, or another quicly/ptls error. */
+int quicly_send(quicly_conn_t *conn, quicly_datagram_t **packets, size_t *num_packets)
+{
+    quicly_send_context_t s = {{NULL, -1}, {NULL, NULL, NULL}, packets, *num_packets};
+    int ret;
+
+    update_now(conn->super.ctx);
+
+    /* bail out if there's nothing is scheduled to be sent */
+    if (now < quicly_get_first_timeout(conn)) {
+        *num_packets = 0;
+        return 0;
+    }
+
+    QUICLY_PROBE(SEND, conn, probe_now(), conn->super.state,
+                 QUICLY_PROBE_HEXDUMP(conn->super.peer.cid.cid, conn->super.peer.cid.len));
+
+    if (conn->super.state >= QUICLY_STATE_CLOSING) {
+        quicly_sentmap_iter_t iter;
+        init_acks_iter(conn, &iter);
+        /* check if the connection can be closed now (after 3 pto) */
+        if (conn->super.state == QUICLY_STATE_DRAINING || conn->egress.connection_close.num_sent != 0) {
+            if (quicly_sentmap_get(&iter)->packet_number == UINT64_MAX)
+                return QUICLY_ERROR_FREE_CONNECTION;
+        }
+        if (conn->super.state == QUICLY_STATE_CLOSING && conn->egress.send_ack_at <= now) {
+            destroy_all_streams(conn, 0, 0); /* delayed until the emission of CONNECTION_CLOSE frame to allow quicly_close to be
+                                              * called from a stream handler */
+            /* pick the best epoch for which egress keys still exist */
+            if (conn->application != NULL && conn->application->one_rtt_writable) {
+                s.current.cipher = &conn->application->cipher.egress.key;
+                s.current.first_byte = QUICLY_QUIC_BIT;
+            } else if (conn->handshake != NULL && (s.current.cipher = &conn->handshake->cipher.egress)->aead != NULL) {
+                s.current.first_byte = QUICLY_PACKET_TYPE_HANDSHAKE;
+            } else {
+                s.current.cipher = &conn->initial->cipher.egress;
+                assert(s.current.cipher->aead != NULL);
+                s.current.first_byte = QUICLY_PACKET_TYPE_INITIAL;
+            }
+            if ((ret = send_connection_close(conn, &s)) != 0)
+                return ret;
+            if ((ret = commit_send_packet(conn, &s, 0)) != 0)
+                return ret;
+        }
+        /* wait at least 1ms */
+        if ((conn->egress.send_ack_at = quicly_sentmap_get(&iter)->sent_at + get_sentmap_expiration_time(conn)) <= now)
+            conn->egress.send_ack_at = now + 1;
+        *num_packets = s.num_packets;
+        return 0;
+    }
+
+    /* emit packets */
+    if ((ret = do_send(conn, &s)) != 0)
+        return ret;
+    /* We might see the timer going back to the past, if time-threshold loss timer fires first without being able to make any
+     * progress (i.e. due to the payload of lost packet being cancelled), then PTO for the previously sent packet. To accomodate
+     * that, we allow to rerun the do_send function just once.
+     */
+    if (s.num_packets == 0 && conn->egress.loss.alarm_at <= now) {
+        assert(conn->egress.loss.alarm_at == now);
+        if ((ret = do_send(conn, &s)) != 0)
+            return ret;
+    }
+    assert_consistency(conn, 1);
+
+    *num_packets = s.num_packets;
+    return ret;
+}
+
+/* Builds a stateless Initial packet (PN 0) carrying a CONNECTION_CLOSE with
+ * error INVALID_TOKEN, used to reject a client whose address token failed
+ * validation. Keys are derived from the client's source CID per the Initial
+ * secret rules. Returns the encrypted datagram, or NULL on failure. */
+quicly_datagram_t *quicly_send_close_invalid_token(quicly_context_t *ctx, struct sockaddr *dest_addr, ptls_iovec_t dest_cid,
+                                                   struct sockaddr *src_addr, ptls_iovec_t src_cid, const char *err_desc)
+{
+    struct st_quicly_cipher_context_t egress = {};
+    quicly_datagram_t *dgram = NULL;
+
+    /* setup keys */
+    if (setup_initial_encryption(get_aes128gcmsha256(ctx), NULL, &egress, src_cid, 0, NULL) != 0)
+        goto Exit;
+
+    /* allocate packet, set peer address */
+    if ((dgram = ctx->packet_allocator->alloc_packet(ctx->packet_allocator, ctx->max_packet_size)) == NULL)
+        goto Exit;
+    set_address(&dgram->dest, dest_addr);
+    set_address(&dgram->src, src_addr);
+
+    uint8_t *dst = dgram->data.base, *length_at;
+
+    /* build packet */
+    QUICLY_BUILD_ASSERT(QUICLY_SEND_PN_SIZE == 2);
+    *dst++ = QUICLY_PACKET_TYPE_INITIAL | 0x1 /* 2-byte PN */;
+    dst = quicly_encode32(dst, QUICLY_PROTOCOL_VERSION);
+    *dst++ = dest_cid.len;
+    memcpy(dst, dest_cid.base, dest_cid.len);
+    dst += dest_cid.len;
+    *dst++ = src_cid.len;
+    memcpy(dst, src_cid.base, src_cid.len);
+    dst += src_cid.len;
+    *dst++ = 0;        /* token_length = 0 */
+    length_at = dst++; /* length_at to be filled in later as 1-byte varint */
+    *dst++ = 0;        /* PN = 0 */
+    *dst++ = 0;        /* ditto */
+    uint8_t *payload_from = dst;
+    dst = quicly_encode_close_frame(dst, QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_INVALID_TOKEN),
+                                    QUICLY_FRAME_TYPE_PADDING, err_desc);
+
+    /* determine the size of the packet, make adjustments; Length must fit in a 1-byte varint (< 64) */
+    dst += egress.aead->algo->tag_size;
+    assert(dst - dgram->data.base <= ctx->max_packet_size);
+    assert(dst - length_at - 1 < 64);
+    *length_at = dst - length_at - 1;
+    dgram->data.len = dst - dgram->data.base;
+
+    /* encrypt packet (payload AEAD, then header protection via the default crypto engine) */
+    ptls_aead_encrypt(egress.aead, payload_from, payload_from, dst - payload_from - egress.aead->algo->tag_size, 0,
+                      dgram->data.base, payload_from - dgram->data.base);
+    quicly_default_crypto_engine.finalize_send_packet(&quicly_default_crypto_engine, NULL, egress.header_protection, egress.aead,
+                                                      dgram, 0, payload_from - dgram->data.base, 0);
+
+Exit:
+    if (egress.aead != NULL)
+        dispose_cipher(&egress);
+    return dgram;
+}
+
+/* Builds a Stateless Reset datagram: random bytes disguised as a short-header
+ * packet, with the reset token (derived from src_cid by the CID encryptor)
+ * occupying the trailing QUICLY_STATELESS_RESET_TOKEN_LEN bytes. Returns the
+ * datagram, or NULL on allocation/token failure. */
+quicly_datagram_t *quicly_send_stateless_reset(quicly_context_t *ctx, struct sockaddr *dest_addr, struct sockaddr *src_addr,
+                                               const void *src_cid)
+{
+    quicly_datagram_t *dgram;
+
+    /* allocate packet, set peer address */
+    if ((dgram = ctx->packet_allocator->alloc_packet(ctx->packet_allocator, QUICLY_STATELESS_RESET_PACKET_MIN_LEN)) == NULL)
+        return NULL;
+    set_address(&dgram->dest, dest_addr);
+    set_address(&dgram->src, src_addr);
+
+    /* build stateless reset packet: random payload, first byte forced to look like a short-header packet */
+    ctx->tls->random_bytes(dgram->data.base, QUICLY_STATELESS_RESET_PACKET_MIN_LEN - QUICLY_STATELESS_RESET_TOKEN_LEN);
+    dgram->data.base[0] = (dgram->data.base[0] & ~QUICLY_LONG_HEADER_BIT) | QUICLY_QUIC_BIT;
+    if (!ctx->cid_encryptor->generate_stateless_reset_token(
+            ctx->cid_encryptor, dgram->data.base + QUICLY_STATELESS_RESET_PACKET_MIN_LEN - QUICLY_STATELESS_RESET_TOKEN_LEN,
+            src_cid)) {
+        ctx->packet_allocator->free_packet(ctx->packet_allocator, dgram);
+        return NULL;
+    }
+    dgram->data.len = QUICLY_STATELESS_RESET_PACKET_MIN_LEN;
+
+    return dgram;
+}
+
+/* Schedules transmission of a NEW_TOKEN frame (no-op once the connection has
+ * started closing); the token itself is generated later on the send path.
+ * Always returns 0. */
+int quicly_send_resumption_token(quicly_conn_t *conn)
+{
+    if (conn->super.state <= QUICLY_STATE_CONNECTED) {
+        /* bumping the generation invalidates tokens that are currently inflight */
+        ++conn->egress.new_token.generation;
+        conn->egress.pending_flows |= QUICLY_PENDING_FLOW_NEW_TOKEN_BIT;
+    }
+    return 0;
+}
+
+/* Sentmap callback for the placeholder entry registered by enter_close(); it
+ * exists only to keep the 3*PTO teardown timer armed and must never see an
+ * ACK event. */
+static int on_end_closing(quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                          quicly_sentmap_event_t event)
+{
+    /* we stop accepting frames by the time this ack callback is being registered */
+    assert(event != QUICLY_SENTMAP_EVENT_ACKED);
+    return 0;
+}
+
+/* Transitions the connection into CLOSING (when this host initiates) or
+ * DRAINING (when reacting to the peer). Discards all inflight state and
+ * plants a dummy sentmap entry so the teardown timer keeps firing. With
+ * wait_draining set, the first wake-up is deferred by the sentmap expiration
+ * time instead of being immediate. Returns 0 or an error from the sentmap. */
+static int enter_close(quicly_conn_t *conn, int host_is_initiating, int wait_draining)
+{
+    int ret;
+
+    assert(conn->super.state < QUICLY_STATE_CLOSING);
+
+    /* release all inflight info, register a close timeout */
+    if ((ret = discard_sentmap_by_epoch(conn, ~0u)) != 0)
+        return ret;
+    if ((ret = quicly_sentmap_prepare(&conn->egress.sentmap, conn->egress.packet_number, now, QUICLY_EPOCH_INITIAL)) != 0)
+        return ret;
+    if (quicly_sentmap_allocate(&conn->egress.sentmap, on_end_closing) == NULL)
+        return PTLS_ERROR_NO_MEMORY;
+    quicly_sentmap_commit(&conn->egress.sentmap, 0);
+    ++conn->egress.packet_number;
+
+    if (host_is_initiating) {
+        conn->super.state = QUICLY_STATE_CLOSING;
+        /* send_ack_at doubles as the "emit CONNECTION_CLOSE now" trigger */
+        conn->egress.send_ack_at = 0;
+    } else {
+        conn->super.state = QUICLY_STATE_DRAINING;
+        conn->egress.send_ack_at = wait_draining ? now + get_sentmap_expiration_time(conn) : 0;
+    }
+
+    update_loss_alarm(conn);
+
+    return 0;
+}
+
+/* Records the CONNECTION_CLOSE payload (error code, offending frame type,
+ * reason phrase) after mapping quicly/ptls error-space values onto wire error
+ * codes, then enters the CLOSING state. A no-op if closing has already begun.
+ * NOTE(review): reason_phrase is stored by reference — the caller appears to
+ * be expected to pass a string that outlives the connection (string literals
+ * in this file); confirm against callers. */
+int initiate_close(quicly_conn_t *conn, int err, uint64_t frame_type, const char *reason_phrase)
+{
+    uint16_t quic_error_code;
+
+    if (conn->super.state >= QUICLY_STATE_CLOSING)
+        return 0;
+
+    if (reason_phrase == NULL)
+        reason_phrase = "";
+
+    /* convert error code to QUIC error codes */
+    if (err == 0) {
+        quic_error_code = 0;
+        frame_type = QUICLY_FRAME_TYPE_PADDING;
+    } else if (QUICLY_ERROR_IS_QUIC_TRANSPORT(err)) {
+        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
+    } else if (QUICLY_ERROR_IS_QUIC_APPLICATION(err)) {
+        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
+        /* UINT64_MAX selects the application-close (0x1d) frame variant on the send path */
+        frame_type = UINT64_MAX;
+    } else if (PTLS_ERROR_GET_CLASS(err) == PTLS_ERROR_CLASS_SELF_ALERT) {
+        quic_error_code = QUICLY_TRANSPORT_ERROR_TLS_ALERT_BASE + PTLS_ERROR_TO_ALERT(err);
+    } else {
+        quic_error_code = QUICLY_ERROR_GET_ERROR_CODE(QUICLY_TRANSPORT_ERROR_INTERNAL);
+    }
+
+    conn->egress.connection_close.error_code = quic_error_code;
+    conn->egress.connection_close.frame_type = frame_type;
+    conn->egress.connection_close.reason_phrase = reason_phrase;
+    return enter_close(conn, 1, 0);
+}
+
+/* Public API for closing a connection; accepts 0 (clean close), an
+ * application error, or a concealed error, and forwards to initiate_close(). */
+int quicly_close(quicly_conn_t *conn, int err, const char *reason_phrase)
+{
+    assert(err == 0 || QUICLY_ERROR_IS_QUIC_APPLICATION(err) || QUICLY_ERROR_IS_CONCEALED(err));
+    update_now(conn->super.ctx);
+
+    return initiate_close(conn, err, QUICLY_FRAME_TYPE_PADDING /* used when err == 0 */, reason_phrase);
+}
+
+/* Looks up the stream for a frame received from the peer. If it does not
+ * exist and the id is peer-initiated, implicitly opens every not-yet-open
+ * stream of that group up to and including stream_id (QUIC semantics: a frame
+ * on stream N opens all lower-numbered streams of the same type), invoking
+ * the application's stream_open callback for each. *stream may be NULL on a
+ * 0 return when the id refers to an already-closed self-initiated stream. */
+static int get_stream_or_open_if_new(quicly_conn_t *conn, uint64_t stream_id, quicly_stream_t **stream)
+{
+    int ret = 0;
+
+    if ((*stream = quicly_get_stream(conn, stream_id)) != NULL)
+        goto Exit;
+
+    if (quicly_stream_is_client_initiated(stream_id) != quicly_is_client(conn)) {
+        /* check if stream id is within the bounds */
+        if (stream_id / 4 >= quicly_get_ingress_max_streams(conn, quicly_stream_is_unidirectional(stream_id))) {
+            ret = QUICLY_TRANSPORT_ERROR_STREAM_LIMIT;
+            goto Exit;
+        }
+        /* open new streams upto given id */
+        struct st_quicly_conn_streamgroup_state_t *group = get_streamgroup_state(conn, stream_id);
+        if (group->next_stream_id <= stream_id) {
+            uint64_t max_stream_data_local, max_stream_data_remote;
+            /* flow-control limits depend on directionality: peer-initiated uni streams carry no send side */
+            if (quicly_stream_is_unidirectional(stream_id)) {
+                max_stream_data_local = conn->super.ctx->transport_params.max_stream_data.uni;
+                max_stream_data_remote = 0;
+            } else {
+                max_stream_data_local = conn->super.ctx->transport_params.max_stream_data.bidi_remote;
+                max_stream_data_remote = conn->super.peer.transport_params.max_stream_data.bidi_local;
+            }
+            do {
+                if ((*stream = open_stream(conn, group->next_stream_id, (uint32_t)max_stream_data_local, max_stream_data_remote)) ==
+                    NULL) {
+                    ret = PTLS_ERROR_NO_MEMORY;
+                    goto Exit;
+                }
+                if ((ret = conn->super.ctx->stream_open->cb(conn->super.ctx->stream_open, *stream)) != 0) {
+                    *stream = NULL;
+                    goto Exit;
+                }
+                ++group->num_streams;
+                group->next_stream_id += 4; /* stream ids of one type/direction are spaced 4 apart */
+            } while (stream_id != (*stream)->stream_id);
+        }
+    }
+
+Exit:
+    return ret;
+}
+
+/* Handles a CRYPTO frame by feeding its payload to the epoch's crypto stream
+ * (identified by the negative stream id -(1 + epoch)). */
+static int handle_crypto_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_stream_frame_t frame;
+    quicly_stream_t *stream;
+    int ret;
+
+    if ((ret = quicly_decode_crypto_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+    stream = quicly_get_stream(conn, -(quicly_stream_id_t)(1 + state->epoch));
+    assert(stream != NULL);
+    return apply_stream_frame(stream, &frame);
+}
+
+/* Handles a STREAM frame: decodes it, resolves (possibly implicitly opening)
+ * the target stream, and applies the data. A NULL stream (closed stream)
+ * silently succeeds. */
+static int handle_stream_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_stream_frame_t frame;
+    quicly_stream_t *stream;
+    int ret;
+
+    if ((ret = quicly_decode_stream_frame(state->frame_type, &state->src, state->end, &frame)) != 0)
+        return ret;
+    QUICLY_PROBE(QUICTRACE_RECV_STREAM, conn, probe_now(), frame.stream_id, frame.offset, frame.data.len, (int)frame.is_fin);
+    if ((ret = get_stream_or_open_if_new(conn, frame.stream_id, &stream)) != 0 || stream == NULL)
+        return ret;
+    return apply_stream_frame(stream, &frame);
+}
+
+/* Handles a RESET_STREAM frame: resets the receive state (crediting the
+ * connection-level flow controller with the bytes that will never arrive),
+ * notifies the application, and destroys the stream if it has become
+ * disposable. Ignored when the transfer had already completed. */
+static int handle_reset_stream_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_reset_stream_frame_t frame;
+    quicly_stream_t *stream;
+    int ret;
+
+    if ((ret = quicly_decode_reset_stream_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+
+    if ((ret = get_stream_or_open_if_new(conn, frame.stream_id, &stream)) != 0 || stream == NULL)
+        return ret;
+
+    if (!quicly_recvstate_transfer_complete(&stream->recvstate)) {
+        uint64_t bytes_missing;
+        if ((ret = quicly_recvstate_reset(&stream->recvstate, frame.final_size, &bytes_missing)) != 0)
+            return ret;
+        stream->conn->ingress.max_data.bytes_consumed += bytes_missing;
+        stream->callbacks->on_receive_reset(stream, QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.app_error_code));
+        /* the callback may have initiated connection close */
+        if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
+            return QUICLY_ERROR_IS_CLOSING;
+        if (stream_is_destroyable(stream))
+            destroy_stream(stream, 0);
+    }
+
+    return 0;
+}
+
+/* Handles an ACK (or ACK_ECN) frame: walks the ack blocks from the smallest
+ * acknowledged PN upwards in lockstep with the sentmap, fires the ACKED event
+ * for every newly acked packet, then updates RTT/loss detection and the
+ * congestion controller. Returns 0 on success or a transport error.
+ *
+ * Fix: on an epoch mismatch between the ACK and the acked packet the code
+ * returned QUICLY_PROTOCOL_VERSION (the wire version constant) instead of an
+ * error code; per RFC 9000 §13.1 / upstream quicly this must be
+ * PROTOCOL_VIOLATION. */
+static int handle_ack_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_ack_frame_t frame;
+    quicly_sentmap_iter_t iter;
+    struct {
+        uint64_t pn;
+        int64_t sent_at;
+    } largest_newly_acked = {UINT64_MAX, INT64_MAX};
+    size_t bytes_acked = 0;
+    int includes_ack_eliciting = 0, ret;
+
+    if ((ret = quicly_decode_ack_frame(&state->src, state->end, &frame, state->frame_type == QUICLY_FRAME_TYPE_ACK_ECN)) != 0)
+        return ret;
+
+    uint64_t pn_acked = frame.smallest_acknowledged;
+
+    switch (state->epoch) {
+    case QUICLY_EPOCH_0RTT:
+        /* 0-RTT packets must never carry ACKs */
+        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
+    case QUICLY_EPOCH_HANDSHAKE:
+        /* a Handshake ACK proves the peer validated our address */
+        conn->super.peer.address_validation.send_probe = 0;
+        break;
+    default:
+        break;
+    }
+
+    init_acks_iter(conn, &iter);
+
+    /* TODO log PNs being ACKed too late */
+
+    size_t gap_index = frame.num_gaps;
+    while (1) {
+        assert(frame.ack_block_lengths[gap_index] != 0);
+        /* Ack blocks are organized in the ACK frame and consequently in the ack_block_lengths array from the largest acked down.
+         * Processing acks in packet number order requires processing the ack blocks in reverse order. */
+        uint64_t pn_block_max = pn_acked + frame.ack_block_lengths[gap_index] - 1;
+        QUICLY_PROBE(QUICTRACE_RECV_ACK, conn, probe_now(), pn_acked, pn_block_max);
+        while (quicly_sentmap_get(&iter)->packet_number < pn_acked)
+            quicly_sentmap_skip(&iter);
+        do {
+            const quicly_sent_packet_t *sent = quicly_sentmap_get(&iter);
+            uint64_t pn_sent = sent->packet_number;
+            assert(pn_acked <= pn_sent);
+            if (pn_acked < pn_sent) {
+                /* set pn_acked to pn_sent; or past the end of the ack block, for use with the next ack block */
+                if (pn_sent <= pn_block_max) {
+                    pn_acked = pn_sent;
+                } else {
+                    pn_acked = pn_block_max + 1;
+                    break;
+                }
+            }
+            /* process newly acked packet; an ACK may only cover packets of its own PN space */
+            if (state->epoch != sent->ack_epoch)
+                return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
+            int is_late_ack = 0;
+            if (sent->ack_eliciting) {
+                includes_ack_eliciting = 1;
+                if (sent->bytes_in_flight == 0) {
+                    /* already declared lost and retransmitted before the ACK arrived */
+                    is_late_ack = 1;
+                    ++conn->super.stats.num_packets.late_acked;
+                }
+            }
+            ++conn->super.stats.num_packets.ack_received;
+            largest_newly_acked.pn = pn_acked;
+            largest_newly_acked.sent_at = sent->sent_at;
+            QUICLY_PROBE(PACKET_ACKED, conn, probe_now(), pn_acked, is_late_ack);
+            if (sent->bytes_in_flight != 0) {
+                bytes_acked += sent->bytes_in_flight;
+            }
+            if ((ret = quicly_sentmap_update(&conn->egress.sentmap, &iter, QUICLY_SENTMAP_EVENT_ACKED, conn)) != 0)
+                return ret;
+            if (state->epoch == QUICLY_EPOCH_1RTT) {
+                /* an ack of a packet sent with the updated key confirms the key update */
+                struct st_quicly_application_space_t *space = conn->application;
+                if (space->cipher.egress.key_update_pn.last <= pn_acked) {
+                    space->cipher.egress.key_update_pn.last = UINT64_MAX;
+                    space->cipher.egress.key_update_pn.next = conn->egress.packet_number + conn->super.ctx->max_packets_per_key;
+                    QUICLY_PROBE(CRYPTO_SEND_KEY_UPDATE_CONFIRMED, conn, probe_now(), space->cipher.egress.key_update_pn.next);
+                }
+            }
+            ++pn_acked;
+        } while (pn_acked <= pn_block_max);
+        assert(pn_acked == pn_block_max + 1);
+        if (gap_index-- == 0)
+            break;
+        pn_acked += frame.gaps[gap_index];
+    }
+
+    QUICLY_PROBE(QUICTRACE_RECV_ACK_DELAY, conn, probe_now(), frame.ack_delay);
+
+    /* Update loss detection engine on ack. The function uses ack_delay only when the largest_newly_acked is also the largest acked
+     * so far. So, it does not matter if the ack_delay being passed in does not apply to the largest_newly_acked. */
+    quicly_loss_on_ack_received(&conn->egress.loss, largest_newly_acked.pn, now, largest_newly_acked.sent_at, frame.ack_delay,
+                                includes_ack_eliciting);
+
+    /* OnPacketAcked and OnPacketAckedCC */
+    if (bytes_acked > 0) {
+        quicly_cc_on_acked(&conn->egress.cc, (uint32_t)bytes_acked, frame.largest_acknowledged,
+                           (uint32_t)(conn->egress.sentmap.bytes_in_flight + bytes_acked));
+        QUICLY_PROBE(QUICTRACE_CC_ACK, conn, probe_now(), &conn->egress.loss.rtt, conn->egress.cc.cwnd,
+                     conn->egress.sentmap.bytes_in_flight);
+    }
+
+    QUICLY_PROBE(CC_ACK_RECEIVED, conn, probe_now(), frame.largest_acknowledged, bytes_acked, conn->egress.cc.cwnd,
+                 conn->egress.sentmap.bytes_in_flight);
+
+    /* loss-detection */
+    quicly_loss_detect_loss(&conn->egress.loss, frame.largest_acknowledged, do_detect_loss);
+    update_loss_alarm(conn);
+
+    return 0;
+}
+
+/* Handles MAX_STREAM_DATA: raises the stream-level send limit (monotonic —
+ * smaller values are ignored) and reschedules the stream if it is not being
+ * reset. References to a send-side we do not own are a framing error; frames
+ * for unknown (closed) streams are ignored. */
+static int handle_max_stream_data_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_max_stream_data_frame_t frame;
+    quicly_stream_t *stream;
+    int ret;
+
+    if ((ret = quicly_decode_max_stream_data_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+
+    QUICLY_PROBE(MAX_STREAM_DATA_RECEIVE, conn, probe_now(), frame.stream_id, frame.max_stream_data);
+
+    if (!quicly_stream_has_send_side(quicly_is_client(conn), frame.stream_id))
+        return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+
+    if ((stream = quicly_get_stream(conn, frame.stream_id)) == NULL)
+        return 0;
+
+    if (frame.max_stream_data < stream->_send_aux.max_stream_data)
+        return 0;
+    stream->_send_aux.max_stream_data = frame.max_stream_data;
+
+    if (stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE)
+        resched_stream_data(stream);
+
+    return 0;
+}
+
+/* Handles DATA_BLOCKED: the peer is starved of connection-level flow-control
+ * credit, so force retransmission of MAX_DATA and wake the send path if a
+ * larger limit can be advertised. */
+static int handle_data_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_data_blocked_frame_t frame;
+    int ret;
+
+    if ((ret = quicly_decode_data_blocked_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+
+    QUICLY_PROBE(DATA_BLOCKED_RECEIVE, conn, probe_now(), frame.offset);
+
+    quicly_maxsender_request_transmit(&conn->ingress.max_data.sender);
+    if (should_send_max_data(conn))
+        conn->egress.send_ack_at = 0; /* schedule an immediate send */
+
+    return 0;
+}
+
+/* Handles STREAM_DATA_BLOCKED: analogous to DATA_BLOCKED but for a single
+ * stream — schedules retransmission of MAX_STREAM_DATA for that stream.
+ * Referencing a receive-side we do not own is a framing error; frames for
+ * unknown streams are ignored. */
+static int handle_stream_data_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_stream_data_blocked_frame_t frame;
+    quicly_stream_t *stream;
+    int ret;
+
+    if ((ret = quicly_decode_stream_data_blocked_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+
+    QUICLY_PROBE(STREAM_DATA_BLOCKED_RECEIVE, conn, probe_now(), frame.stream_id, frame.offset);
+
+    if (!quicly_stream_has_receive_side(quicly_is_client(conn), frame.stream_id))
+        return QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
+
+    if ((stream = quicly_get_stream(conn, frame.stream_id)) != NULL) {
+        quicly_maxsender_request_transmit(&stream->_send_aux.max_stream_data_sender);
+        if (should_send_max_stream_data(stream))
+            sched_stream_control(stream);
+    }
+
+    return 0;
+}
+
+/* Handles STREAMS_BLOCKED (uni or bidi variant, distinguished by frame type):
+ * requests retransmission of the corresponding MAX_STREAMS and wakes the send
+ * path if a larger limit can be advertised. Ignored when we advertise no
+ * limit for that direction (maxsender == NULL). */
+static int handle_streams_blocked_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_streams_blocked_frame_t frame;
+    int uni = state->frame_type == QUICLY_FRAME_TYPE_STREAMS_BLOCKED_UNI, ret;
+
+    if ((ret = quicly_decode_streams_blocked_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+
+    QUICLY_PROBE(STREAMS_BLOCKED_RECEIVE, conn, probe_now(), frame.count, uni);
+
+    quicly_maxsender_t *maxsender = uni ? conn->ingress.max_streams.uni : conn->ingress.max_streams.bidi;
+    if (maxsender != NULL) {
+        quicly_maxsender_request_transmit(maxsender);
+        if (should_send_max_streams(conn, uni))
+            conn->egress.send_ack_at = 0;
+    }
+
+    return 0;
+}
+
+/* Handles MAX_STREAMS (uni or bidi): raises our stream-count limit and opens
+ * any streams the application had queued while blocked on the old limit. */
+static int handle_max_streams_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_max_streams_frame_t frame;
+    int uni = state->frame_type == QUICLY_FRAME_TYPE_MAX_STREAMS_UNI, ret;
+
+    if ((ret = quicly_decode_max_streams_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+
+    QUICLY_PROBE(MAX_STREAMS_RECEIVE, conn, probe_now(), frame.count, uni);
+
+    if ((ret = update_max_streams(uni ? &conn->egress.max_streams.uni : &conn->egress.max_streams.bidi, frame.count)) != 0)
+        return ret;
+
+    open_blocked_streams(conn, uni);
+
+    return 0;
+}
+
+/* Handles PATH_CHALLENGE: queues a PATH_RESPONSE echoing the 8-byte payload
+ * (is_response=1), to be emitted by the send path. */
+static int handle_path_challenge_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_path_challenge_frame_t frame;
+    int ret;
+
+    if ((ret = quicly_decode_path_challenge_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+    return schedule_path_challenge(conn, 1, frame.data);
+}
+
+/* Handles PATH_RESPONSE: this implementation never sends PATH_CHALLENGE, so
+ * any response is unsolicited and therefore a protocol violation. */
+static int handle_path_response_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
+}
+
+/* Handles NEW_TOKEN: hands the resumption token to the application's
+ * save_resumption_token callback, or discards it if no callback is set. */
+static int handle_new_token_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
+{
+    quicly_new_token_frame_t frame;
+    int ret;
+
+    if ((ret = quicly_decode_new_token_frame(&state->src, state->end, &frame)) != 0)
+        return ret;
+    QUICLY_PROBE(NEW_TOKEN_RECEIVE, conn, probe_now(), frame.token.base, frame.token.len);
+    if (conn->super.ctx->save_resumption_token == NULL)
+        return 0;
+    return conn->super.ctx->save_resumption_token->cb(conn->super.ctx->save_resumption_token, conn, frame.token);
+}
+
/* Handler for the STOP_SENDING frame: resets the send side of the designated
 * stream (if still open) and notifies the application via on_send_stop. */
static int handle_stop_sending_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    quicly_stop_sending_frame_t frame;
    quicly_stream_t *stream;
    int ret;

    if ((ret = quicly_decode_stop_sending_frame(&state->src, state->end, &frame)) != 0)
        return ret;

    /* frames referring to streams that are already gone are ignored (stream == NULL, ret == 0) */
    if ((ret = get_stream_or_open_if_new(conn, frame.stream_id, &stream)) != 0 || stream == NULL)
        return ret;

    if (quicly_sendstate_is_open(&stream->sendstate)) {
        /* reset the stream, then notify the application */
        int err = QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.app_error_code);
        quicly_reset_stream(stream, err);
        stream->callbacks->on_send_stop(stream, err);
        /* the callback may have initiated connection closure */
        if (stream->conn->super.state >= QUICLY_STATE_CLOSING)
            return QUICLY_ERROR_IS_CLOSING;
    }

    return 0;
}
+
/* Handler for the MAX_DATA frame: raises the connection-wide flow-control
 * limit on the amount of data this endpoint may send. */
static int handle_max_data_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    quicly_max_data_frame_t frame;
    int ret;

    if ((ret = quicly_decode_max_data_frame(&state->src, state->end, &frame)) != 0)
        return ret;

    QUICLY_PROBE(MAX_DATA_RECEIVE, conn, probe_now(), frame.max_data);

    /* flow-control limits only ever grow; stale (smaller) values are ignored */
    if (frame.max_data < conn->egress.max_data.permitted)
        return 0;
    conn->egress.max_data.permitted = frame.max_data;

    return 0;
}
+
/* Switches the connection to the given QUIC protocol version and schedules
 * retransmission of everything sent so far under the previous version. */
static int negotiate_using_version(quicly_conn_t *conn, uint32_t version)
{
    /* set selected version */
    conn->super.version = version;
    QUICLY_PROBE(VERSION_SWITCH, conn, probe_now(), version);

    /* reschedule all the packets that have been sent for immediate resend */
    return discard_sentmap_by_epoch(conn, ~0u);
}
+
/* Handles a Version Negotiation packet: scans the peer-supplied list of 32-bit
 * versions and switches to the first usable one. CAN_SELECT rejects the
 * version currently in use (a VN packet offering it would be bogus) and
 * anything other than QUICLY_PROTOCOL_VERSION. */
static int handle_version_negotiation_packet(quicly_conn_t *conn, quicly_decoded_packet_t *packet)
{
#define CAN_SELECT(v) ((v) != conn->super.version && (v) == QUICLY_PROTOCOL_VERSION)

    const uint8_t *src = packet->octets.base + packet->encrypted_off, *end = packet->octets.base + packet->octets.len;

    /* the payload must be a non-empty sequence of 32-bit version numbers */
    if (src == end || (end - src) % 4 != 0)
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
    while (src != end) {
        uint32_t supported_version = quicly_decode32(&src);
        if (CAN_SELECT(supported_version))
            return negotiate_using_version(conn, supported_version);
    }
    return QUICLY_ERROR_NO_COMPATIBLE_VERSION;

#undef CAN_SELECT
}
+
/* Orders two socket addresses: returns 0 when equal, a negative or positive
 * value otherwise. AF_INET compares (address, port); AF_INET6 additionally
 * compares flowinfo and scope id. Two AF_UNSPEC addresses always compare
 * unequal (returns 1); an unknown family trips an assertion. */
static int compare_socket_address(struct sockaddr *x, struct sockaddr *y)
{
    if (x->sa_family != y->sa_family)
        return x->sa_family < y->sa_family ? -1 : 1;

    switch (x->sa_family) {
    case AF_INET: {
        struct sockaddr_in *a = (void *)x, *b = (void *)y;
        uint32_t addr_a = ntohl(a->sin_addr.s_addr), addr_b = ntohl(b->sin_addr.s_addr);
        if (addr_a != addr_b)
            return addr_a < addr_b ? -1 : 1;
        uint16_t port_a = ntohs(a->sin_port), port_b = ntohs(b->sin_port);
        if (port_a != port_b)
            return port_a < port_b ? -1 : 1;
    } break;
    case AF_INET6: {
        struct sockaddr_in6 *a = (void *)x, *b = (void *)y;
        int addr_diff = memcmp(a->sin6_addr.s6_addr, b->sin6_addr.s6_addr, sizeof(a->sin6_addr.s6_addr));
        if (addr_diff != 0)
            return addr_diff;
        uint16_t port_a = ntohs(a->sin6_port), port_b = ntohs(b->sin6_port);
        if (port_a != port_b)
            return port_a < port_b ? -1 : 1;
        /* flowinfo and scope id are compared as stored (no byte-order conversion) */
        if (a->sin6_flowinfo != b->sin6_flowinfo)
            return a->sin6_flowinfo < b->sin6_flowinfo ? -1 : 1;
        if (a->sin6_scope_id != b->sin6_scope_id)
            return a->sin6_scope_id < b->sin6_scope_id ? -1 : 1;
    } break;
    case AF_UNSPEC:
        return 1;
    default:
        assert(!"unknown sa_family");
        break;
    }

    return 0;
}
+
/* Determines if the received packet is a stateless reset for this connection,
 * i.e., its trailing QUICLY_STATELESS_RESET_TOKEN_LEN bytes equal the token
 * announced by the peer. The verdict may already be cached inside the decoded
 * packet (set by quicly_is_destination), in which case it is reused. */
static int is_stateless_reset(quicly_conn_t *conn, quicly_decoded_packet_t *decoded)
{
    switch (decoded->_is_stateless_reset_cached) {
    case QUICLY__DECODED_PACKET_CACHED_IS_STATELESS_RESET:
        return 1;
    case QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET:
        return 0;
    default:
        break;
    }

    /* no token from the peer means there is nothing to match against */
    if (conn->super.peer.stateless_reset.token == NULL)
        return 0;
    if (decoded->octets.len < QUICLY_STATELESS_RESET_PACKET_MIN_LEN)
        return 0;
    /* the token occupies the last bytes of the datagram */
    if (memcmp(decoded->octets.base + decoded->octets.len - QUICLY_STATELESS_RESET_TOKEN_LEN,
               conn->super.peer.stateless_reset.token, QUICLY_STATELESS_RESET_TOKEN_LEN) != 0)
        return 0;

    return 1;
}
+
/* Determines if the decoded packet is destined for this connection, matching
 * either by connection ID (when a cid_encryptor is in use) or by socket
 * addresses. As a side effect the stateless-reset verdict is cached inside
 * `decoded` for later reuse by quicly_receive / is_stateless_reset. */
int quicly_is_destination(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr,
                          quicly_decoded_packet_t *decoded)
{
    if (QUICLY_PACKET_IS_LONG_HEADER(decoded->octets.base[0])) {
        /* long header: validate address, then consult the CID */
        if (compare_socket_address(&conn->super.peer.address.sa, src_addr) != 0)
            return 0;
        if (conn->super.host.address.sa.sa_family != AF_UNSPEC &&
            compare_socket_address(&conn->super.host.address.sa, dest_addr) != 0)
            return 0;
        /* server may see the CID generated by the client for Initial and 0-RTT packets */
        if (!quicly_is_client(conn) && decoded->cid.dest.might_be_client_generated) {
            if (quicly_cid_is_equal(&conn->super.host.offered_cid, decoded->cid.dest.encrypted))
                goto Found;
        }
    }

    if (conn->super.ctx->cid_encryptor != NULL) {
        /* CID-based routing: match the decrypted (master_id, thread_id, node_id) tuple */
        if (conn->super.master_id.master_id == decoded->cid.dest.plaintext.master_id &&
            conn->super.master_id.thread_id == decoded->cid.dest.plaintext.thread_id &&
            conn->super.master_id.node_id == decoded->cid.dest.plaintext.node_id)
            goto Found;
        if (is_stateless_reset(conn, decoded))
            goto Found_StatelessReset;
    } else {
        /* no CID encryption: fall back to address matching */
        if (compare_socket_address(&conn->super.peer.address.sa, src_addr) == 0)
            goto Found;
        if (conn->super.host.address.sa.sa_family != AF_UNSPEC &&
            compare_socket_address(&conn->super.host.address.sa, dest_addr) != 0)
            return 0;
    }

    /* not found */
    return 0;

Found:
    decoded->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET;
    return 1;

Found_StatelessReset:
    decoded->_is_stateless_reset_cached = QUICLY__DECODED_PACKET_CACHED_IS_STATELESS_RESET;
    return 1;
}
+
/* Common processing for peer-initiated closure (CONNECTION_CLOSE of either
 * kind, or a stateless reset): switches to the closing state, notifies the
 * application, then destroys all streams. No-op if closure is in progress.
 * NOTE(review): the third argument to enter_close appears to suppress the
 * draining wait when the trigger was a stateless reset — confirm against
 * enter_close's parameter semantics. */
static int handle_close(quicly_conn_t *conn, int err, uint64_t frame_type, ptls_iovec_t reason_phrase)
{
    int ret;

    if (conn->super.state >= QUICLY_STATE_CLOSING)
        return 0;

    /* switch to closing state, notify the app (at this moment the streams are accessible), then destroy the streams */
    if ((ret = enter_close(conn, 0, err != QUICLY_ERROR_RECEIVED_STATELESS_RESET)) != 0)
        return ret;
    if (conn->super.ctx->closed_by_peer != NULL)
        conn->super.ctx->closed_by_peer->cb(conn->super.ctx->closed_by_peer, conn, err, frame_type,
                                            (const char *)reason_phrase.base, reason_phrase.len);
    destroy_all_streams(conn, err, 0);

    return 0;
}
+
/* Handler for the transport-level CONNECTION_CLOSE frame: logs the event and
 * delegates to handle_close with the error converted to quicly's encoding. */
static int handle_transport_close_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    quicly_transport_close_frame_t frame;
    int ret;

    if ((ret = quicly_decode_transport_close_frame(&state->src, state->end, &frame)) != 0)
        return ret;

    QUICLY_PROBE(TRANSPORT_CLOSE_RECEIVE, conn, probe_now(), frame.error_code, frame.frame_type,
                 QUICLY_PROBE_ESCAPE_UNSAFE_STRING(frame.reason_phrase.base, frame.reason_phrase.len));
    return handle_close(conn, QUICLY_ERROR_FROM_TRANSPORT_ERROR_CODE(frame.error_code), frame.frame_type, frame.reason_phrase);
}
+
/* Handler for the application-level CONNECTION_CLOSE frame. UINT64_MAX is
 * passed as the frame type, as this variant carries no offending-frame field. */
static int handle_application_close_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    quicly_application_close_frame_t frame;
    int ret;

    if ((ret = quicly_decode_application_close_frame(&state->src, state->end, &frame)) != 0)
        return ret;

    QUICLY_PROBE(APPLICATION_CLOSE_RECEIVE, conn, probe_now(), frame.error_code,
                 QUICLY_PROBE_ESCAPE_UNSAFE_STRING(frame.reason_phrase.base, frame.reason_phrase.len));
    return handle_close(conn, QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(frame.error_code), UINT64_MAX, frame.reason_phrase);
}
+
/* Handler for the PADDING frame — nothing to do. */
static int handle_padding_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    return 0;
}
+
/* Handler for the PING frame — no state to update; the frame being marked
 * ack-eliciting in handle_payload's dispatch table is its entire effect. */
static int handle_ping_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    return 0;
}
+
/* Handler for the NEW_CONNECTION_ID frame: the frame is decoded for wire
 * validity but its content is otherwise discarded (alternate connection IDs
 * are not adopted by this implementation). */
static int handle_new_connection_id_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    quicly_new_connection_id_frame_t frame;
    return quicly_decode_new_connection_id_frame(&state->src, state->end, &frame);
}
+
/* Handler for the RETIRE_CONNECTION_ID frame. Treated as a protocol violation;
 * NOTE(review): presumably because this endpoint never issues additional
 * connection IDs that could be retired — confirm. */
static int handle_retire_connection_id_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
}
+
/* Handler for the HANDSHAKE_DONE frame (client-only): stops address-validation
 * probing and discards the handshake packet-number space. */
static int handle_handshake_done_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
{
    int ret;

    QUICLY_PROBE(HANDSHAKE_DONE_RECEIVE, conn, probe_now());

    /* only servers send HANDSHAKE_DONE */
    if (!quicly_is_client(conn))
        return QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;

    assert(conn->initial == NULL);
    /* the handshake context may already have been discarded; nothing left to do */
    if (conn->handshake == NULL)
        return 0;

    conn->super.peer.address_validation.send_probe = 0;
    if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_HANDSHAKE)) != 0)
        return ret;
    update_loss_alarm(conn);
    return 0;
}
+
/* Decodes and dispatches every frame contained in a decrypted packet payload.
 * On return *is_ack_only indicates whether the packet carried only
 * non-ack-eliciting frames; when an error is returned, *offending_frame_type
 * holds the type of the frame that was being processed.
 * Assumes _len != 0 — the do-while reads one byte before any bounds check;
 * NOTE(review): confirm callers never pass an empty payload. */
static int handle_payload(quicly_conn_t *conn, size_t epoch, const uint8_t *_src, size_t _len, uint64_t *offending_frame_type,
                          int *is_ack_only)
{
    /* clang-format off */

    /* `frame_handlers` is an array of frame handlers and the properties of the frames, indexed by the ID of the frame. */
    static const struct {
        int (*cb)(quicly_conn_t *, struct st_quicly_handle_payload_state_t *); /* callback function that handles the frame */
        uint8_t permitted_epochs;  /* the epochs the frame can appear, calculated as bitwise-or of `1 << epoch` */
        uint8_t ack_eliciting;     /* boolean indicating if the frame is ack-eliciting */
    } frame_handlers[] = {
#define FRAME(n, i, z, h, o, ae)                                                                                                   \
    {                                                                                                                              \
        handle_##n##_frame,                                                                                                        \
        (i << QUICLY_EPOCH_INITIAL) | (z << QUICLY_EPOCH_0RTT) | (h << QUICLY_EPOCH_HANDSHAKE) | (o << QUICLY_EPOCH_1RTT),         \
        ae                                                                                                                         \
    }
        /* +----------------------+-------------------+---------------+
         * |                      |  permitted epochs |               |
         * |        frame         +----+----+----+----+ ack-eliciting |
         * |                      | IN | 0R | HS | 1R |               |
         * +----------------------+----+----+----+----+---------------+ */
        FRAME( padding              ,  1 ,  1 ,  1 ,  1 ,           0 ), /* 0 */
        FRAME( ping                 ,  1 ,  1 ,  1 ,  1 ,           1 ),
        /* two consecutive `ack` rows: types 0x02 / 0x03 (presumably ACK and ACK_ECN — confirm) share the handler */
        FRAME( ack                  ,  1 ,  0 ,  1 ,  1 ,           0 ),
        FRAME( ack                  ,  1 ,  0 ,  1 ,  1 ,           0 ),
        FRAME( reset_stream         ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( stop_sending         ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( crypto               ,  1 ,  0 ,  1 ,  1 ,           1 ),
        FRAME( new_token            ,  0 ,  0 ,  0 ,  1 ,           1 ),
        /* eight `stream` rows: the low 3 bits of STREAM frame types encode OFF/LEN/FIN flags */
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,           1 ), /* 8 */
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( stream               ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( max_data             ,  0 ,  1 ,  0 ,  1 ,           1 ), /* 16 */
        FRAME( max_stream_data      ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( max_streams          ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( max_streams          ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( data_blocked         ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( stream_data_blocked  ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( streams_blocked      ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( streams_blocked      ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( new_connection_id    ,  0 ,  1 ,  0 ,  1 ,           1 ), /* 24 */
        FRAME( retire_connection_id ,  0 ,  0 ,  0 ,  1 ,           1 ),
        FRAME( path_challenge       ,  0 ,  1 ,  0 ,  1 ,           1 ),
        FRAME( path_response        ,  0 ,  0 ,  0 ,  1 ,           1 ),
        FRAME( transport_close      ,  1 ,  1 ,  1 ,  1 ,           0 ),
        FRAME( application_close    ,  0 ,  1 ,  0 ,  1 ,           0 ),
        FRAME( handshake_done       ,  0 ,  0 ,  0 ,  1 ,           1 ),
        /* +----------------------+----+----+----+----+---------------+ */
#undef FRAME
    };
    /* clang-format on */

    struct st_quicly_handle_payload_state_t state = {_src, _src + _len, epoch};
    size_t num_frames = 0, num_frames_ack_eliciting = 0;
    int ret;

    do {
        /* `ret` is always assigned: the loop body executes at least once */
        state.frame_type = *state.src++;
        if (state.frame_type >= sizeof(frame_handlers) / sizeof(frame_handlers[0])) {
            ret = QUICLY_TRANSPORT_ERROR_FRAME_ENCODING;
            break;
        }
        /* reject frames that must not appear in this epoch */
        if ((frame_handlers[state.frame_type].permitted_epochs & (1 << epoch)) == 0) {
            ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
            break;
        }
        num_frames += 1;
        num_frames_ack_eliciting += frame_handlers[state.frame_type].ack_eliciting;
        if ((ret = (*frame_handlers[state.frame_type].cb)(conn, &state)) != 0)
            break;
    } while (state.src != state.end);

    *is_ack_only = num_frames_ack_eliciting == 0;
    if (ret != 0)
        *offending_frame_type = state.frame_type;
    return ret;
}
+
/* Processes a validated stateless reset: closes the connection immediately,
 * with an empty reason phrase and no offending frame type. */
static int handle_stateless_reset(quicly_conn_t *conn)
{
    QUICLY_PROBE(STATELESS_RESET_RECEIVE, conn, probe_now());
    return handle_close(conn, QUICLY_ERROR_RECEIVED_STATELESS_RESET, UINT64_MAX, ptls_iovec_init("", 0));
}
+
/* Verifies the AEAD integrity tag of a Retry packet by reconstructing the
 * Retry pseudo-packet (ODCID length octet + ODCID + the received Retry header)
 * and using it as AAD when decrypting the 16-byte tag in place. Returns
 * non-zero on success.
 * NOTE(review): `pseudo_packet` is a VLA sized from the packet header —
 * presumably bounded by the maximum datagram size validated upstream; confirm. */
static int validate_retry_tag(quicly_decoded_packet_t *packet, quicly_cid_t *odcid, ptls_aead_context_t *retry_aead)
{
    size_t pseudo_packet_len = 1 + odcid->len + packet->encrypted_off;
    uint8_t pseudo_packet[pseudo_packet_len];
    pseudo_packet[0] = odcid->len;
    memcpy(pseudo_packet + 1, odcid->cid, odcid->len);
    memcpy(pseudo_packet + 1 + odcid->len, packet->octets.base, packet->encrypted_off);
    return ptls_aead_decrypt(retry_aead, packet->octets.base + packet->encrypted_off, packet->octets.base + packet->encrypted_off,
                             PTLS_AESGCM_TAG_SIZE, 0, pseudo_packet, pseudo_packet_len) == 0;
}
+
/* Server-side entry point: creates a connection from a client's first Initial
 * packet. On success *conn holds the new connection. If a protocol error is
 * detected after the connection object exists, CONNECTION_CLOSE is scheduled
 * via initiate_close and 0 is returned; earlier failures return the error.
 * NOTE(review): if create_connection (or a later step) fails after
 * setup_initial_encryption succeeded, `ingress_cipher`/`egress_cipher` are not
 * visibly disposed on the Exit path — confirm whether that leaks. */
int quicly_accept(quicly_conn_t **conn, quicly_context_t *ctx, struct sockaddr *dest_addr, struct sockaddr *src_addr,
                  quicly_decoded_packet_t *packet, quicly_address_token_plaintext_t *address_token,
                  const quicly_cid_plaintext_t *new_cid, ptls_handshake_properties_t *handshake_properties)
{
    struct st_quicly_cipher_context_t ingress_cipher = {NULL}, egress_cipher = {NULL};
    ptls_iovec_t payload;
    uint64_t next_expected_pn, pn, offending_frame_type = QUICLY_FRAME_TYPE_PADDING;
    int is_ack_only, ret;

    *conn = NULL;

    update_now(ctx);

    /* process initials only */
    if ((packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) != QUICLY_PACKET_TYPE_INITIAL) {
        ret = QUICLY_ERROR_PACKET_IGNORED;
        goto Exit;
    }
    if (packet->version != QUICLY_PROTOCOL_VERSION) {
        ret = QUICLY_ERROR_PACKET_IGNORED;
        goto Exit;
    }
    /* client Initial DCIDs must be at least 8 bytes long */
    if (packet->cid.dest.encrypted.len < 8) {
        ret = QUICLY_TRANSPORT_ERROR_PROTOCOL_VIOLATION;
        goto Exit;
    }
    /* derive the Initial secrets from the client-chosen DCID (is_client = 0) */
    if ((ret = setup_initial_encryption(get_aes128gcmsha256(ctx), &ingress_cipher, &egress_cipher, packet->cid.dest.encrypted, 0,
                                        NULL)) != 0)
        goto Exit;
    next_expected_pn = 0; /* is this correct? do we need to take care of underflow? */
    if ((ret = decrypt_packet(ingress_cipher.header_protection, aead_decrypt_fixed_key, ingress_cipher.aead, &next_expected_pn,
                              packet, &pn, &payload)) != 0)
        goto Exit;

    /* create connection */
    if ((*conn = create_connection(ctx, NULL, src_addr, dest_addr, new_cid, handshake_properties)) == NULL) {
        ret = PTLS_ERROR_NO_MEMORY;
        goto Exit;
    }
    (*conn)->super.state = QUICLY_STATE_CONNECTED;
    set_cid(&(*conn)->super.peer.cid, packet->cid.src);
    set_cid(&(*conn)->super.host.offered_cid, packet->cid.dest.encrypted);
    if (address_token != NULL) {
        (*conn)->super.peer.address_validation.validated = 1;
        /* a Retry token also carries the original DCID, needed for the transport parameters */
        if (address_token->type == QUICLY_ADDRESS_TOKEN_TYPE_RETRY)
            set_cid(&(*conn)->retry_odcid, ptls_iovec_init(address_token->retry.odcid.cid, address_token->retry.odcid.len));
    }
    if ((ret = setup_handshake_space_and_flow(*conn, QUICLY_EPOCH_INITIAL)) != 0)
        goto Exit;
    /* transfer ownership of the cipher contexts into the connection */
    (*conn)->initial->super.next_expected_packet_number = next_expected_pn;
    (*conn)->initial->cipher.ingress = ingress_cipher;
    ingress_cipher = (struct st_quicly_cipher_context_t){NULL};
    (*conn)->initial->cipher.egress = egress_cipher;
    egress_cipher = (struct st_quicly_cipher_context_t){NULL};
    (*conn)->crypto.handshake_properties.collected_extensions = server_collected_extensions;

    QUICLY_PROBE(ACCEPT, *conn, probe_now(), QUICLY_PROBE_HEXDUMP(packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len),
                 address_token);
    QUICLY_PROBE(CRYPTO_DECRYPT, *conn, probe_now(), pn, payload.base, payload.len);
    QUICLY_PROBE(QUICTRACE_RECV, *conn, probe_now(), pn);

    /* handle the input; we ignore is_ack_only, we consult if there's any output from TLS in response to CH anyways */
    (*conn)->super.stats.num_packets.received += 1;
    (*conn)->super.stats.num_bytes.received += packet->octets.len;
    if ((ret = handle_payload(*conn, QUICLY_EPOCH_INITIAL, payload.base, payload.len, &offending_frame_type, &is_ack_only)) != 0)
        goto Exit;
    if ((ret = record_receipt(*conn, &(*conn)->initial->super, pn, 0, QUICLY_EPOCH_INITIAL)) != 0)
        goto Exit;

Exit:
    /* once the connection exists, protocol errors are reported to the peer instead of the caller */
    if (*conn != NULL && ret != 0) {
        initiate_close(*conn, ret, offending_frame_type, "");
        ret = 0;
    }
    return ret;
}
+
/* Feeds one decoded packet into an existing connection: selects the
 * packet-number space and keys by packet type, decrypts, dispatches the frames
 * via handle_payload, and performs the state transitions triggered by receipt
 * (Initial/Handshake discard, HANDSHAKE_DONE scheduling, idle-timeout reset).
 * Protocol errors are converted into a locally-initiated close and 0 is
 * returned; QUICLY_ERROR_PACKET_IGNORED is passed through to the caller. */
int quicly_receive(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr, quicly_decoded_packet_t *packet)
{
    ptls_cipher_context_t *header_protection;
    /* AEAD callback + context pair: a fixed key for long-header spaces, the
     * connection itself for 1-RTT (key phase handling) */
    struct {
        int (*cb)(void *, uint64_t, quicly_decoded_packet_t *, size_t, size_t *);
        void *ctx;
    } aead;
    struct st_quicly_pn_space_t **space;
    size_t epoch;
    ptls_iovec_t payload;
    uint64_t pn, offending_frame_type = QUICLY_FRAME_TYPE_PADDING;
    int is_ack_only, ret;

    update_now(conn->super.ctx);

    QUICLY_PROBE(RECEIVE, conn, probe_now(), QUICLY_PROBE_HEXDUMP(packet->cid.dest.encrypted.base, packet->cid.dest.encrypted.len),
                 packet->octets.base, packet->octets.len);

    if (is_stateless_reset(conn, packet)) {
        ret = handle_stateless_reset(conn);
        goto Exit;
    }

    /* FIXME check peer address */

    switch (conn->super.state) {
    case QUICLY_STATE_CLOSING:
        ++conn->egress.connection_close.num_packets_received;
        /* respond with a CONNECTION_CLOSE frame using exponential back-off */
        if (__builtin_popcountl(conn->egress.connection_close.num_packets_received) == 1)
            conn->egress.send_ack_at = 0;
        ret = 0;
        goto Exit;
    case QUICLY_STATE_DRAINING:
        /* draining: nothing is sent in response */
        ret = 0;
        goto Exit;
    default:
        break;
    }

    if (QUICLY_PACKET_IS_LONG_HEADER(packet->octets.base[0])) {
        /* version 0 marks a Version Negotiation packet; only honored before anything was received */
        if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT) {
            if (packet->version == 0)
                return handle_version_negotiation_packet(conn, packet);
        }
        if (packet->version != QUICLY_PROTOCOL_VERSION) {
            ret = QUICLY_ERROR_PACKET_IGNORED;
            goto Exit;
        }
        switch (packet->octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) {
        case QUICLY_PACKET_TYPE_RETRY: {
            assert(packet->encrypted_off + PTLS_AESGCM_TAG_SIZE == packet->octets.len);
            /* check the packet: a Retry must carry a SCID different from the DCID we are using */
            if (quicly_cid_is_equal(&conn->super.peer.cid, packet->cid.src)) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            /* do not accept a second Retry */
            if (conn->retry_odcid.len != 0) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            /* verify the integrity tag before acting on the packet */
            ptls_aead_context_t *retry_aead = create_retry_aead(conn->super.ctx, 0);
            int retry_ok = validate_retry_tag(packet, &conn->super.peer.cid, retry_aead);
            ptls_aead_free(retry_aead);
            if (!retry_ok) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            /* check size of the Retry packet */
            if (packet->token.len > QUICLY_MAX_TOKEN_LEN) {
                ret = QUICLY_ERROR_PACKET_IGNORED; /* TODO this is a immediate fatal error, chose a better error code */
                goto Exit;
            }
            /* store token and ODCID */
            free(conn->token.base);
            if ((conn->token.base = malloc(packet->token.len)) == NULL) {
                ret = PTLS_ERROR_NO_MEMORY;
                goto Exit;
            }
            memcpy(conn->token.base, packet->token.base, packet->token.len);
            conn->token.len = packet->token.len;
            conn->retry_odcid = conn->super.peer.cid;
            /* update DCID */
            set_cid(&conn->super.peer.cid, packet->cid.src);
            /* replace initial keys (Initial secrets are derived from the DCID, which just changed) */
            dispose_cipher(&conn->initial->cipher.ingress);
            dispose_cipher(&conn->initial->cipher.egress);
            if ((ret = setup_initial_encryption(get_aes128gcmsha256(conn->super.ctx), &conn->initial->cipher.ingress,
                                                &conn->initial->cipher.egress,
                                                ptls_iovec_init(conn->super.peer.cid.cid, conn->super.peer.cid.len), 1, NULL)) != 0)
                goto Exit;
            /* schedule retransmit */
            ret = discard_sentmap_by_epoch(conn, ~0u);
            goto Exit;
        } break;
        case QUICLY_PACKET_TYPE_INITIAL:
            if (conn->initial == NULL || (header_protection = conn->initial->cipher.ingress.header_protection) == NULL) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            /* update cid if this is the first Initial packet that's being received */
            if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT) {
                assert(quicly_is_client(conn));
                memcpy(conn->super.peer.cid.cid, packet->cid.src.base, packet->cid.src.len);
                conn->super.peer.cid.len = packet->cid.src.len;
            }
            aead.cb = aead_decrypt_fixed_key;
            aead.ctx = conn->initial->cipher.ingress.aead;
            space = (void *)&conn->initial;
            epoch = QUICLY_EPOCH_INITIAL;
            break;
        case QUICLY_PACKET_TYPE_HANDSHAKE:
            if (conn->handshake == NULL || (header_protection = conn->handshake->cipher.ingress.header_protection) == NULL) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            aead.cb = aead_decrypt_fixed_key;
            aead.ctx = conn->handshake->cipher.ingress.aead;
            space = (void *)&conn->handshake;
            epoch = QUICLY_EPOCH_HANDSHAKE;
            break;
        case QUICLY_PACKET_TYPE_0RTT:
            /* only a server can receive 0-RTT data */
            if (quicly_is_client(conn)) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            if (conn->application == NULL ||
                (header_protection = conn->application->cipher.ingress.header_protection.zero_rtt) == NULL) {
                ret = QUICLY_ERROR_PACKET_IGNORED;
                goto Exit;
            }
            aead.cb = aead_decrypt_fixed_key;
            aead.ctx = conn->application->cipher.ingress.aead[1];
            space = (void *)&conn->application;
            epoch = QUICLY_EPOCH_0RTT;
            break;
        default:
            ret = QUICLY_ERROR_PACKET_IGNORED;
            goto Exit;
        }
    } else {
        /* short header packet */
        if (conn->application == NULL ||
            (header_protection = conn->application->cipher.ingress.header_protection.one_rtt) == NULL) {
            ret = QUICLY_ERROR_PACKET_IGNORED;
            goto Exit;
        }
        aead.cb = aead_decrypt_1rtt;
        aead.ctx = conn;
        space = (void *)&conn->application;
        epoch = QUICLY_EPOCH_1RTT;
    }

    /* decrypt */
    if ((ret = decrypt_packet(header_protection, aead.cb, aead.ctx, &(*space)->next_expected_packet_number, packet, &pn,
                              &payload)) != 0) {
        ++conn->super.stats.num_packets.decryption_failed;
        /* NOTE(review): `pn` is read by the probe on this failure path — confirm decrypt_packet always assigns it */
        QUICLY_PROBE(CRYPTO_DECRYPT, conn, probe_now(), pn, NULL, 0);
        goto Exit;
    }

    QUICLY_PROBE(CRYPTO_DECRYPT, conn, probe_now(), pn, payload.base, payload.len);
    QUICLY_PROBE(QUICTRACE_RECV, conn, probe_now(), pn);

    /* update states */
    if (conn->super.state == QUICLY_STATE_FIRSTFLIGHT)
        conn->super.state = QUICLY_STATE_CONNECTED;
    conn->super.stats.num_packets.received += 1;
    conn->super.stats.num_bytes.received += packet->octets.len;

    /* state updates, that are triggered by the receipt of a packet */
    if (epoch == QUICLY_EPOCH_HANDSHAKE && conn->initial != NULL) {
        /* Discard Initial space before processing the payload of the Handshake packet to avoid the chance of an ACK frame included
         * in the Handshake packet setting a loss timer for the Initial packet. */
        if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_INITIAL)) != 0)
            goto Exit;
        update_loss_alarm(conn);
        conn->super.peer.address_validation.validated = 1;
    }

    /* handle the payload */
    if ((ret = handle_payload(conn, epoch, payload.base, payload.len, &offending_frame_type, &is_ack_only)) != 0)
        goto Exit;
    if (*space != NULL && conn->super.state < QUICLY_STATE_CLOSING) {
        if ((ret = record_receipt(conn, *space, pn, is_ack_only, epoch)) != 0)
            goto Exit;
    }

    /* state updates post payload processing */
    switch (epoch) {
    case QUICLY_EPOCH_INITIAL:
        assert(conn->initial != NULL);
        /* client: drop Initial keys as soon as Handshake egress keys are available */
        if (quicly_is_client(conn) && conn->handshake != NULL && conn->handshake->cipher.egress.aead != NULL) {
            if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_INITIAL)) != 0)
                goto Exit;
            update_loss_alarm(conn);
        }
        break;
    case QUICLY_EPOCH_HANDSHAKE:
        if (quicly_is_client(conn)) {
            /* Running as a client.
             * Respect "disable_migration" TP sent by the peer at the end of the TLS handshake. */
            if (conn->super.host.address.sa.sa_family == AF_UNSPEC && dest_addr != NULL && dest_addr->sa_family != AF_UNSPEC &&
                ptls_handshake_is_complete(conn->crypto.tls) && conn->super.peer.transport_params.disable_active_migration)
                set_address(&conn->super.host.address, dest_addr);
        } else {
            /* Running as a server.
             * If handshake was just completed, drop handshake context, schedule the first emission of HANDSHAKE_DONE frame. */
            if (ptls_handshake_is_complete(conn->crypto.tls)) {
                if ((ret = discard_handshake_context(conn, QUICLY_EPOCH_HANDSHAKE)) != 0)
                    goto Exit;
                assert(conn->handshake == NULL);
                conn->egress.pending_flows |= QUICLY_PENDING_FLOW_HANDSHAKE_DONE_BIT;
            }
        }
        break;
    case QUICLY_EPOCH_1RTT:
        /* schedule a MAX_DATA update if consuming this packet warrants one */
        if (!is_ack_only && should_send_max_data(conn))
            conn->egress.send_ack_at = 0;
        break;
    default:
        break;
    }

    update_idle_timeout(conn, 1);

Exit:
    switch (ret) {
    case 0:
        /* Avoid time in the past being emitted by quicly_get_first_timeout. We hit the condition below when retransmission is
         * suspended by the 3x limit (in which case we have loss.alarm_at set but return INT64_MAX from quicly_get_first_timeout
         * until we receive something from the client).
         */
        if (conn->egress.loss.alarm_at < now)
            conn->egress.loss.alarm_at = now;
        assert_consistency(conn, 0);
        break;
    case QUICLY_ERROR_PACKET_IGNORED:
        break;
    default: /* close connection */
        initiate_close(conn, ret, offending_frame_type, "");
        ret = 0;
        break;
    }
    return ret;
}
+
/* Opens a locally-initiated stream (bidirectional or unidirectional): picks
 * the flow-control limits for the chosen direction, registers the stream with
 * its stream group, marks it blocked when it exceeds the peer-imposed
 * stream-count limit, and hands it to the application via stream_open.
 * NOTE(review): when the stream_open callback fails, the already-created
 * stream is not visibly destroyed before returning — confirm callers' cleanup. */
int quicly_open_stream(quicly_conn_t *conn, quicly_stream_t **_stream, int uni)
{
    quicly_stream_t *stream;
    struct st_quicly_conn_streamgroup_state_t *group;
    uint64_t *max_stream_count;
    uint32_t max_stream_data_local;
    uint64_t max_stream_data_remote;
    int ret;

    /* determine the states */
    if (uni) {
        group = &conn->super.host.uni;
        max_stream_count = &conn->egress.max_streams.uni.count;
        max_stream_data_local = 0; /* nothing is received on a locally-initiated unidirectional stream */
        max_stream_data_remote = conn->super.peer.transport_params.max_stream_data.uni;
    } else {
        group = &conn->super.host.bidi;
        max_stream_count = &conn->egress.max_streams.bidi.count;
        max_stream_data_local = (uint32_t)conn->super.ctx->transport_params.max_stream_data.bidi_local;
        max_stream_data_remote = conn->super.peer.transport_params.max_stream_data.bidi_remote;
    }

    /* open */
    if ((stream = open_stream(conn, group->next_stream_id, max_stream_data_local, max_stream_data_remote)) == NULL)
        return PTLS_ERROR_NO_MEMORY;
    ++group->num_streams;
    group->next_stream_id += 4; /* stream IDs of one (initiator, direction) class are spaced 4 apart */

    /* adjust blocked */
    if (stream->stream_id / 4 >= *max_stream_count) {
        stream->streams_blocked = 1;
        quicly_linklist_insert((uni ? &conn->egress.pending_streams.blocked.uni : &conn->egress.pending_streams.blocked.bidi)->prev,
                               &stream->_send_aux.pending_link.control);
    }

    /* application-layer initialization */
    if ((ret = conn->super.ctx->stream_open->cb(conn->super.ctx->stream_open, stream)) != 0)
        return ret;

    *_stream = stream;
    return 0;
}
+
/* Resets the send side of a stream: discards pending send state and schedules
 * a RESET_STREAM frame carrying the QUIC application error code extracted from
 * `err`. Asserts that the stream has a send side, that the error is an
 * application error, and that it has not been reset or completed already. */
void quicly_reset_stream(quicly_stream_t *stream, int err)
{
    assert(quicly_stream_has_send_side(quicly_is_client(stream->conn), stream->stream_id));
    assert(QUICLY_ERROR_IS_QUIC_APPLICATION(err));
    assert(stream->_send_aux.reset_stream.sender_state == QUICLY_SENDER_STATE_NONE);
    assert(!quicly_sendstate_transfer_complete(&stream->sendstate));

    /* dispose sendbuf state */
    quicly_sendstate_reset(&stream->sendstate);

    /* setup RESET_STREAM */
    stream->_send_aux.reset_stream.sender_state = QUICLY_SENDER_STATE_SEND;
    stream->_send_aux.reset_stream.error_code = QUICLY_ERROR_GET_ERROR_CODE(err);

    /* schedule for delivery */
    sched_stream_control(stream);
    resched_stream_data(stream);
}
+
/* Asks the peer to stop sending on a stream by scheduling a STOP_SENDING frame
 * carrying the application error code derived from `err`. A no-op when the
 * receive side already saw end-of-stream or a request is pending. */
void quicly_request_stop(quicly_stream_t *stream, int err)
{
    assert(quicly_stream_has_receive_side(quicly_is_client(stream->conn), stream->stream_id));
    assert(QUICLY_ERROR_IS_QUIC_APPLICATION(err));

    /* send STOP_SENDING if the incoming side of the stream is still open */
    if (stream->recvstate.eos == UINT64_MAX && stream->_send_aux.stop_sending.sender_state == QUICLY_SENDER_STATE_NONE) {
        stream->_send_aux.stop_sending.sender_state = QUICLY_SENDER_STATE_SEND;
        stream->_send_aux.stop_sending.error_code = QUICLY_ERROR_GET_ERROR_CODE(err);
        sched_stream_control(stream);
    }
}
+
/* Returns the size of the concrete sockaddr structure for the address family
 * stored in `sa` (AF_INET or AF_INET6). Other families trip an assertion and
 * yield 0 in release builds. */
socklen_t quicly_get_socklen(struct sockaddr *sa)
{
    if (sa->sa_family == AF_INET)
        return sizeof(struct sockaddr_in);
    if (sa->sa_family == AF_INET6)
        return sizeof(struct sockaddr_in6);

    assert(!"unexpected socket type");
    return 0;
}
+
/* Writes a C-style-escaped rendition of `bytes` into `buf` and returns `buf`.
 * Printable ASCII other than quote characters and backslash is copied as-is;
 * every other byte is emitted as "\xHH". The caller must supply a buffer of at
 * least len * 4 + 1 bytes (worst case: every input byte escapes to 4 chars). */
char *quicly_escape_unsafe_string(char *buf, const void *bytes, size_t len)
{
    const char *in = bytes;
    char *out = buf;
    size_t i;

    for (i = 0; i != len; ++i) {
        char ch = in[i];
        int printable = 0x20 <= ch && ch <= 0x7e;
        int needs_quoting = ch == '"' || ch == '\'' || ch == '\\';
        if (printable && !needs_quoting) {
            *out++ = ch;
        } else {
            out[0] = '\\';
            out[1] = 'x';
            quicly_byte_to_hex(out + 2, (uint8_t)ch);
            out += 4;
        }
    }
    *out = '\0';

    return buf;
}
+
/* Renders `len` bytes as a malloc'ed, NUL-terminated string; caller frees.
 * indent == SIZE_MAX selects a flat dump (2 hex chars per byte); any other
 * value produces `hexdump -C`-style lines of 16 bytes: `indent` leading
 * spaces, a 4-hex-digit line offset, hex columns with '-' before the 9th byte,
 * and an ASCII gutter. Returns NULL when allocation fails. */
char *quicly_hexdump(const uint8_t *bytes, size_t len, size_t indent)
{
    /* flat mode needs len * 2 + 1 bytes; formatted mode is sized per 16-byte line:
     * indent + offset(4)+space + 16 * (sep + 2 hex) + 2 gutter spaces + 16 ASCII + newline */
    size_t i, line, row, bufsize = indent == SIZE_MAX ? len * 2 + 1 : (indent + 5 + 3 * 16 + 2 + 16 + 1) * ((len + 15) / 16) + 1;
    char *buf, *p;

    if ((buf = malloc(bufsize)) == NULL)
        return NULL;
    p = buf;
    if (indent == SIZE_MAX) {
        for (i = 0; i != len; ++i) {
            quicly_byte_to_hex(p, bytes[i]);
            p += 2;
        }
    } else {
        for (line = 0; line * 16 < len; ++line) {
            for (i = 0; i < indent; ++i)
                *p++ = ' ';
            /* 4 hex digits of the line number, i.e. offset / 16 */
            quicly_byte_to_hex(p, (line >> 4) & 0xff);
            p += 2;
            quicly_byte_to_hex(p, (line << 4) & 0xff);
            p += 2;
            *p++ = ' ';
            for (row = 0; row < 16; ++row) {
                *p++ = row == 8 ? '-' : ' ';
                if (line * 16 + row < len) {
                    quicly_byte_to_hex(p, bytes[line * 16 + row]);
                    p += 2;
                } else {
                    /* pad a short final line so the ASCII gutter stays aligned */
                    *p++ = ' ';
                    *p++ = ' ';
                }
            }
            *p++ = ' ';
            *p++ = ' ';
            for (row = 0; row < 16; ++row) {
                if (line * 16 + row < len) {
                    int ch = bytes[line * 16 + row];
                    *p++ = 0x20 <= ch && ch < 0x7f ? ch : '.';
                } else {
                    *p++ = ' ';
                }
            }
            *p++ = '\n';
        }
    }
    *p++ = '\0';

    assert(p - buf <= bufsize);

    return buf;
}
+
/* Adjusts a picotls context for use with QUIC: omit the EndOfEarlyData
 * message, place no TLS-level cap on early data, and route traffic-key
 * updates into quicly via update_traffic_key_cb. */
void quicly_amend_ptls_context(ptls_context_t *ptls)
{
    /* shared, immutable callback table — safe as a function-local static */
    static ptls_update_traffic_key_t update_traffic_key = {update_traffic_key_cb};

    ptls->omit_end_of_early_data = 1;
    ptls->max_early_data_size = UINT32_MAX;
    ptls->update_traffic_key = &update_traffic_key;
}
+
+int quicly_encrypt_address_token(void (*random_bytes)(void *, size_t), ptls_aead_context_t *aead, ptls_buffer_t *buf,
+ size_t start_off, const quicly_address_token_plaintext_t *plaintext)
+{
+ int ret;
+
+ /* type and IV */
+ if ((ret = ptls_buffer_reserve(buf, 1 + aead->algo->iv_size)) != 0)
+ goto Exit;
+ buf->base[buf->off++] = plaintext->type;
+ random_bytes(buf->base + buf->off, aead->algo->iv_size);
+ buf->off += aead->algo->iv_size;
+
+ size_t enc_start = buf->off;
+
+ /* data */
+ ptls_buffer_push64(buf, plaintext->issued_at);
+ {
+ uint16_t port;
+ ptls_buffer_push_block(buf, 1, {
+ switch (plaintext->remote.sa.sa_family) {
+ case AF_INET:
+ ptls_buffer_pushv(buf, &plaintext->remote.sin.sin_addr.s_addr, 4);
+ port = ntohs(plaintext->remote.sin.sin_port);
+ break;
+ case AF_INET6:
+ ptls_buffer_pushv(buf, &plaintext->remote.sin6.sin6_addr, 16);
+ port = ntohs(plaintext->remote.sin6.sin6_port);
+ break;
+ default:
+ assert(!"unspported address type");
+ break;
+ }
+ });
+ ptls_buffer_push16(buf, port);
+ }
+ switch (plaintext->type) {
+ case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
+ ptls_buffer_push_block(buf, 1, { ptls_buffer_pushv(buf, plaintext->retry.odcid.cid, plaintext->retry.odcid.len); });
+ ptls_buffer_push64(buf, plaintext->retry.cidpair_hash);
+ break;
+ case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
+ ptls_buffer_push_block(buf, 1, { ptls_buffer_pushv(buf, plaintext->resumption.bytes, plaintext->resumption.len); });
+ break;
+ default:
+ assert(!"unexpected token type");
+ abort();
+ }
+ ptls_buffer_push_block(buf, 1, { ptls_buffer_pushv(buf, plaintext->appdata.bytes, plaintext->appdata.len); });
+
+ /* encrypt, abusing the internal API to supply full IV */
+ if ((ret = ptls_buffer_reserve(buf, aead->algo->tag_size)) != 0)
+ goto Exit;
+ aead->do_encrypt_init(aead, buf->base + enc_start - aead->algo->iv_size, buf->base + start_off, enc_start - start_off);
+ ptls_aead_encrypt_update(aead, buf->base + enc_start, buf->base + enc_start, buf->off - enc_start);
+ ptls_aead_encrypt_final(aead, buf->base + buf->off);
+ buf->off += aead->algo->tag_size;
+
+Exit:
+ return ret;
+}
+
+/**
+ * Decrypts and decodes an address token (Retry or NEW_TOKEN) into `*plaintext`.
+ * Expected wire layout after `prefix_len` opaque bytes: a 1-byte type, the AEAD
+ * IV, then the AEAD-protected fields written by the encrypt side (issued_at,
+ * remote address and port, type-specific data, appdata); everything preceding
+ * the ciphertext is used as AAD. Returns 0 on success; otherwise a non-zero
+ * error code, with `*err_desc` pointing to a static description.
+ */
+int quicly_decrypt_address_token(ptls_aead_context_t *aead, quicly_address_token_plaintext_t *plaintext, const void *_token,
+ size_t len, size_t prefix_len, const char **err_desc)
+{
+ const uint8_t *const token = _token;
+ uint8_t ptbuf[QUICLY_MAX_PACKET_SIZE];
+ size_t ptlen;
+
+ /* the decrypted payload is strictly shorter than the input, hence always fits in ptbuf */
+ assert(len < QUICLY_MAX_PACKET_SIZE);
+
+ *err_desc = NULL;
+
+ /* check if can get type and decrypt */
+ if (len < prefix_len + 1 + aead->algo->iv_size + aead->algo->tag_size) {
+ *err_desc = "token too small";
+ return PTLS_ALERT_DECODE_ERROR;
+ }
+
+ /* check type; `plaintext->type` is set before any `goto Exit` so the Exit path can consult it */
+ switch (token[prefix_len]) {
+ case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
+ plaintext->type = QUICLY_ADDRESS_TOKEN_TYPE_RETRY;
+ break;
+ case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
+ plaintext->type = QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION;
+ break;
+ default:
+ *err_desc = "unknown token type";
+ return PTLS_ALERT_DECODE_ERROR;
+ }
+
+ /* `goto Exit` can only happen below this line, and that is guaranteed by declaring `ret` here */
+ int ret;
+
+ /* decrypt; the type byte, prefix and IV form the AAD */
+ if ((ptlen = aead->do_decrypt(aead, ptbuf, token + prefix_len + 1 + aead->algo->iv_size,
+ len - (prefix_len + 1 + aead->algo->iv_size), token + prefix_len + 1, token,
+ prefix_len + 1 + aead->algo->iv_size)) == SIZE_MAX) {
+ ret = PTLS_ALERT_DECRYPT_ERROR;
+ *err_desc = "token decryption failure";
+ goto Exit;
+ }
+
+ /* parse; note that the ptls_decode_* macros emit `goto Exit` on failure */
+ const uint8_t *src = ptbuf, *end = src + ptlen;
+ if ((ret = ptls_decode64(&plaintext->issued_at, &src, end)) != 0)
+ goto Exit;
+ {
+ in_port_t *portaddr;
+ ptls_decode_open_block(src, end, 1, {
+ /* the block length determines the address family */
+ switch (end - src) {
+ case 4: /* ipv4 */
+ plaintext->remote.sin.sin_family = AF_INET;
+ memcpy(&plaintext->remote.sin.sin_addr.s_addr, src, 4);
+ portaddr = &plaintext->remote.sin.sin_port;
+ break;
+ case 16: /* ipv6 */
+ plaintext->remote.sin6.sin6_family = AF_INET6;
+ memcpy(&plaintext->remote.sin6.sin6_addr, src, 16);
+ portaddr = &plaintext->remote.sin6.sin6_port;
+ break;
+ default:
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ src = end;
+ });
+ uint16_t port;
+ if ((ret = ptls_decode16(&port, &src, end)) != 0)
+ goto Exit;
+ *portaddr = htons(port);
+ }
+ switch (plaintext->type) {
+ case QUICLY_ADDRESS_TOKEN_TYPE_RETRY:
+ ptls_decode_open_block(src, end, 1, {
+ if ((plaintext->retry.odcid.len = end - src) > sizeof(plaintext->retry.odcid.cid)) {
+ ret = PTLS_ALERT_DECODE_ERROR;
+ goto Exit;
+ }
+ memcpy(plaintext->retry.odcid.cid, src, plaintext->retry.odcid.len);
+ src = end;
+ });
+ if ((ret = ptls_decode64(&plaintext->retry.cidpair_hash, &src, end)) != 0)
+ goto Exit;
+ break;
+ case QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION:
+ ptls_decode_open_block(src, end, 1, {
+ /* the 1-byte length prefix caps the block at 255 bytes, so a 256-byte buffer is always large enough */
+ QUICLY_BUILD_ASSERT(sizeof(plaintext->resumption.bytes) >= 256);
+ plaintext->resumption.len = end - src;
+ memcpy(plaintext->resumption.bytes, src, plaintext->resumption.len);
+ src = end;
+ });
+ break;
+ default:
+ assert(!"unexpected token type");
+ abort();
+ }
+ ptls_decode_block(src, end, 1, {
+ /* same bound as above: a 1-byte length prefix cannot exceed the 256-byte buffer */
+ QUICLY_BUILD_ASSERT(sizeof(plaintext->appdata.bytes) >= 256);
+ plaintext->appdata.len = end - src;
+ memcpy(plaintext->appdata.bytes, src, plaintext->appdata.len);
+ src = end;
+ });
+ ret = 0;
+
+Exit:
+ if (ret != 0) {
+ if (*err_desc == NULL)
+ *err_desc = "token decode error";
+ /* promote the error to one that triggers the emission of INVALID_TOKEN_ERROR, if the token looked like a retry */
+ if (plaintext->type == QUICLY_ADDRESS_TOKEN_TYPE_RETRY)
+ ret = QUICLY_TRANSPORT_ERROR_INVALID_TOKEN;
+ }
+ return ret;
+}
+
+/**
+ * no-op `on_destroy` stream callback (see quicly_stream_noop_callbacks)
+ */
+void quicly_stream_noop_on_destroy(quicly_stream_t *stream, int err)
+{
+}
+
+/**
+ * no-op `on_send_shift` stream callback (see quicly_stream_noop_callbacks)
+ */
+void quicly_stream_noop_on_send_shift(quicly_stream_t *stream, size_t delta)
+{
+}
+
+/**
+ * no-op `on_send_emit` stream callback; emits nothing (leaves `*len` and
+ * `*wrote_all` unmodified)
+ */
+void quicly_stream_noop_on_send_emit(quicly_stream_t *stream, size_t off, void *dst, size_t *len, int *wrote_all)
+{
+}
+
+/**
+ * no-op `on_send_stop` stream callback (see quicly_stream_noop_callbacks)
+ */
+void quicly_stream_noop_on_send_stop(quicly_stream_t *stream, int err)
+{
+}
+
+/**
+ * no-op `on_receive` stream callback; received data is silently discarded
+ */
+void quicly_stream_noop_on_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len)
+{
+}
+
+/**
+ * no-op `on_receive_reset` stream callback (see quicly_stream_noop_callbacks)
+ */
+void quicly_stream_noop_on_receive_reset(quicly_stream_t *stream, int err)
+{
+}
+
+/**
+ * A callback table whose members are all no-ops, usable for streams that need
+ * no application-level processing. Designated initializers make the mapping to
+ * each member explicit and robust against reordering of the struct fields.
+ */
+const quicly_stream_callbacks_t quicly_stream_noop_callbacks = {
+    .on_destroy = quicly_stream_noop_on_destroy,
+    .on_send_shift = quicly_stream_noop_on_send_shift,
+    .on_send_emit = quicly_stream_noop_on_send_emit,
+    .on_send_stop = quicly_stream_noop_on_send_stop,
+    .on_receive = quicly_stream_noop_on_receive,
+    .on_receive_reset = quicly_stream_noop_on_receive_reset};
+
+/**
+ * Formats a debug message and hands it to the probe/tracing backend. The body
+ * compiles to a no-op unless embedded probes or DTrace support is enabled.
+ */
+void quicly__debug_printf(quicly_conn_t *conn, const char *function, int line, const char *fmt, ...)
+{
+#if QUICLY_USE_EMBEDDED_PROBES || QUICLY_USE_DTRACE
+ char buf[1024];
+ va_list args;
+
+ /* skip the formatting cost when no tracer is attached */
+ if (!QUICLY_DEBUG_MESSAGE_ENABLED())
+ return;
+
+ /* vsnprintf truncates to 1023 chars + NUL; acceptable for debug output */
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ QUICLY_DEBUG_MESSAGE(conn, function, line, buf);
+#endif
+}
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_h
+#define quicly_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <netinet/in.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include "contrib/quicly/picotls/picotls.h"
+#include "contrib/quicly/constants.h"
+#include "contrib/quicly/frame.h"
+#include "contrib/quicly/linklist.h"
+#include "contrib/quicly/loss.h"
+#include "contrib/quicly/cc.h"
+#include "contrib/quicly/recvstate.h"
+#include "contrib/quicly/sendstate.h"
+#include "contrib/quicly/maxsender.h"
+
+#ifndef QUICLY_DEBUG
+#define QUICLY_DEBUG 0
+#endif
+
+/* invariants! */
+#define QUICLY_LONG_HEADER_BIT 0x80
+#define QUICLY_QUIC_BIT 0x40
+#define QUICLY_KEY_PHASE_BIT 0x4
+#define QUICLY_LONG_HEADER_RESERVED_BITS 0xc
+#define QUICLY_SHORT_HEADER_RESERVED_BITS 0x18
+
+#define QUICLY_PACKET_TYPE_INITIAL (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0)
+#define QUICLY_PACKET_TYPE_0RTT (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x10)
+#define QUICLY_PACKET_TYPE_HANDSHAKE (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x20)
+#define QUICLY_PACKET_TYPE_RETRY (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x30)
+#define QUICLY_PACKET_TYPE_BITMASK 0xf0
+
+#define QUICLY_PACKET_IS_LONG_HEADER(first_byte) (((first_byte)&QUICLY_LONG_HEADER_BIT) != 0)
+
+#define QUICLY_PROTOCOL_VERSION 0xff00001b
+
+#define QUICLY_PACKET_IS_INITIAL(first_byte) (((first_byte)&0xf0) == 0xc0)
+
+#define QUICLY_STATELESS_RESET_PACKET_MIN_LEN 39
+
+#define QUICLY_MAX_PN_SIZE 4 /* maximum defined by the RFC used for calculating header protection sampling offset */
+#define QUICLY_SEND_PN_SIZE 2 /* size of PN used for sending */
+
+#define QUICLY_AEAD_BASE_LABEL "tls13 quic "
+
+/**
+ * a socket address large enough to hold either an IPv4 or IPv6 address;
+ * `sa.sa_family` selects the active member
+ */
+typedef union st_quicly_address_t {
+ struct sockaddr sa;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+} quicly_address_t;
+
+/**
+ * a UDP datagram to be sent: the payload together with the destination and
+ * source addresses
+ */
+typedef struct st_quicly_datagram_t {
+ ptls_iovec_t data;
+ quicly_address_t dest, src;
+} quicly_datagram_t;
+
+typedef struct st_quicly_cid_t quicly_cid_t;
+typedef struct st_quicly_cid_plaintext_t quicly_cid_plaintext_t;
+typedef struct st_quicly_context_t quicly_context_t;
+/* forward declaration of the (opaque) connection object; required by the
+ * callback typedefs and prototypes below that take `quicly_conn_t *`, and
+ * missing from the list as vendored (C11 6.7p3 permits an identical repeated
+ * typedef, so this is safe even if also declared elsewhere) */
+typedef struct st_quicly_conn_t quicly_conn_t;
+typedef struct st_quicly_stream_t quicly_stream_t;
+typedef struct st_quicly_send_context_t quicly_send_context_t;
+typedef struct st_quicly_address_token_plaintext_t quicly_address_token_plaintext_t;
+
+#define QUICLY_CALLBACK_TYPE0(ret, name) \
+ typedef struct st_quicly_##name##_t { \
+ ret (*cb)(struct st_quicly_##name##_t * self); \
+ } quicly_##name##_t
+
+#define QUICLY_CALLBACK_TYPE(ret, name, ...) \
+ typedef struct st_quicly_##name##_t { \
+ ret (*cb)(struct st_quicly_##name##_t * self, __VA_ARGS__); \
+ } quicly_##name##_t
+
+/**
+ * allocates a packet buffer
+ */
+typedef struct st_quicly_packet_allocator_t {
+ quicly_datagram_t *(*alloc_packet)(struct st_quicly_packet_allocator_t *self, size_t payloadsize);
+ void (*free_packet)(struct st_quicly_packet_allocator_t *self, quicly_datagram_t *packet);
+} quicly_packet_allocator_t;
+
+/**
+ * CID encryption
+ */
+typedef struct st_quicly_cid_encryptor_t {
+ /**
+ * encrypts CID and optionally generates a stateless reset token
+ */
+ void (*encrypt_cid)(struct st_quicly_cid_encryptor_t *self, quicly_cid_t *encrypted, void *stateless_reset_token,
+ const quicly_cid_plaintext_t *plaintext);
+ /**
+ * decrypts CID. plaintext->thread_id should contain a randomly distributed number when validation fails, so that the value can
+ * be used for distributing load among the threads within the process.
+ * @param len length of encrypted bytes if known, or 0 if unknown (short header packet)
+ * @return length of the CID, or SIZE_MAX if decryption failed
+ */
+ size_t (*decrypt_cid)(struct st_quicly_cid_encryptor_t *self, quicly_cid_plaintext_t *plaintext, const void *encrypted,
+ size_t len);
+ /**
+ * generates a stateless reset token (returns if generated)
+ */
+ int (*generate_stateless_reset_token)(struct st_quicly_cid_encryptor_t *self, void *token, const void *cid);
+} quicly_cid_encryptor_t;
+
+/**
+ * stream scheduler
+ */
+typedef struct st_quicly_stream_scheduler_t {
+ /**
+ * returns if there's any data to send.
+ * @param conn_is_saturated set if the connection-level flow control window is currently saturated
+ */
+ int (*can_send)(struct st_quicly_stream_scheduler_t *sched, quicly_conn_t *conn, int conn_is_saturated);
+ /**
+ * Called by quicly to emit stream data. The scheduler should repeatedly choose a stream and call `quicly_send_stream` until
+ * `quicly_can_send_stream` returns false.
+ */
+ int (*do_send)(struct st_quicly_stream_scheduler_t *sched, quicly_conn_t *conn, quicly_send_context_t *s);
+ /**
+ *
+ */
+ int (*update_state)(struct st_quicly_stream_scheduler_t *sched, quicly_stream_t *stream);
+} quicly_stream_scheduler_t;
+
+/**
+ * called when a stream is being opened. The application is expected to create its corresponding state and tie it to stream->data.
+ */
+QUICLY_CALLBACK_TYPE(int, stream_open, quicly_stream_t *stream);
+/**
+ * called when the connection is closed by peer
+ */
+QUICLY_CALLBACK_TYPE(void, closed_by_peer, quicly_conn_t *conn, int err, uint64_t frame_type, const char *reason,
+ size_t reason_len);
+/**
+ * returns current time in milliseconds
+ */
+QUICLY_CALLBACK_TYPE0(int64_t, now);
+/**
+ * called when a NEW_TOKEN token is received on a connection
+ */
+QUICLY_CALLBACK_TYPE(int, save_resumption_token, quicly_conn_t *conn, ptls_iovec_t token);
+/**
+ *
+ */
+QUICLY_CALLBACK_TYPE(int, generate_resumption_token, quicly_conn_t *conn, ptls_buffer_t *buf,
+ quicly_address_token_plaintext_t *token);
+/**
+ * crypto offload API
+ */
+typedef struct st_quicly_crypto_engine_t {
+ /**
+ * Callback used for setting up the header protection keys / packet protection keys. The callback MUST initialize or replace
+ * `header_protect_ctx` and `packet_protect_ctx` as specified by QUIC-TLS. This callback might be called more than once for
+ * 1-RTT epoch, when the key is updated. In such case, there is no need to update the header protection context, and therefore
+ * `header_protect_ctx` will be NULL.
+ *
+ * @param header_protect_ctx address of where the header protection context should be written. Might be NULL when called to
+ * handle 1-RTT Key Update.
+ * @param packet_protect_ctx address of where the packet protection context should be written.
+ * @param secret the secret from which the protection keys are derived. The length of the secret is
+ `hash->digest_size`.
+ * @note At the moment, the callback is not invoked for Initial keys when running as server.
+ */
+ int (*setup_cipher)(struct st_quicly_crypto_engine_t *engine, quicly_conn_t *conn, size_t epoch, int is_enc,
+ ptls_cipher_context_t **header_protect_ctx, ptls_aead_context_t **packet_protect_ctx,
+ ptls_aead_algorithm_t *aead, ptls_hash_algorithm_t *hash, const void *secret);
+ /**
+ * Callback used for sealing the send packet. What "sealing" means depends on the packet_protection_ctx being returned by the
+ * `setup_cipher` callback.
+ * * If the packet protection context was a real cipher, the payload of the packet is already encrypted when this callback is
+ * invoked. The responsibility of this callback is to apply header protection.
+ * * If the packet protection context was a fake (i.e. pass-through) cipher, the responsibility of this callback is to
+ * AEAD-protect the packet and to apply header protection.
+ * The protection can be delayed until after `quicly_datagram_t` is returned by the `quicly_send` function.
+ */
+ void (*finalize_send_packet)(struct st_quicly_crypto_engine_t *engine, quicly_conn_t *conn,
+ ptls_cipher_context_t *header_protect_ctx, ptls_aead_context_t *packet_protect_ctx,
+ quicly_datagram_t *packet, size_t first_byte_at, size_t payload_from, int coalesced);
+} quicly_crypto_engine_t;
+
+/**
+ * per-stream-group flow-control limits (in octets; see
+ * quicly_transport_parameters_t::max_stream_data)
+ */
+typedef struct st_quicly_max_stream_data_t {
+ uint64_t bidi_local, bidi_remote, uni;
+} quicly_max_stream_data_t;
+
+/**
+ * Transport Parameters; the struct contains "configuration parameters", ODCID is managed separately
+ */
+typedef struct st_quicly_transport_parameters_t {
+ /**
+ * in octets
+ */
+ quicly_max_stream_data_t max_stream_data;
+ /**
+ * in octets
+ */
+ uint64_t max_data;
+ /**
+ * in milliseconds
+ */
+ uint64_t max_idle_timeout;
+ /**
+ *
+ */
+ uint64_t max_streams_bidi;
+ /**
+ *
+ */
+ uint64_t max_streams_uni;
+ /**
+ * quicly ignores the value set for quicly_context_t::transport_parameters
+ */
+ uint8_t ack_delay_exponent;
+ /**
+ * in milliseconds; quicly ignores the value set for quicly_context_t::transport_parameters
+ */
+ uint16_t max_ack_delay;
+ /**
+ *
+ */
+ uint8_t disable_active_migration : 1;
+} quicly_transport_parameters_t;
+
+/**
+ * a QUIC Connection ID: up to QUICLY_MAX_CID_LEN_V1 octets, of which the first
+ * `len` are valid
+ */
+struct st_quicly_cid_t {
+ uint8_t cid[QUICLY_MAX_CID_LEN_V1];
+ uint8_t len;
+};
+
+/**
+ * Guard value. We would never send path_id of this value.
+ */
+#define QUICLY_MAX_PATH_ID UINT8_MAX
+
+/**
+ * The structure of CID issued by quicly.
+ *
+ * Authentication of the CID can be done by validating if server_id and thread_id contain correct values.
+ */
+struct st_quicly_cid_plaintext_t {
+ /**
+ * the internal "connection ID" unique to each connection (rather than QUIC's CID being unique to each path)
+ */
+ uint32_t master_id;
+ /**
+ * path ID of the connection; we issue up to 255 CIDs per connection (see QUICLY_MAX_PATH_ID)
+ */
+ uint32_t path_id : 8;
+ /**
+ * for intra-node routing
+ */
+ uint32_t thread_id : 24;
+ /**
+ * for inter-node routing; available only when using a 16-byte cipher to encrypt CIDs, otherwise set to zero. See
+ * quicly_context_t::is_clustered.
+ */
+ uint64_t node_id;
+};
+
+struct st_quicly_context_t {
+ /**
+ * tls context to use
+ */
+ ptls_context_t *tls;
+ /**
+ * MTU
+ */
+ uint16_t max_packet_size;
+ /**
+ * loss detection parameters
+ */
+ quicly_loss_conf_t loss;
+ /**
+ * transport parameters
+ */
+ quicly_transport_parameters_t transport_params;
+ /**
+ * number of packets that can be sent without a key update
+ */
+ uint64_t max_packets_per_key;
+ /**
+ * maximum number of bytes that can be transmitted on a CRYPTO stream (per each epoch)
+ */
+ uint64_t max_crypto_bytes;
+ /**
+ * client-only
+ */
+ unsigned enforce_version_negotiation : 1;
+ /**
+ * if inter-node routing is used (by utilising quicly_cid_plaintext_t::node_id)
+ */
+ unsigned is_clustered : 1;
+ /**
+ * expand client hello so that it does not fit into one datagram
+ */
+ unsigned expand_client_hello : 1;
+ /**
+ * callback for allocating memory for raw packet
+ */
+ quicly_packet_allocator_t *packet_allocator;
+ /**
+ *
+ */
+ quicly_cid_encryptor_t *cid_encryptor;
+ /**
+ * callback called when a new stream is opened by peer
+ */
+ quicly_stream_open_t *stream_open;
+ /**
+ * callbacks for scheduling stream data
+ */
+ quicly_stream_scheduler_t *stream_scheduler;
+ /**
+ * callback called when a connection is closed by peer
+ */
+ quicly_closed_by_peer_t *closed_by_peer;
+ /**
+ * returns current time in milliseconds
+ */
+ quicly_now_t *now;
+ /**
+ * called when a NEW_TOKEN token is being received
+ */
+ quicly_save_resumption_token_t *save_resumption_token;
+ /**
+ *
+ */
+ quicly_generate_resumption_token_t *generate_resumption_token;
+ /**
+ * crypto engine (offload API)
+ */
+ quicly_crypto_engine_t *crypto_engine;
+};
+
+/**
+ * connection state
+ */
+typedef enum {
+ /**
+ * before observing the first message from peer
+ */
+ QUICLY_STATE_FIRSTFLIGHT,
+ /**
+ * while connected
+ */
+ QUICLY_STATE_CONNECTED,
+ /**
+ * sending close, but haven't seen the peer sending close
+ */
+ QUICLY_STATE_CLOSING,
+ /**
+ * we do not send CLOSE (at the moment), enter draining mode when receiving CLOSE
+ */
+ QUICLY_STATE_DRAINING
+} quicly_state_t;
+
+/**
+ * per-group (uni/bidi, host/peer) stream bookkeeping: the number of open
+ * streams and, presumably, the id to be assigned to the next stream opened in
+ * the group — confirm against the stream-open path in quicly.c
+ */
+struct st_quicly_conn_streamgroup_state_t {
+ uint32_t num_streams;
+ quicly_stream_id_t next_stream_id;
+};
+
+/**
+ * Values that do not need to be gathered upon the invocation of `quicly_get_stats`. We use typedef to define the same fields in
+ * the same order for quicly_stats_t and `struct st_quicly_public_conn_t::stats`.
+ */
+#define QUICLY_STATS_PREBUILT_FIELDS \
+ struct { \
+ uint64_t received; \
+ uint64_t decryption_failed; \
+ uint64_t sent; \
+ uint64_t lost; \
+ uint64_t ack_received; \
+ uint64_t late_acked; \
+ } num_packets; \
+ struct { \
+ uint64_t received; \
+ uint64_t sent; \
+ } num_bytes
+
+typedef struct st_quicly_stats_t {
+ /**
+ * The pre-built fields. This MUST be the first member of `quicly_stats_t` so that we can use `memcpy`.
+ */
+ QUICLY_STATS_PREBUILT_FIELDS;
+ /**
+ * RTT
+ */
+ quicly_rtt_t rtt;
+ /**
+ * Congestion control (experimental; TODO cherry-pick what can be exposed as part of a stable API)
+ */
+ quicly_cc_t cc;
+} quicly_stats_t;
+
+/**
+ * The state of the default stream scheduler.
+ * `active` is a linked-list of streams for which STREAM frames can be emitted. `blocked` is a linked-list of streams that have
+ * something to be sent but are currently blocked by the connection-level flow control.
+ * When the `can_send` callback of the default stream scheduler is invoked with the `conn_is_saturated` flag set, connections that
+ * are blocked are eventually moved to the `blocked` list. When the callback is invoked without the flag being set, all the
+ * connections in the `blocked` list are moved to the `active` list and the `in_saturated_mode` is cleared.
+ */
+struct st_quicly_default_scheduler_state_t {
+ quicly_linklist_t active;
+ quicly_linklist_t blocked;
+};
+
+struct _st_quicly_conn_public_t {
+ quicly_context_t *ctx;
+ quicly_state_t state;
+ /**
+ * identifier assigned by the application. `path_id` stores the next value to be issued
+ */
+ quicly_cid_plaintext_t master_id;
+ struct {
+ /**
+ * the local address (may be AF_UNSPEC)
+ */
+ quicly_address_t address;
+ /**
+ * the SCID used in long header packets
+ */
+ quicly_cid_t src_cid;
+ /**
+ * stateless reset token announced by the host. We have only one token per connection. The token will be cached in this
+ * variable when the generate_stateless_reset_token is non-NULL.
+ */
+ uint8_t stateless_reset_token[QUICLY_STATELESS_RESET_TOKEN_LEN];
+ /**
+ * TODO clear this at some point (probably when the server releases all the keys below epoch=3)
+ */
+ quicly_cid_t offered_cid;
+ struct st_quicly_conn_streamgroup_state_t bidi, uni;
+ } host;
+ struct {
+ /**
+ * the remote address (cannot be AF_UNSPEC)
+ */
+ quicly_address_t address;
+ /**
+ * CID used for emitting the packets
+ */
+ quicly_cid_t cid;
+ /**
+ * stateless reset token corresponding to the CID
+ */
+ struct {
+ uint8_t *token;
+ uint8_t _buf[QUICLY_STATELESS_RESET_TOKEN_LEN];
+ } stateless_reset;
+ struct st_quicly_conn_streamgroup_state_t bidi, uni;
+ quicly_transport_parameters_t transport_params;
+ struct {
+ unsigned validated : 1;
+ unsigned send_probe : 1;
+ } address_validation;
+ } peer;
+ struct st_quicly_default_scheduler_state_t _default_scheduler;
+ struct {
+ QUICLY_STATS_PREBUILT_FIELDS;
+ } stats;
+ uint32_t version;
+ void *data;
+};
+
+typedef enum {
+ /**
+ * initial state
+ */
+ QUICLY_SENDER_STATE_NONE,
+ /**
+ * to be sent. Changes to UNACKED when sent out by quicly_send
+ */
+ QUICLY_SENDER_STATE_SEND,
+ /**
+ * inflight. changes to SEND (when packet is deemed lost), or ACKED (when packet is ACKed)
+ */
+ QUICLY_SENDER_STATE_UNACKED,
+ /**
+ * the sent value acknowledged by peer
+ */
+ QUICLY_SENDER_STATE_ACKED,
+} quicly_sender_state_t;
+
+/**
+ * API that allows applications to specify their own send / receive buffer. The callback should be assigned by the
+ * `quicly_context_t::on_stream_open` callback.
+ */
+typedef struct st_quicly_stream_callbacks_t {
+ /**
+ * called when the stream is destroyed
+ */
+ void (*on_destroy)(quicly_stream_t *stream, int err);
+ /**
+ * called whenever data can be retired from the send buffer, specifying the amount that can be newly removed
+ */
+ void (*on_send_shift)(quicly_stream_t *stream, size_t delta);
+ /**
+ * asks the application to fill the frame payload. `off` is the offset within the buffer (the beginning position of the buffer
+ * changes as `on_send_shift` is invoked). `len` is an in/out argument that specifies the size of the buffer / amount of data
+ * being written. `wrote_all` is a boolean out parameter indicating if the application has written all the available data.
+ * As this callback is triggered by calling quicly_stream_sync_sendbuf (stream, 1) when tx data is present, it assumes data
+ * to be available - that is `len` return value should be non-zero.
+ */
+ void (*on_send_emit)(quicly_stream_t *stream, size_t off, void *dst, size_t *len, int *wrote_all);
+ /**
+ * called when a STOP_SENDING frame is received. Do not call `quicly_reset_stream` in response. The stream will be
+ * automatically reset by quicly.
+ */
+ void (*on_send_stop)(quicly_stream_t *stream, int err);
+ /**
+ * called when data is newly received. `off` is the offset within the buffer (the beginning position changes as the application
+ * calls `quicly_stream_sync_recvbuf`. Applications should consult `quicly_stream_t::recvstate` to see if it has contiguous
+ * input.
+ */
+ void (*on_receive)(quicly_stream_t *stream, size_t off, const void *src, size_t len);
+ /**
+ * called when a RESET_STREAM frame is received
+ */
+ void (*on_receive_reset)(quicly_stream_t *stream, int err);
+} quicly_stream_callbacks_t;
+
+struct st_quicly_stream_t {
+ /**
+ *
+ */
+ quicly_conn_t *conn;
+ /**
+ * stream id
+ */
+ quicly_stream_id_t stream_id;
+ /**
+ *
+ */
+ const quicly_stream_callbacks_t *callbacks;
+ /**
+ * send buffer
+ */
+ quicly_sendstate_t sendstate;
+ /**
+ * receive buffer
+ */
+ quicly_recvstate_t recvstate;
+ /**
+ *
+ */
+ void *data;
+ /**
+ *
+ */
+ unsigned streams_blocked : 1;
+ /**
+ *
+ */
+ struct {
+ /**
+ * send window
+ */
+ uint64_t max_stream_data;
+ /**
+ *
+ */
+ struct {
+ quicly_sender_state_t sender_state;
+ uint16_t error_code;
+ } stop_sending;
+ /**
+ * reset_stream
+ */
+ struct {
+ /**
+ * STATE_NONE until RST is generated
+ */
+ quicly_sender_state_t sender_state;
+ uint16_t error_code;
+ } reset_stream;
+ /**
+ * sends receive window updates to peer
+ */
+ quicly_maxsender_t max_stream_data_sender;
+ /**
+ * linklist of pending streams
+ */
+ struct {
+ quicly_linklist_t control; /* links to conn_t::control (or to conn_t::streams_blocked if the blocked flag is set) */
+ quicly_linklist_t default_scheduler;
+ } pending_link;
+ } _send_aux;
+ /**
+ *
+ */
+ struct {
+ /**
+ * size of the receive window
+ */
+ uint32_t window;
+ /**
+ * Maximum number of ranges (i.e. gaps + 1) permitted in `recvstate.ranges`.
+ * As discussed in https://github.com/h2o/quicly/issues/278, this value should be proportional to the size of the receive
+ * window, so that the receive window can be maintained even in the worst case, where every one of the two packets being
+ * sent are received.
+ */
+ uint32_t max_ranges;
+ } _recv_aux;
+};
+
+typedef struct st_quicly_decoded_packet_t {
+ /**
+ * octets of the entire packet
+ */
+ ptls_iovec_t octets;
+ /**
+ * Connection ID(s)
+ */
+ struct {
+ /**
+ * destination CID
+ */
+ struct {
+ /**
+ * CID visible on wire
+ */
+ ptls_iovec_t encrypted;
+ /**
+ * the decrypted CID; note that the value is not authenticated
+ */
+ quicly_cid_plaintext_t plaintext;
+ /**
+ *
+ */
+ unsigned might_be_client_generated : 1;
+ } dest;
+ /**
+ * source CID; {NULL, 0} if is a short header packet
+ */
+ ptls_iovec_t src;
+ } cid;
+ /**
+ * version; 0 if is a short header packet
+ */
+ uint32_t version;
+ /**
+ * token if available; otherwise {NULL, 0}
+ */
+ ptls_iovec_t token;
+ /**
+ * starting offset of data (i.e., version-dependent area of a long header packet (version numbers in case of VN), AEAD tag (in
+ * case of retry), encrypted PN (if decrypted.pn is UINT64_MAX) or data (if decrypted_pn is not UINT64_MAX))
+ */
+ size_t encrypted_off;
+ /**
+ * size of the datagram
+ */
+ size_t datagram_size;
+ /**
+ * when decrypted.pn is not UINT64_MAX, indicates that the packet has been decrypted prior to being passed to `quicly_receive`.
+ */
+ struct {
+ uint64_t pn;
+ uint64_t key_phase;
+ } decrypted;
+ /**
+ *
+ */
+ enum {
+ QUICLY__DECODED_PACKET_CACHED_MAYBE_STATELESS_RESET = 0,
+ QUICLY__DECODED_PACKET_CACHED_IS_STATELESS_RESET,
+ QUICLY__DECODED_PACKET_CACHED_NOT_STATELESS_RESET
+ } _is_stateless_reset_cached;
+} quicly_decoded_packet_t;
+
+struct st_quicly_address_token_plaintext_t {
+ enum { QUICLY_ADDRESS_TOKEN_TYPE_RETRY, QUICLY_ADDRESS_TOKEN_TYPE_RESUMPTION } type;
+ uint64_t issued_at;
+ quicly_address_t local, remote;
+ union {
+ struct {
+ quicly_cid_t odcid;
+ uint64_t cidpair_hash;
+ } retry;
+ struct {
+ uint8_t bytes[256];
+ size_t len;
+ } resumption;
+ };
+ struct {
+ uint8_t bytes[256];
+ size_t len;
+ } appdata;
+};
+
+/**
+ *
+ */
+size_t quicly_decode_packet(quicly_context_t *ctx, quicly_decoded_packet_t *packet, const uint8_t *src, size_t len);
+/**
+ *
+ */
+uint64_t quicly_determine_packet_number(uint32_t truncated, size_t num_bits, uint64_t expected);
+/**
+ *
+ */
+static int quicly_cid_is_equal(const quicly_cid_t *cid, ptls_iovec_t vec);
+/**
+ *
+ */
+static quicly_context_t *quicly_get_context(quicly_conn_t *conn);
+/**
+ *
+ */
+static const quicly_cid_plaintext_t *quicly_get_master_id(quicly_conn_t *conn);
+/**
+ *
+ */
+static const quicly_cid_t *quicly_get_offered_cid(quicly_conn_t *conn);
+/**
+ *
+ */
+static const quicly_cid_t *quicly_get_peer_cid(quicly_conn_t *conn);
+/**
+ *
+ */
+static const quicly_transport_parameters_t *quicly_get_peer_transport_parameters(quicly_conn_t *conn);
+/**
+ *
+ */
+static quicly_state_t quicly_get_state(quicly_conn_t *conn);
+/**
+ *
+ */
+int quicly_connection_is_ready(quicly_conn_t *conn);
+/**
+ *
+ */
+static uint32_t quicly_num_streams(quicly_conn_t *conn);
+/**
+ *
+ */
+static int quicly_is_client(quicly_conn_t *conn);
+/**
+ *
+ */
+static quicly_stream_id_t quicly_get_host_next_stream_id(quicly_conn_t *conn, int uni);
+/**
+ *
+ */
+static quicly_stream_id_t quicly_get_peer_next_stream_id(quicly_conn_t *conn, int uni);
+/**
+ * Returns the local address of the connection. This may be AF_UNSPEC, indicating that the operating system is choosing the address.
+ */
+static struct sockaddr *quicly_get_sockname(quicly_conn_t *conn);
+/**
+ * Returns the remote address of the connection. This would never be AF_UNSPEC.
+ */
+static struct sockaddr *quicly_get_peername(quicly_conn_t *conn);
+/**
+ *
+ */
+int quicly_get_stats(quicly_conn_t *conn, quicly_stats_t *stats);
+/**
+ *
+ */
+void quicly_get_max_data(quicly_conn_t *conn, uint64_t *send_permitted, uint64_t *sent, uint64_t *consumed);
+/**
+ *
+ */
+static void **quicly_get_data(quicly_conn_t *conn);
+/**
+ * destroys a connection object.
+ */
+void quicly_free(quicly_conn_t *conn);
+/**
+ * closes the connection. `err` is the application error code using the coalesced scheme (see QUICLY_ERROR_* macros), or zero (no
+ * error; indicating idle close). An application should continue calling quicly_receive and quicly_send, until they return
+ * QUICLY_ERROR_FREE_CONNECTION. At this point, it should call quicly_free.
+ */
+int quicly_close(quicly_conn_t *conn, int err, const char *reason_phrase);
+/**
+ *
+ */
+int64_t quicly_get_first_timeout(quicly_conn_t *conn);
+/**
+ *
+ */
+uint64_t quicly_get_next_expected_packet_number(quicly_conn_t *conn);
+/**
+ * returns if the connection is currently capped by connection-level flow control.
+ */
+int quicly_is_flow_capped(quicly_conn_t *conn);
+/**
+ * checks if quicly_send_stream can be invoked
+ * @return a boolean indicating if quicly_send_stream can be called immediately
+ */
+int quicly_can_send_stream_data(quicly_conn_t *conn, quicly_send_context_t *s);
+/**
+ * Sends data of given stream. Called by stream scheduler. Only streams that can send some data or EOS should be specified. It is
+ * the responsibility of the stream scheduler to maintain a list of such streams.
+ */
+int quicly_send_stream(quicly_stream_t *stream, quicly_send_context_t *s);
+/**
+ *
+ */
+quicly_datagram_t *quicly_send_version_negotiation(quicly_context_t *ctx, struct sockaddr *dest_addr, ptls_iovec_t dest_cid,
+ struct sockaddr *src_addr, ptls_iovec_t src_cid);
+/**
+ *
+ */
+int quicly_retry_calc_cidpair_hash(ptls_hash_algorithm_t *sha256, ptls_iovec_t client_cid, ptls_iovec_t server_cid,
+ uint64_t *value);
+/**
+ * @param retry_aead_cache pointer to `ptls_aead_context_t *` that the function can store a AEAD context for future reuse. The cache
+ * cannot be shared between multiple threads. Can be set to NULL when caching is unnecessary.
+ */
+quicly_datagram_t *quicly_send_retry(quicly_context_t *ctx, ptls_aead_context_t *token_encrypt_ctx, struct sockaddr *dest_addr,
+ ptls_iovec_t dest_cid, struct sockaddr *src_addr, ptls_iovec_t src_cid, ptls_iovec_t odcid,
+ ptls_iovec_t token_prefix, ptls_iovec_t appdata, ptls_aead_context_t **retry_aead_cache);
+/**
+ *
+ */
+int quicly_send(quicly_conn_t *conn, quicly_datagram_t **packets, size_t *num_packets);
+/**
+ *
+ */
+quicly_datagram_t *quicly_send_close_invalid_token(quicly_context_t *ctx, struct sockaddr *dest_addr, ptls_iovec_t dest_cid,
+ struct sockaddr *src_addr, ptls_iovec_t src_cid, const char *err_desc);
+/**
+ *
+ */
+quicly_datagram_t *quicly_send_stateless_reset(quicly_context_t *ctx, struct sockaddr *dest_addr, struct sockaddr *src_addr,
+ const void *src_cid);
+/**
+ *
+ */
+int quicly_send_resumption_token(quicly_conn_t *conn);
+/**
+ *
+ */
+int quicly_receive(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr, quicly_decoded_packet_t *packet);
+/**
+ * consults if the incoming packet identified by (dest_addr, src_addr, decoded) belongs to the given connection
+ */
+int quicly_is_destination(quicly_conn_t *conn, struct sockaddr *dest_addr, struct sockaddr *src_addr,
+ quicly_decoded_packet_t *decoded);
+/**
+ *
+ */
+int quicly_encode_transport_parameter_list(ptls_buffer_t *buf, int is_client, const quicly_transport_parameters_t *params,
+ const quicly_cid_t *odcid, const void *stateless_reset_token, int expand);
+/**
+ *
+ */
+int quicly_decode_transport_parameter_list(quicly_transport_parameters_t *params, quicly_cid_t *odcid, void *stateless_reset_token,
+ int is_client, const uint8_t *src, const uint8_t *end);
+/**
+ * Initiates a new connection.
+ * @param new_cid the CID to be used for the connection. path_id is ignored.
+ */
+int quicly_connect(quicly_conn_t **conn, quicly_context_t *ctx, const char *server_name, struct sockaddr *dest_addr,
+ struct sockaddr *src_addr, const quicly_cid_plaintext_t *new_cid, ptls_iovec_t address_token,
+ ptls_handshake_properties_t *handshake_properties,
+ const quicly_transport_parameters_t *resumed_transport_params);
+/**
+ * accepts a new connection
+ * @param new_cid The CID to be used for the connection. When an error is being returned, the application can reuse the CID
+ * provided to the function.
+ * @param address_token A validated address validation token, if any. Applications MUST validate the address validation token
+ * before calling this function, dropping the ones that failed to validate. When a token is supplied,
+ * `quicly_accept` will consult the values being supplied assuming that the peer's address has been validated.
+ */
+int quicly_accept(quicly_conn_t **conn, quicly_context_t *ctx, struct sockaddr *dest_addr, struct sockaddr *src_addr,
+ quicly_decoded_packet_t *packet, quicly_address_token_plaintext_t *address_token,
+ const quicly_cid_plaintext_t *new_cid, ptls_handshake_properties_t *handshake_properties);
+/**
+ *
+ */
+ptls_t *quicly_get_tls(quicly_conn_t *conn);
+/**
+ *
+ */
+quicly_stream_id_t quicly_get_ingress_max_streams(quicly_conn_t *conn, int uni);
+/**
+ *
+ */
+quicly_stream_t *quicly_get_stream(quicly_conn_t *conn, quicly_stream_id_t stream_id);
+/**
+ *
+ */
+int quicly_open_stream(quicly_conn_t *conn, quicly_stream_t **stream, int unidirectional);
+/**
+ *
+ */
+void quicly_reset_stream(quicly_stream_t *stream, int err);
+/**
+ *
+ */
+void quicly_request_stop(quicly_stream_t *stream, int err);
+/**
+ *
+ */
+static int quicly_stop_requested(quicly_stream_t *stream);
+/**
+ *
+ */
+int quicly_stream_sync_sendbuf(quicly_stream_t *stream, int activate);
+/**
+ *
+ */
+void quicly_stream_sync_recvbuf(quicly_stream_t *stream, size_t shift_amount);
+/**
+ *
+ */
+static int quicly_stream_is_client_initiated(quicly_stream_id_t stream_id);
+/**
+ *
+ */
+static int quicly_stream_is_unidirectional(quicly_stream_id_t stream_id);
+/**
+ *
+ */
+static int quicly_stream_has_send_side(int is_client, quicly_stream_id_t stream_id);
+/**
+ *
+ */
+static int quicly_stream_has_receive_side(int is_client, quicly_stream_id_t stream_id);
+/**
+ *
+ */
+static int quicly_stream_is_self_initiated(quicly_stream_t *stream);
+/**
+ *
+ */
+void quicly_amend_ptls_context(ptls_context_t *ptls);
+/**
+ * Encrypts an address token by serializing the plaintext structure and appending an authentication tag. Bytes between `start_off`
+ * and `buf->off` (at the moment of invocation) is considered part of a token covered by AAD.
+ */
+int quicly_encrypt_address_token(void (*random_bytes)(void *, size_t), ptls_aead_context_t *aead, ptls_buffer_t *buf,
+ size_t start_off, const quicly_address_token_plaintext_t *plaintext);
+/**
+ * Decrypts an address token.
+ * If decryption succeeds, returns zero. If the token is unusable due to decryption failure, returns PTLS_DECODE_ERROR. If the token
+ * is unusable and the connection should be reset, returns QUICLY_ERROR_INVALID_TOKEN.
+ */
+int quicly_decrypt_address_token(ptls_aead_context_t *aead, quicly_address_token_plaintext_t *plaintext, const void *src,
+ size_t len, size_t prefix_len, const char **err_desc);
+/**
+ *
+ */
+static void quicly_byte_to_hex(char *dst, uint8_t v);
+/**
+ *
+ */
+socklen_t quicly_get_socklen(struct sockaddr *sa);
+/**
+ * Builds a safe string. Supplied buffer MUST be 4x + 1 bytes bigger than the input.
+ */
+char *quicly_escape_unsafe_string(char *dst, const void *bytes, size_t len);
+/**
+ *
+ */
+char *quicly_hexdump(const uint8_t *bytes, size_t len, size_t indent);
+/**
+ *
+ */
+void quicly_stream_noop_on_destroy(quicly_stream_t *stream, int err);
+/**
+ *
+ */
+void quicly_stream_noop_on_send_shift(quicly_stream_t *stream, size_t delta);
+/**
+ *
+ */
+void quicly_stream_noop_on_send_emit(quicly_stream_t *stream, size_t off, void *dst, size_t *len, int *wrote_all);
+/**
+ *
+ */
+void quicly_stream_noop_on_send_stop(quicly_stream_t *stream, int err);
+/**
+ *
+ */
+void quicly_stream_noop_on_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len);
+/**
+ *
+ */
+void quicly_stream_noop_on_receive_reset(quicly_stream_t *stream, int err);
+
+extern const quicly_stream_callbacks_t quicly_stream_noop_callbacks;
+
+/* inline definitions */
+
+/* returns the connection state (the conn object is cast to its public view) */
+inline quicly_state_t quicly_get_state(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return c->state;
+}
+
+/* total number of open streams: locally- and peer-initiated, bidirectional and unidirectional */
+inline uint32_t quicly_num_streams(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return c->host.bidi.num_streams + c->host.uni.num_streams + c->peer.bidi.num_streams + c->peer.uni.num_streams;
+}
+
+/* compares a CID against a byte vector (both length and content must match) */
+inline int quicly_cid_is_equal(const quicly_cid_t *cid, ptls_iovec_t vec)
+{
+ return cid->len == vec.len && memcmp(cid->cid, vec.base, vec.len) == 0;
+}
+
+/* returns the context the connection was created with */
+inline quicly_context_t *quicly_get_context(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return c->ctx;
+}
+
+/* returns the plaintext master CID of the connection */
+inline const quicly_cid_plaintext_t *quicly_get_master_id(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return &c->master_id;
+}
+
+/* returns the CID offered by this endpoint */
+inline const quicly_cid_t *quicly_get_offered_cid(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return &c->host.offered_cid;
+}
+
+/* returns the CID of the peer */
+inline const quicly_cid_t *quicly_get_peer_cid(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return &c->peer.cid;
+}
+
+/* returns the transport parameters announced by the peer */
+inline const quicly_transport_parameters_t *quicly_get_peer_transport_parameters(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return &c->peer.transport_params;
+}
+
+/* the low bit of a stream ID identifies the initiator (0 = client-initiated), hence an even
+ * locally-initiated bidi next_stream_id implies this endpoint is the client */
+inline int quicly_is_client(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return (c->host.bidi.next_stream_id & 1) == 0;
+}
+
+/* next stream ID this endpoint would use for a unidirectional (uni != 0) or bidirectional stream */
+inline quicly_stream_id_t quicly_get_host_next_stream_id(quicly_conn_t *conn, int uni)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return uni ? c->host.uni.next_stream_id : c->host.bidi.next_stream_id;
+}
+
+/* next stream ID expected from the peer for a unidirectional (uni != 0) or bidirectional stream */
+inline quicly_stream_id_t quicly_get_peer_next_stream_id(quicly_conn_t *conn, int uni)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return uni ? c->peer.uni.next_stream_id : c->peer.bidi.next_stream_id;
+}
+
+/* local address of the connection */
+inline struct sockaddr *quicly_get_sockname(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return &c->host.address.sa;
+}
+
+/* remote address of the connection */
+inline struct sockaddr *quicly_get_peername(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return &c->peer.address.sa;
+}
+
+/* application-owned pointer slot attached to the connection */
+inline void **quicly_get_data(quicly_conn_t *conn)
+{
+ struct _st_quicly_conn_public_t *c = (struct _st_quicly_conn_public_t *)conn;
+ return &c->data;
+}
+
+/* NOTE(review): reports that the stop_sending sender-state of the stream is active; presumably set
+ * when a STOP_SENDING exchange is in progress for this stream -- confirm against quicly.c */
+inline int quicly_stop_requested(quicly_stream_t *stream)
+{
+ return stream->_send_aux.stop_sending.sender_state != QUICLY_SENDER_STATE_NONE;
+}
+
+/* low bit 0 = client-initiated; negative stream IDs (quicly-internal, e.g. crypto streams) use the
+ * inverse encoding */
+inline int quicly_stream_is_client_initiated(quicly_stream_id_t stream_id)
+{
+ if (stream_id < 0)
+ return (stream_id & 1) != 0;
+ return (stream_id & 1) == 0;
+}
+
+/* bit 1 set = unidirectional; negative (internal) stream IDs are always treated as bidirectional */
+inline int quicly_stream_is_unidirectional(quicly_stream_id_t stream_id)
+{
+ if (stream_id < 0)
+ return 0;
+ return (stream_id & 2) != 0;
+}
+
+/* bidi streams have both sides; a uni stream has a send side only at its initiator */
+inline int quicly_stream_has_send_side(int is_client, quicly_stream_id_t stream_id)
+{
+ if (!quicly_stream_is_unidirectional(stream_id))
+ return 1;
+ return is_client == quicly_stream_is_client_initiated(stream_id);
+}
+
+/* bidi streams have both sides; a uni stream has a receive side only at the non-initiator */
+inline int quicly_stream_has_receive_side(int is_client, quicly_stream_id_t stream_id)
+{
+ if (!quicly_stream_is_unidirectional(stream_id))
+ return 1;
+ return is_client != quicly_stream_is_client_initiated(stream_id);
+}
+
+/* whether the stream was opened by this endpoint */
+inline int quicly_stream_is_self_initiated(quicly_stream_t *stream)
+{
+ return quicly_stream_is_client_initiated(stream->stream_id) == quicly_is_client(stream->conn);
+}
+
+/* writes the two lowercase hex digits of `v` to dst[0..1]; no NUL terminator is appended */
+inline void quicly_byte_to_hex(char *dst, uint8_t v)
+{
+ dst[0] = "0123456789abcdef"[v >> 4];
+ dst[1] = "0123456789abcdef"[v & 0xf];
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "contrib/quicly/picotls/picotls.h"
+#include "contrib/quicly/constants.h"
+#include "contrib/quicly/ranges.h"
+
+#define COPY(dst, src, n) \
+ do { \
+ size_t _n = (n); \
+ if (_n != 0) \
+ memcpy((dst), (src), sizeof(quicly_range_t) * _n); \
+ } while (0)
+#define MOVE(dst, src, n) \
+ do { \
+ size_t _n = (n); \
+ if (_n != 0) \
+ memmove((dst), (src), sizeof(quicly_range_t) * _n); \
+ } while (0)
+
+/* Inserts the range [start, end) at index `slot`, shifting later entries up by one.
+ * Grows the backing array (doubling, min. 4 slots) when full; the initial embedded slot
+ * (`_initial`) is never passed to free(). Returns 0 or PTLS_ERROR_NO_MEMORY. */
+static int insert_at(quicly_ranges_t *ranges, uint64_t start, uint64_t end, size_t slot)
+{
+ if (ranges->num_ranges == ranges->capacity) {
+ size_t new_capacity = ranges->capacity < 4 ? 4 : ranges->capacity * 2;
+ quicly_range_t *new_ranges = malloc(new_capacity * sizeof(*new_ranges));
+ if (new_ranges == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+ /* copy in two halves, leaving index `slot` open for the new entry */
+ COPY(new_ranges, ranges->ranges, slot);
+ COPY(new_ranges + slot + 1, ranges->ranges + slot, ranges->num_ranges - slot);
+ if (ranges->ranges != &ranges->_initial)
+ free(ranges->ranges);
+ ranges->ranges = new_ranges;
+ ranges->capacity = new_capacity;
+ } else {
+ /* enough room: shift the tail up in place */
+ MOVE(ranges->ranges + slot + 1, ranges->ranges + slot, ranges->num_ranges - slot);
+ }
+ ranges->ranges[slot] = (quicly_range_t){start, end};
+ ++ranges->num_ranges;
+ return 0;
+}
+
+/* Removes entries [begin_range_index, end_range_index) from the list, then opportunistically
+ * halves the backing array when occupancy falls to <= 1/3 of a capacity above 4. A failed
+ * shrink-realloc is deliberately ignored: the old (larger) buffer remains valid. */
+void quicly_ranges_drop_by_range_indices(quicly_ranges_t *ranges, size_t begin_range_index, size_t end_range_index)
+{
+ assert(begin_range_index < end_range_index);
+
+ MOVE(ranges->ranges + begin_range_index, ranges->ranges + end_range_index, ranges->num_ranges - end_range_index);
+ ranges->num_ranges -= end_range_index - begin_range_index;
+ if (ranges->capacity > 4 && ranges->num_ranges * 3 <= ranges->capacity) {
+ size_t new_capacity = ranges->capacity / 2;
+ quicly_range_t *new_ranges = realloc(ranges->ranges, new_capacity * sizeof(*new_ranges));
+ if (new_ranges != NULL) {
+ ranges->ranges = new_ranges;
+ ranges->capacity = new_capacity;
+ }
+ }
+}
+
+/* Merges [start, end) into the entries slot..end_slot: widens ranges[slot] to cover the union,
+ * then drops the now-redundant entries (slot+1 .. end_slot). Callers guarantee the input range
+ * overlaps or abuts every entry in that span. Always returns 0. */
+static inline int merge_update(quicly_ranges_t *ranges, uint64_t start, uint64_t end, size_t slot, size_t end_slot)
+{
+ if (start < ranges->ranges[slot].start)
+ ranges->ranges[slot].start = start;
+ /* the merged end is the larger of `end` and the end of the last overlapped entry */
+ ranges->ranges[slot].end = end < ranges->ranges[end_slot].end ? ranges->ranges[end_slot].end : end;
+
+ if (slot != end_slot)
+ quicly_ranges_drop_by_range_indices(ranges, slot + 1, end_slot + 1);
+
+ return 0;
+}
+
+/* Initializes the structure and registers [start, end) as its sole entry.
+ * Cannot actually fail: the first insert uses the embedded slot, so no allocation occurs. */
+int quicly_ranges_init_with_range(quicly_ranges_t *ranges, uint64_t start, uint64_t end)
+{
+ quicly_ranges_init(ranges);
+ return insert_at(ranges, start, end, 0);
+}
+
+/* Adds [start, end) to the sorted, non-overlapping list, merging with any entries it
+ * touches (abutting counts as touching). Returns 0 on success, PTLS_ERROR_NO_MEMORY if an
+ * insertion needs to grow the array and allocation fails. An empty input range is a no-op. */
+int quicly_ranges_add(quicly_ranges_t *ranges, uint64_t start, uint64_t end)
+{
+ size_t slot, end_slot;
+
+ assert(start <= end);
+
+ if (start == end)
+ return 0;
+
+ /* fast paths: empty list, or strictly after the last entry (no merge possible) */
+ if (ranges->num_ranges == 0) {
+ return insert_at(ranges, start, end, 0);
+ } else if (ranges->ranges[ranges->num_ranges - 1].end < start) {
+ return insert_at(ranges, start, end, ranges->num_ranges);
+ }
+
+ /* find the slot that should contain `end`, scanning from the back */
+ for (slot = ranges->num_ranges - 1;; --slot) {
+ if (ranges->ranges[slot].start <= end)
+ break;
+ if (slot == 0)
+ return insert_at(ranges, start, end, 0); /* strictly before the first entry */
+ }
+ end_slot = slot;
+
+ /* find the slot that should contain `start`, continuing backwards from end_slot */
+ do {
+ if (ranges->ranges[slot].end == start) {
+ /* abuts this entry exactly: merge slot..end_slot */
+ return merge_update(ranges, start, end, slot, end_slot);
+ } else if (ranges->ranges[slot].end < start) {
+ /* `start` falls in the gap after this entry */
+ if (slot++ == end_slot) {
+ return insert_at(ranges, start, end, slot); /* fits entirely in the gap */
+ } else {
+ return merge_update(ranges, start, end, slot, end_slot);
+ }
+ }
+ } while (slot-- != 0);
+
+ /* `start` precedes every entry up to end_slot */
+ return merge_update(ranges, start, end, 0, end_slot);
+}
+
+/* Removes [start, end) from the set, trimming, splitting, or deleting entries as needed.
+ * Returns 0 on success; the only failure (PTLS_ERROR_NO_MEMORY) can occur on the split path,
+ * which must insert one extra entry. An empty input range is a no-op. */
+int quicly_ranges_subtract(quicly_ranges_t *ranges, uint64_t start, uint64_t end)
+{
+ size_t shrink_from, slot;
+
+ assert(start <= end);
+
+ if (start == end)
+ return 0;
+
+ /* fast paths: nothing stored, or no overlap with either extreme */
+ if (ranges->num_ranges == 0) {
+ return 0;
+ } else if (end <= ranges->ranges[0].start) {
+ return 0;
+ } else if (ranges->ranges[ranges->num_ranges - 1].end <= start) {
+ return 0;
+ }
+
+ /* find the first overlapping slot */
+ for (slot = 0; ranges->ranges[slot].end < start; ++slot)
+ ;
+
+ if (end <= ranges->ranges[slot].end) {
+ /* first overlapping slot is the only slot that we will ever modify */
+ if (end <= ranges->ranges[slot].start)
+ return 0; /* the subtracted range lies entirely in the gap before this slot */
+ if (start <= ranges->ranges[slot].start) {
+ ranges->ranges[slot].start = end; /* trim the head */
+ } else if (end == ranges->ranges[slot].end) {
+ ranges->ranges[slot].end = start; /* trim the tail */
+ } else {
+ /* split */
+ int ret;
+ if ((ret = insert_at(ranges, end, ranges->ranges[slot].end, slot + 1)) != 0)
+ return ret;
+ ranges->ranges[slot].end = start;
+ return 0;
+ }
+ /* remove the slot if the range has become empty */
+ if (ranges->ranges[slot].start == ranges->ranges[slot].end)
+ quicly_ranges_drop_by_range_indices(ranges, slot, slot + 1);
+ return 0;
+ }
+
+ /* specified region covers multiple slots */
+ if (start <= ranges->ranges[slot].start) {
+ shrink_from = slot; /* first slot is wholly covered; delete it */
+ } else {
+ ranges->ranges[slot].end = start; /* keep the head of the first slot */
+ shrink_from = slot + 1;
+ }
+
+ /* find the last overlapping slot */
+ for (++slot; slot != ranges->num_ranges; ++slot) {
+ if (end <= ranges->ranges[slot].start)
+ break;
+ if (end < ranges->ranges[slot].end) {
+ ranges->ranges[slot].start = end; /* keep the tail of the last slot */
+ break;
+ }
+ }
+
+ /* remove shrink_from..slot */
+ if (shrink_from != slot)
+ quicly_ranges_drop_by_range_indices(ranges, shrink_from, slot);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_ranges_h
+#define quicly_ranges_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+typedef struct st_quicly_range_t {
+ uint64_t start;
+ uint64_t end; /* non-inclusive */
+} quicly_range_t;
+
+typedef struct st_quicly_ranges_t {
+ quicly_range_t *ranges;
+ size_t num_ranges, capacity;
+ quicly_range_t _initial;
+} quicly_ranges_t;
+
+/**
+ * initializes the structure
+ */
+static void quicly_ranges_init(quicly_ranges_t *ranges);
+/**
+ * initializes the structure, registering given range
+ */
+int quicly_ranges_init_with_range(quicly_ranges_t *ranges, uint64_t start, uint64_t end);
+/**
+ * clears the structure
+ */
+static void quicly_ranges_clear(quicly_ranges_t *ranges);
+/**
+ * adds given range, returns 0 if successful
+ */
+int quicly_ranges_add(quicly_ranges_t *ranges, uint64_t start, uint64_t end);
+/**
+ * subtracts given range, returns 0 if successful
+ */
+int quicly_ranges_subtract(quicly_ranges_t *ranges, uint64_t start, uint64_t end);
+/**
+ * removes ranges->ranges[I] where begin_index <= I && I < end_index
+ */
+void quicly_ranges_drop_by_range_indices(quicly_ranges_t *ranges, size_t begin_index, size_t end_index);
+
+/* inline functions */
+
+/* starts with the embedded single-slot storage; the heap is used only once a second range is needed */
+inline void quicly_ranges_init(quicly_ranges_t *ranges)
+{
+ ranges->ranges = &ranges->_initial;
+ ranges->num_ranges = 0;
+ ranges->capacity = 1;
+}
+
+/* releases heap storage (if any) and reverts to the embedded slot, leaving the set empty */
+inline void quicly_ranges_clear(quicly_ranges_t *ranges)
+{
+ if (ranges->ranges != &ranges->_initial) {
+ free(ranges->ranges);
+ ranges->ranges = &ranges->_initial;
+ }
+ ranges->num_ranges = 0;
+ ranges->capacity = 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include "contrib/quicly/constants.h"
+#include "contrib/quicly/recvstate.h"
+
+/* Initializes receive state for an open stream. The `received` set starts with the sentinel
+ * range [0,0) so that received.ranges[0].end always tracks the contiguous prefix received;
+ * the ignored return value cannot fail (first insert uses the embedded slot, no allocation). */
+void quicly_recvstate_init(quicly_recvstate_t *state)
+{
+ quicly_ranges_init_with_range(&state->received, 0, 0);
+ state->data_off = 0;
+ state->eos = UINT64_MAX; /* end-of-stream offset unknown until FIN is seen */
+}
+
+/* Initializes receive state directly in the completed state: an empty `received` set means
+ * transfer complete (see quicly_recvstate_transfer_complete), with eos fixed at 0. */
+void quicly_recvstate_init_closed(quicly_recvstate_t *state)
+{
+ quicly_ranges_init(&state->received);
+ state->data_off = 0;
+ state->eos = 0;
+}
+
+/* releases resources held by the receive state */
+void quicly_recvstate_dispose(quicly_recvstate_t *state)
+{
+ quicly_ranges_clear(&state->received);
+}
+
+/* Records reception of (off, *len). On success *len is rewritten to the number of bytes at the
+ * tail of the given range that may be new (i.e., past data_off). Returns 0, a
+ * QUICLY_TRANSPORT_ERROR_FINAL_SIZE violation, QUICLY_ERROR_STATE_EXHAUSTION when the number
+ * of tracked gaps exceeds max_ranges, or an allocation error from quicly_ranges_add. */
+int quicly_recvstate_update(quicly_recvstate_t *state, uint64_t off, size_t *len, int is_fin, size_t max_ranges)
+{
+ int ret;
+
+ assert(!quicly_recvstate_transfer_complete(state));
+
+ /* eos handling */
+ if (state->eos == UINT64_MAX) {
+ if (is_fin) {
+ state->eos = off + *len;
+ /* FIN must not cut off data already received past it */
+ if (state->eos < state->received.ranges[state->received.num_ranges - 1].end)
+ return QUICLY_TRANSPORT_ERROR_FINAL_SIZE;
+ }
+ } else {
+ /* final size already known; no data may extend beyond it */
+ if (off + *len > state->eos)
+ return QUICLY_TRANSPORT_ERROR_FINAL_SIZE;
+ }
+
+ /* no state change; entire data has already been received */
+ if (off + *len <= state->data_off) {
+ *len = 0;
+ if (state->received.ranges[0].end == state->eos)
+ goto Complete;
+ return 0;
+ }
+
+ /* adjust if partially received */
+ if (off < state->data_off) {
+ size_t delta = state->data_off - off;
+ off += delta;
+ *len -= delta;
+ }
+
+ /* update received range */
+ if (*len != 0) {
+ if ((ret = quicly_ranges_add(&state->received, off, off + *len)) != 0)
+ return ret;
+ if (state->received.num_ranges > max_ranges)
+ return QUICLY_ERROR_STATE_EXHAUSTION;
+ }
+ /* complete when everything from offset 0 up to eos has arrived as one contiguous range */
+ if (state->received.num_ranges == 1 && state->received.ranges[0].start == 0 && state->received.ranges[0].end == state->eos)
+ goto Complete;
+
+ return 0;
+
+Complete:
+ /* transfer-complete is represented by an empty `received` set */
+ quicly_ranges_clear(&state->received);
+ return 0;
+}
+
+/* Handles an incoming stream reset that fixes end-of-stream at eos_at. On success stores the
+ * number of bytes between the highest received offset and eos_at into *bytes_missing and marks
+ * the transfer complete. Returns QUICLY_TRANSPORT_ERROR_FINAL_SIZE when eos_at conflicts with
+ * a previously announced final size or with data already received. */
+int quicly_recvstate_reset(quicly_recvstate_t *state, uint64_t eos_at, uint64_t *bytes_missing)
+{
+ assert(!quicly_recvstate_transfer_complete(state));
+
+ /* validate */
+ if (state->eos != UINT64_MAX && state->eos != eos_at)
+ return QUICLY_TRANSPORT_ERROR_FINAL_SIZE;
+ if (eos_at < state->received.ranges[state->received.num_ranges - 1].end)
+ return QUICLY_TRANSPORT_ERROR_FINAL_SIZE;
+
+ /* calculate bytes missing */
+ *bytes_missing = eos_at - state->received.ranges[state->received.num_ranges - 1].end;
+
+ /* clear the received range (empty set == transfer complete) */
+ quicly_ranges_clear(&state->received);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_recvstate_h
+#define quicly_recvstate_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <assert.h>
+#include <stddef.h>
+#include "contrib/quicly/picotls/picotls.h"
+#include "contrib/quicly/ranges.h"
+
+typedef struct st_quicly_recvstate_t {
+ /**
+ * ranges that have been received (starts and remains non-empty until transfer completes)
+ */
+ quicly_ranges_t received;
+ /**
+ * starting offset of data
+ */
+ uint64_t data_off;
+ /**
+ * end_of_stream offset (or UINT64_MAX)
+ */
+ uint64_t eos;
+} quicly_recvstate_t;
+
+void quicly_recvstate_init(quicly_recvstate_t *state);
+void quicly_recvstate_init_closed(quicly_recvstate_t *state);
+void quicly_recvstate_dispose(quicly_recvstate_t *state);
+static int quicly_recvstate_transfer_complete(quicly_recvstate_t *state);
+static size_t quicly_recvstate_bytes_available(quicly_recvstate_t *state);
+/**
+ * Records that the range identified by (off, *len) has been received. When 0 (success) is returned, *len contains the number of
+ * bytes that might have been newly received and therefore need to be written to the receive buffer (this number of bytes counts
+ * backward from the end of given range).
+ */
+int quicly_recvstate_update(quicly_recvstate_t *state, uint64_t off, size_t *len, int is_fin, size_t max_ranges);
+int quicly_recvstate_reset(quicly_recvstate_t *state, uint64_t eos_at, uint64_t *bytes_missing);
+
+/* inline definitions */
+
+/* an empty `received` set is the canonical representation of a completed transfer */
+inline int quicly_recvstate_transfer_complete(quicly_recvstate_t *state)
+{
+ return state->received.num_ranges == 0;
+}
+
+/* number of contiguous bytes available past data_off: the contiguous prefix is
+ * received.ranges[0].end while in progress, or eos once the transfer has completed */
+inline size_t quicly_recvstate_bytes_available(quicly_recvstate_t *state)
+{
+ uint64_t total = quicly_recvstate_transfer_complete(state) ? state->eos : state->received.ranges[0].end;
+ assert(state->data_off <= total);
+ return total - state->data_off;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "contrib/quicly/picotls/picotls.h"
+#include "contrib/quicly/constants.h"
+#include "contrib/quicly/sendstate.h"
+
+/* Initializes send state for an open stream. `acked` starts with the sentinel range [0,0) so
+ * that acked.ranges[0].end always tracks the contiguously-acked prefix; the ignored return
+ * value cannot fail (first insert uses the embedded slot, no allocation). */
+void quicly_sendstate_init(quicly_sendstate_t *state)
+{
+ quicly_ranges_init_with_range(&state->acked, 0, 0);
+ quicly_ranges_init(&state->pending);
+ state->size_inflight = 0;
+ state->final_size = UINT64_MAX; /* UINT64_MAX == still open (see quicly_sendstate_is_open) */
+}
+
+/* Initializes send state directly in the closed-and-fully-acked state: final_size 0 with the
+ * EOS position (offset 0) already acked. */
+void quicly_sendstate_init_closed(quicly_sendstate_t *state)
+{
+ quicly_sendstate_init(state);
+ state->acked.ranges[0].end = 1; /* 1 == final_size (0) + EOS */
+ state->final_size = 0;
+}
+
+/* releases resources held by the send state */
+void quicly_sendstate_dispose(quicly_sendstate_t *state)
+{
+ quicly_ranges_clear(&state->acked);
+ quicly_ranges_clear(&state->pending);
+ state->final_size = 0;
+ state->size_inflight = 0;
+}
+
+/* Returns whether some data or EOS can be sent. With a non-NULL max_stream_data the first
+ * pending offset is tested against stream-level flow control; with NULL it is tested against
+ * size_inflight (connection-level capping). Sending the bare EOS marker is always allowed. */
+int quicly_sendstate_can_send(quicly_sendstate_t *state, const uint64_t *max_stream_data)
+{
+ if (state->pending.num_ranges != 0) {
+ /* the flow is capped either by MAX_STREAM_DATA or (in case we are hitting connection-level flow control) by the number of
+ * bytes we've already sent */
+ uint64_t blocked_at = max_stream_data != NULL ? *max_stream_data : state->size_inflight;
+ if (state->pending.ranges[0].start < blocked_at)
+ return 1;
+ /* we can always send EOS, if that is the only thing to be sent */
+ if (state->pending.ranges[0].start >= state->final_size) {
+ assert(state->pending.ranges[0].start == state->final_size);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Marks everything from size_inflight to the end of the stream as pending (i.e., needing to be
+ * sent). `end_off` is final_size + 1 to cover the EOS position once the stream is shut down,
+ * or UINT64_MAX while it remains open. No-op when the tail is already pending. */
+int quicly_sendstate_activate(quicly_sendstate_t *state)
+{
+ uint64_t end_off = state->final_size;
+
+ /* take EOS position into account */
+ if (end_off != UINT64_MAX)
+ ++end_off;
+
+ /* do nothing if already active */
+ if (state->pending.num_ranges != 0 && state->pending.ranges[state->pending.num_ranges - 1].end == end_off)
+ return 0;
+
+ return quicly_ranges_add(&state->pending, state->size_inflight, end_off);
+}
+
+/* Closes the send side at final_size, scheduling the remaining bytes plus the EOS position
+ * (hence the +1) for transmission. The stream must still be open and must not have had more
+ * than final_size bytes inflight. Returns 0 or an error from quicly_ranges_add. */
+int quicly_sendstate_shutdown(quicly_sendstate_t *state, uint64_t final_size)
+{
+ int ret;
+
+ assert(quicly_sendstate_is_open(state));
+ assert(state->size_inflight <= final_size);
+
+ if (state->pending.num_ranges != 0 && state->pending.ranges[state->pending.num_ranges - 1].end == UINT64_MAX) {
+ /* tail already pending to "infinity" (set while open); clamp it to the EOS position */
+ state->pending.ranges[state->pending.num_ranges - 1].end = final_size + 1;
+ } else {
+ if ((ret = quicly_ranges_add(&state->pending, state->size_inflight, final_size + 1)) != 0)
+ return ret;
+ }
+
+ state->final_size = final_size;
+ return 0;
+}
+
+/* Abandons the send side (e.g., on RESET_STREAM): fixes final_size at the bytes already put
+ * inflight if still open, marks everything up to and including the EOS position as acked so
+ * the transfer counts as complete, and discards all pending data. */
+void quicly_sendstate_reset(quicly_sendstate_t *state)
+{
+ int ret;
+
+ if (state->final_size == UINT64_MAX)
+ state->final_size = state->size_inflight;
+
+ ret = quicly_ranges_add(&state->acked, 0, state->final_size + 1);
+ assert(ret == 0 && "guaranteed to succeed, because the number of ranges never increases");
+ (void)ret; /* avoid set-but-unused warning when asserts are compiled out (NDEBUG) */
+ quicly_ranges_clear(&state->pending);
+}
+
+/* Processes acknowledgement of the sent range in *args. Adds it to `acked`, removes it from
+ * `pending` unless the packet is still considered active, and reports via *bytes_to_shift how
+ * many bytes at the head of the send buffer became fully acked and can be released (the EOS
+ * position is excluded from that count). Returns 0 or an allocation error. */
+int quicly_sendstate_acked(quicly_sendstate_t *state, quicly_sendstate_sent_t *args, int is_active, size_t *bytes_to_shift)
+{
+ uint64_t prev_sent_upto = state->acked.ranges[0].end;
+ int ret;
+
+ /* adjust acked and pending ranges */
+ if ((ret = quicly_ranges_add(&state->acked, args->start, args->end)) != 0)
+ return ret;
+ if (!is_active) {
+ if ((ret = quicly_ranges_subtract(&state->pending, args->start, args->end)) != 0)
+ return ret;
+ }
+ /* invariant: the contiguously-acked prefix never overlaps the first pending range */
+ assert(state->pending.num_ranges == 0 || state->acked.ranges[0].end <= state->pending.ranges[0].start);
+
+ /* calculate number of bytes that can be retired from the send buffer */
+ if (prev_sent_upto != state->acked.ranges[0].end) {
+ uint64_t sent_upto = state->acked.ranges[0].end;
+ if (sent_upto > state->final_size) {
+ /* adjust EOS position */
+ assert(sent_upto == state->final_size + 1);
+ --sent_upto;
+ }
+ *bytes_to_shift = sent_upto - prev_sent_upto;
+ } else {
+ *bytes_to_shift = 0;
+ }
+
+ return 0;
+}
+
+/* Processes loss of the sent range in *args: walks `acked` and re-adds to `pending` every
+ * sub-range of [args->start, args->end) that has not been acked in the meantime. Returns 0 or
+ * an allocation error from quicly_ranges_add. */
+int quicly_sendstate_lost(quicly_sendstate_t *state, quicly_sendstate_sent_t *args)
+{
+ uint64_t start = args->start, end = args->end;
+ size_t acked_slot = 0;
+ int ret;
+
+ while (start < end) {
+ /* skip the part already covered by the current acked range */
+ if (start < state->acked.ranges[acked_slot].end)
+ start = state->acked.ranges[acked_slot].end;
+ ++acked_slot;
+ if (acked_slot == state->acked.num_ranges || end <= state->acked.ranges[acked_slot].start) {
+ /* no further acked range intersects; re-schedule the remainder (if any) in one go */
+ if (!(start < end))
+ return 0;
+ return quicly_ranges_add(&state->pending, start, end);
+ }
+ if (start < state->acked.ranges[acked_slot].start) {
+ /* re-schedule the gap between the previous and the next acked range */
+ if ((ret = quicly_ranges_add(&state->pending, start, state->acked.ranges[acked_slot].start)) != 0)
+ return ret;
+ }
+ }
+
+ assert(state->acked.ranges[0].end <= state->pending.ranges[0].start);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_sendstate_h
+#define quicly_sendstate_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "contrib/quicly/ranges.h"
+
+typedef struct st_quicly_sendstate_t {
+    /**
+     * ranges that have been acked (guaranteed to be non-empty; i.e., acked.ranges[0].end == contiguous_acked_offset). Offset may
+     * include the EOS position.
+     */
+    quicly_ranges_t acked;
+    /**
+     * ranges that needs to be sent. Offset may include the EOS position.
+     */
+    quicly_ranges_t pending;
+    /**
+     * number of bytes that have been inflight (regardless of acked or not). Used for capping max_data, therefore does not include
+     * eos.
+     */
+    uint64_t size_inflight;
+    /**
+     * UINT64_MAX until closed. Does not include the EOS position.
+     */
+    uint64_t final_size;
+} quicly_sendstate_t;
+
+/**
+ * half-open range [start, end) of stream bytes carried by one sent frame; used as the per-frame record kept in the sentmap
+ */
+typedef struct st_quicly_sendstate_sent_t {
+    uint64_t start;
+    uint64_t end;
+} quicly_sendstate_sent_t;
+
+void quicly_sendstate_init(quicly_sendstate_t *state);
+void quicly_sendstate_init_closed(quicly_sendstate_t *state);
+void quicly_sendstate_dispose(quicly_sendstate_t *state);
+static int quicly_sendstate_transfer_complete(quicly_sendstate_t *state);
+static int quicly_sendstate_is_open(quicly_sendstate_t *state);
+/**
+ * Returns if some data or EOS can be sent for the stream. When `max_stream_data` is non-NULL, stream-level flow control is tested.
+ * When `max_stream_data` is NULL, returns if something can be sent when the connection is capped by the connection-level flow
+ * control.
+ */
+int quicly_sendstate_can_send(quicly_sendstate_t *state, const uint64_t *max_stream_data);
+int quicly_sendstate_activate(quicly_sendstate_t *state);
+int quicly_sendstate_shutdown(quicly_sendstate_t *state, uint64_t final_size);
+void quicly_sendstate_reset(quicly_sendstate_t *state);
+int quicly_sendstate_acked(quicly_sendstate_t *state, quicly_sendstate_sent_t *args, int is_active, size_t *bytes_to_shift);
+int quicly_sendstate_lost(quicly_sendstate_t *state, quicly_sendstate_sent_t *args);
+
+/* inline definitions */
+
+/* returns non-zero once the send side has been closed and everything up to and including the EOS position has been acked */
+inline int quicly_sendstate_transfer_complete(quicly_sendstate_t *state)
+{
+    return state->final_size != UINT64_MAX && state->acked.ranges[0].end == state->final_size + 1;
+}
+
+/* returns non-zero while the send side is still open (i.e., no final size has been set) */
+inline int quicly_sendstate_is_open(quicly_sendstate_t *state)
+{
+    return state->final_size == UINT64_MAX;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <stdlib.h>
+#include "contrib/quicly/picotls/picotls.h"
+#include "contrib/quicly/sentmap.h"
+
+/* Sentinel entry that iterators are parked on after the last element; tagged as a packet header so loops that scan for the next
+ * packet terminate on it. */
+const quicly_sent_t quicly_sentmap__end_iter = {quicly_sentmap__type_packet, {{UINT64_MAX, INT64_MAX}}};
+
+/* Advances the iterator to the next live entry, crossing block boundaries as needed. Slots with `acked == NULL` have been
+ * discarded and are skipped; when no block remains the iterator is parked on quicly_sentmap__end_iter. */
+static void next_entry(quicly_sentmap_iter_t *iter)
+{
+    if (--iter->count != 0) {
+        ++iter->p;
+    } else if (*(iter->ref = &(*iter->ref)->next) == NULL) {
+        /* no more blocks; park the iterator on the end sentinel */
+        iter->p = (quicly_sent_t *)&quicly_sentmap__end_iter;
+        iter->count = 0;
+        return;
+    } else {
+        assert((*iter->ref)->num_entries != 0);
+        iter->count = (*iter->ref)->num_entries;
+        iter->p = (*iter->ref)->entries;
+    }
+    /* skip over discarded slots */
+    while (iter->p->acked == NULL)
+        ++iter->p;
+}
+
+/* Unlinks and frees the block `*ref`, returning the link from which iteration should continue. When the freed block was the
+ * tail, a pointer to a static dummy (whose `next` is NULL) is returned so that the caller's subsequent `*ref` dereference stays
+ * valid. */
+static struct st_quicly_sent_block_t **free_block(quicly_sentmap_t *map, struct st_quicly_sent_block_t **ref)
+{
+    static const struct st_quicly_sent_block_t dummy = {NULL};
+    static const struct st_quicly_sent_block_t *const dummy_ref = &dummy;
+    struct st_quicly_sent_block_t *block = *ref;
+
+    if (block->next != NULL) {
+        *ref = block->next;
+        assert((*ref)->num_entries != 0);
+    } else {
+        assert(block == map->tail);
+        if (ref == &map->head) {
+            map->head = NULL;
+            map->tail = NULL;
+        } else {
+            /* recover the address of the predecessor block from the address of its `next` member */
+            map->tail = (void *)((char *)ref - offsetof(struct st_quicly_sent_block_t, next));
+            map->tail->next = NULL;
+        }
+        ref = (struct st_quicly_sent_block_t **)&dummy_ref;
+    }
+
+    free(block);
+    return ref;
+}
+
+/* Marks the entry under the iterator as discarded (acked = NULL). Once the containing block has no live entries left it is
+ * freed, and the iterator is rewound so that the following next_entry() call lands on the successor block's first entry. */
+static void discard_entry(quicly_sentmap_t *map, quicly_sentmap_iter_t *iter)
+{
+    assert(iter->p->acked != NULL);
+    iter->p->acked = NULL;
+
+    struct st_quicly_sent_block_t *block = *iter->ref;
+    if (--block->num_entries == 0) {
+        iter->ref = free_block(map, iter->ref);
+        block = *iter->ref;
+        /* position one slot before the first entry; next_entry() will advance onto it */
+        iter->p = block->entries - 1;
+        iter->count = block->num_entries + 1;
+    }
+}
+
+/* Releases every block owned by the sentmap, leaving `head` NULL. */
+void quicly_sentmap_dispose(quicly_sentmap_t *map)
+{
+    struct st_quicly_sent_block_t *next;
+
+    for (struct st_quicly_sent_block_t *block = map->head; block != NULL; block = next) {
+        next = block->next;
+        free(block);
+    }
+    map->head = NULL;
+}
+
+/* Opens a write transaction for one packet: allocates the packet-header entry and records the packet number, the value of `now`
+ * at send time, and the ack epoch. Must be paired with quicly_sentmap_commit(). Returns 0 or PTLS_ERROR_NO_MEMORY. */
+int quicly_sentmap_prepare(quicly_sentmap_t *map, uint64_t packet_number, int64_t now, uint8_t ack_epoch)
+{
+    assert(map->_pending_packet == NULL);
+
+    if ((map->_pending_packet = quicly_sentmap_allocate(map, quicly_sentmap__type_packet)) == NULL)
+        return PTLS_ERROR_NO_MEMORY;
+    map->_pending_packet->data.packet = (quicly_sent_packet_t){packet_number, now, ack_epoch};
+    return 0;
+}
+
+/* Allocates an empty block and appends it at the tail of the sentmap's list. Returns the block, or NULL on allocation
+ * failure. */
+struct st_quicly_sent_block_t *quicly_sentmap__new_block(quicly_sentmap_t *map)
+{
+    struct st_quicly_sent_block_t *block;
+
+    if ((block = malloc(sizeof(*block))) == NULL)
+        return NULL;
+
+    block->next = NULL;
+    block->num_entries = 0;
+    block->next_insert_at = 0;
+    if (map->tail != NULL) {
+        map->tail->next = block;
+        map->tail = block;
+    } else {
+        map->head = map->tail = block;
+    }
+
+    return block;
+}
+
+/* Advances the iterator until it points at the next packet-header entry (one whose callback is quicly_sentmap__type_packet);
+ * terminates on the end sentinel, which carries the same tag. */
+void quicly_sentmap_skip(quicly_sentmap_iter_t *iter)
+{
+    do {
+        next_entry(iter);
+    } while (iter->p->acked != quicly_sentmap__type_packet);
+}
+
+int quicly_sentmap_update(quicly_sentmap_t *map, quicly_sentmap_iter_t *iter, quicly_sentmap_event_t event,
+ struct st_quicly_conn_t *conn)
+{
+ quicly_sent_packet_t packet;
+ int notify_lost = 0, ret = 0;
+
+ assert(iter->p != &quicly_sentmap__end_iter);
+ assert(iter->p->acked == quicly_sentmap__type_packet);
+
+ /* copy packet info */
+ packet = iter->p->data.packet;
+
+ /* update packet-level metrics (make adjustments to notify the loss when discarding a packet that is still deemed inflight) */
+ if (packet.bytes_in_flight != 0) {
+ if (event == QUICLY_SENTMAP_EVENT_EXPIRED)
+ notify_lost = 1;
+ assert(map->bytes_in_flight >= packet.bytes_in_flight);
+ map->bytes_in_flight -= packet.bytes_in_flight;
+ }
+ iter->p->data.packet.bytes_in_flight = 0;
+
+ /* Remove entry from sentmap, unless packet is deemed lost. If lost, then hold on to this packet until removed by a
+ * QUICLY_SENTMAP_EVENT_EXPIRED event. */
+ if (event != QUICLY_SENTMAP_EVENT_LOST)
+ discard_entry(map, iter);
+
+ /* iterate through the frames */
+ for (next_entry(iter); iter->p->acked != quicly_sentmap__type_packet; next_entry(iter)) {
+ if (notify_lost && ret == 0)
+ ret = iter->p->acked(conn, &packet, iter->p, QUICLY_SENTMAP_EVENT_LOST);
+ if (ret == 0)
+ ret = iter->p->acked(conn, &packet, iter->p, event);
+ if (event != QUICLY_SENTMAP_EVENT_LOST)
+ discard_entry(map, iter);
+ }
+
+ return ret;
+}
+
+/* Tag callback identifying packet-header entries; it only serves as a discriminator and must never actually be invoked. */
+int quicly_sentmap__type_packet(struct st_quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+                                quicly_sentmap_event_t event)
+{
+    assert(!"quicly_sentmap__type_packet cannot be called");
+    return QUICLY_TRANSPORT_ERROR_INTERNAL;
+}
--- /dev/null
+/*
+ * Copyright (c) 2017 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_sentmap_h
+#define quicly_sentmap_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <assert.h>
+#include <stdint.h>
+#include "contrib/quicly/constants.h"
+#include "contrib/quicly/maxsender.h"
+#include "contrib/quicly/sendstate.h"
+
+struct st_quicly_conn_t;
+typedef struct st_quicly_sent_t quicly_sent_t;
+
+typedef struct st_quicly_sent_packet_t {
+    /**
+     * packet number of the sent packet
+     */
+    uint64_t packet_number;
+    /**
+     * value of `now` passed to quicly_sentmap_prepare when the packet was sent
+     */
+    int64_t sent_at;
+    /**
+     * epoch to be acked in
+     */
+    uint8_t ack_epoch;
+    /**
+     * set by quicly_sentmap_commit when the packet counts towards bytes in-flight
+     */
+    uint8_t ack_eliciting : 1;
+    /**
+     * number of bytes in-flight for the packet (becomes zero once deemed lost)
+     */
+    uint16_t bytes_in_flight;
+} quicly_sent_packet_t;
+
+typedef enum en_quicly_sentmap_event_t {
+    /**
+     * a packet (or a frame) has been acked
+     */
+    QUICLY_SENTMAP_EVENT_ACKED,
+    /**
+     * a packet (or a frame) is deemed lost
+     */
+    QUICLY_SENTMAP_EVENT_LOST,
+    /**
+     * a packet (or a frame) is being removed from the sentmap (e.g., after 3 pto, the epoch being discarded)
+     */
+    QUICLY_SENTMAP_EVENT_EXPIRED
+} quicly_sentmap_event_t;
+
+typedef int (*quicly_sent_acked_cb)(struct st_quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *data,
+ quicly_sentmap_event_t event);
+
+/**
+ * One sentmap entry: either a packet header or the state of a single frame carried by it, discriminated by `acked` (packet
+ * headers use quicly_sentmap__type_packet; NULL marks a discarded slot).
+ */
+struct st_quicly_sent_t {
+    quicly_sent_acked_cb acked;
+    union {
+        quicly_sent_packet_t packet;
+        struct {
+            quicly_range_t range;
+        } ack;
+        struct {
+            quicly_stream_id_t stream_id;
+            quicly_sendstate_sent_t args;
+        } stream;
+        struct {
+            quicly_stream_id_t stream_id;
+            quicly_maxsender_sent_t args;
+        } max_stream_data;
+        struct {
+            quicly_maxsender_sent_t args;
+        } max_data;
+        struct {
+            int uni;
+            quicly_maxsender_sent_t args;
+        } max_streams;
+        struct {
+            int uni;
+            quicly_maxsender_sent_t args;
+        } streams_blocked;
+        struct {
+            quicly_stream_id_t stream_id;
+        } stream_state_sender;
+        struct {
+            int is_inflight;
+            uint64_t generation;
+        } new_token;
+    } data;
+};
+
+/**
+ * Fixed-capacity chunk of the sentmap's singly-linked list of entries.
+ */
+struct st_quicly_sent_block_t {
+    /**
+     * next block if exists (or NULL)
+     */
+    struct st_quicly_sent_block_t *next;
+    /**
+     * number of entries in the block
+     */
+    size_t num_entries;
+    /**
+     * insertion index within `entries`
+     */
+    size_t next_insert_at;
+    /**
+     * slots
+     */
+    quicly_sent_t entries[16];
+};
+
+/**
+ * quicly_sentmap_t is a structure that holds a list of sent objects being tracked. The list is a list of packet header and
+ * frame-level objects of that packet. Packet header is identified by quicly_sent_t::acked being quicly_sentmap__type_packet.
+ *
+ * The transport writes to the sentmap in the following way:
+ * 1. call quicly_sentmap_prepare
+ * 2. repeatedly call quicly_sentmap_allocate to allocate frame-level objects and initialize them
+ * 3. call quicly_sentmap_commit
+ *
+ * The transport iterates (and mutates) the sentmap in the following way:
+ * 1. call quicly_sentmap_init_iter
+ * 2. call quicly_sentmap_get to obtain the packet header that the iterator points to
+ * 3. call quicly_sentmap_update to update the states of the packet that the iterator points to (as well as the state of the frames
+ * that were part of the packet) and move the iterator to the next packet header. The function is also used for discarding
+ * entries from the sent map.
+ * 4. call quicly_sentmap_skip to move the iterator to the next packet header
+ *
+ * Note that quicly_sentmap_update and quicly_sentmap_skip move the iterator to the next packet header.
+ */
+typedef struct st_quicly_sentmap_t {
+    /**
+     * the linked list includes entries that are deemed lost (up to 3*SRTT) as well
+     */
+    struct st_quicly_sent_block_t *head, *tail;
+    /**
+     * bytes in-flight
+     */
+    size_t bytes_in_flight;
+    /**
+     * is non-NULL between prepare and commit, pointing to the packet header that is being written to
+     */
+    quicly_sent_t *_pending_packet;
+} quicly_sentmap_t;
+
+/**
+ * iterator over sentmap entries; initialize with quicly_sentmap_init_iter
+ */
+typedef struct st_quicly_sentmap_iter_t {
+    quicly_sent_t *p;                    /* current entry (points at quicly_sentmap__end_iter once past the end) */
+    size_t count;                        /* entries remaining in the current block, including *p */
+    struct st_quicly_sent_block_t **ref; /* pointer to the link that references the current block */
+} quicly_sentmap_iter_t;
+
+extern const quicly_sent_t quicly_sentmap__end_iter;
+
+/**
+ * initializes the sentmap
+ */
+static void quicly_sentmap_init(quicly_sentmap_t *map);
+/**
+ *
+ */
+void quicly_sentmap_dispose(quicly_sentmap_t *map);
+
+/**
+ * if transaction is open (i.e. between prepare and commit)
+ */
+static int quicly_sentmap_is_open(quicly_sentmap_t *map);
+/**
+ * prepares a write
+ */
+int quicly_sentmap_prepare(quicly_sentmap_t *map, uint64_t packet_number, int64_t now, uint8_t ack_epoch);
+/**
+ * commits a write
+ */
+static void quicly_sentmap_commit(quicly_sentmap_t *map, uint16_t bytes_in_flight);
+/**
+ * Allocates a slot to contain a callback for a frame. The function MUST be called after _prepare but before _commit.
+ */
+static quicly_sent_t *quicly_sentmap_allocate(quicly_sentmap_t *map, quicly_sent_acked_cb acked);
+
+/**
+ * initializes the iterator
+ */
+static void quicly_sentmap_init_iter(quicly_sentmap_t *map, quicly_sentmap_iter_t *iter);
+/**
+ * returns the current packet pointed to by the iterator
+ */
+static const quicly_sent_packet_t *quicly_sentmap_get(quicly_sentmap_iter_t *iter);
+/**
+ * advances the iterator to the next packet
+ */
+void quicly_sentmap_skip(quicly_sentmap_iter_t *iter);
+/**
+ * updates the state of the packet being pointed to by the iterator, _and advances to the next packet_
+ */
+int quicly_sentmap_update(quicly_sentmap_t *map, quicly_sentmap_iter_t *iter, quicly_sentmap_event_t event,
+ struct st_quicly_conn_t *conn);
+
+struct st_quicly_sent_block_t *quicly_sentmap__new_block(quicly_sentmap_t *map);
+int quicly_sentmap__type_packet(struct st_quicly_conn_t *conn, const quicly_sent_packet_t *packet, quicly_sent_t *sent,
+ quicly_sentmap_event_t event);
+
+/* inline definitions */
+
+/* zero-initializes the sentmap */
+inline void quicly_sentmap_init(quicly_sentmap_t *map)
+{
+    *map = (quicly_sentmap_t){NULL};
+}
+
+/* returns whether a prepare..commit transaction is in progress */
+inline int quicly_sentmap_is_open(quicly_sentmap_t *map)
+{
+    return map->_pending_packet != NULL;
+}
+
+/* Closes the transaction opened by quicly_sentmap_prepare; a non-zero `bytes_in_flight` marks the packet as ack-eliciting and
+ * adds the amount to the map's total. */
+inline void quicly_sentmap_commit(quicly_sentmap_t *map, uint16_t bytes_in_flight)
+{
+    assert(quicly_sentmap_is_open(map));
+
+    if (bytes_in_flight != 0) {
+        map->_pending_packet->data.packet.ack_eliciting = 1;
+        map->_pending_packet->data.packet.bytes_in_flight = bytes_in_flight;
+        map->bytes_in_flight += bytes_in_flight;
+    }
+    map->_pending_packet = NULL;
+}
+
+/* Allocates a slot in the tail block (creating a new block when the current one is full) and registers the given callback.
+ * Returns NULL on allocation failure. */
+inline quicly_sent_t *quicly_sentmap_allocate(quicly_sentmap_t *map, quicly_sent_acked_cb acked)
+{
+    struct st_quicly_sent_block_t *block;
+
+    if ((block = map->tail) == NULL || block->next_insert_at == sizeof(block->entries) / sizeof(block->entries[0])) {
+        if ((block = quicly_sentmap__new_block(map)) == NULL)
+            return NULL;
+    }
+
+    quicly_sent_t *sent = block->entries + block->next_insert_at++;
+    ++block->num_entries;
+
+    sent->acked = acked;
+
+    return sent;
+}
+
+/* Points the iterator at the first packet header in the map, or at the end sentinel when the map is empty. */
+inline void quicly_sentmap_init_iter(quicly_sentmap_t *map, quicly_sentmap_iter_t *iter)
+{
+    iter->ref = &map->head;
+    if (map->head != NULL) {
+        assert(map->head->num_entries != 0);
+        /* skip leading discarded slots; the first live entry must be a packet header */
+        for (iter->p = map->head->entries; iter->p->acked == NULL; ++iter->p)
+            ;
+        assert(iter->p->acked == quicly_sentmap__type_packet);
+        iter->count = map->head->num_entries;
+    } else {
+        iter->p = (quicly_sent_t *)&quicly_sentmap__end_iter;
+        iter->count = 0;
+    }
+}
+
+/* Returns the packet header currently under the iterator. */
+inline const quicly_sent_packet_t *quicly_sentmap_get(quicly_sentmap_iter_t *iter)
+{
+    assert(iter->p->acked == quicly_sentmap__type_packet);
+    return &iter->p->data.packet;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2018 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "contrib/quicly/streambuf.h"
+
+/* Translates a non-zero error from a user/flatten callback into the appropriate QUIC action: application-level errors reset the
+ * send side and/or request a stop of the receive side, everything else closes the connection (non-transport errors are mapped
+ * to an internal error). */
+static void convert_error(quicly_stream_t *stream, int err)
+{
+    assert(err != 0);
+    if (QUICLY_ERROR_IS_QUIC_APPLICATION(err)) {
+        if (quicly_stream_has_send_side(quicly_is_client(stream->conn), stream->stream_id) &&
+            quicly_sendstate_is_open(&stream->sendstate))
+            quicly_reset_stream(stream, err);
+        if (quicly_stream_has_receive_side(quicly_is_client(stream->conn), stream->stream_id))
+            quicly_request_stop(stream, err);
+    } else {
+        quicly_close(stream->conn, QUICLY_ERROR_IS_QUIC_TRANSPORT(err) ? err : QUICLY_TRANSPORT_ERROR_INTERNAL, NULL);
+    }
+}
+
+/* Releases every queued vector (invoking its discard callback when one is set) and frees the backing array. */
+void quicly_sendbuf_dispose(quicly_sendbuf_t *sb)
+{
+    for (size_t i = 0; i < sb->vecs.size; ++i) {
+        quicly_sendbuf_vec_t *vec = &sb->vecs.entries[i];
+        if (vec->cb->discard_vec != NULL)
+            vec->cb->discard_vec(vec);
+    }
+    free(sb->vecs.entries);
+}
+
+/* Drops `delta` bytes from the front of the send buffer (data the peer has acknowledged), invoking discard callbacks for
+ * fully-consumed vectors, compacting the vector array, and finally re-syncing the stream's send state. */
+void quicly_sendbuf_shift(quicly_stream_t *stream, quicly_sendbuf_t *sb, size_t delta)
+{
+    size_t i;
+
+    for (i = 0; delta != 0; ++i) {
+        assert(i < sb->vecs.size);
+        quicly_sendbuf_vec_t *first_vec = sb->vecs.entries + i;
+        size_t bytes_in_first_vec = first_vec->len - sb->off_in_first_vec;
+        if (delta < bytes_in_first_vec) {
+            /* partially consumed vector: remember the offset within it */
+            sb->off_in_first_vec += delta;
+            break;
+        }
+        delta -= bytes_in_first_vec;
+        if (first_vec->cb->discard_vec != NULL)
+            first_vec->cb->discard_vec(first_vec);
+        sb->off_in_first_vec = 0;
+    }
+    if (i != 0) {
+        if (sb->vecs.size != i) {
+            /* shift the surviving vectors to the front of the array */
+            memmove(sb->vecs.entries, sb->vecs.entries + i, (sb->vecs.size - i) * sizeof(*sb->vecs.entries));
+            sb->vecs.size -= i;
+        } else {
+            /* everything was consumed; release the array */
+            free(sb->vecs.entries);
+            sb->vecs.entries = NULL;
+            sb->vecs.size = 0;
+            sb->vecs.capacity = 0;
+        }
+    }
+    quicly_stream_sync_sendbuf(stream, 0);
+}
+
+/* Serializes up to `*len` bytes starting at buffer offset `off` into `dst` by flattening the stored vectors. On return `*len`
+ * holds the number of bytes written and `*wrote_all` indicates whether the end of the buffered data was reached. A failing
+ * flatten callback is reported through convert_error() and aborts the emission. */
+void quicly_sendbuf_emit(quicly_stream_t *stream, quicly_sendbuf_t *sb, size_t off, void *dst, size_t *len, int *wrote_all)
+{
+    size_t vec_index, capacity = *len;
+    int ret;
+
+    /* account for bytes of the first vector that have already been shifted out */
+    off += sb->off_in_first_vec;
+    for (vec_index = 0; capacity != 0 && vec_index < sb->vecs.size; ++vec_index) {
+        quicly_sendbuf_vec_t *vec = sb->vecs.entries + vec_index;
+        if (off < vec->len) {
+            size_t bytes_flatten = vec->len - off;
+            int partial = 0;
+            if (capacity < bytes_flatten) {
+                bytes_flatten = capacity;
+                partial = 1;
+            }
+            if ((ret = vec->cb->flatten_vec(vec, dst, off, bytes_flatten)) != 0) {
+                convert_error(stream, ret);
+                return;
+            }
+            dst = (uint8_t *)dst + bytes_flatten;
+            capacity -= bytes_flatten;
+            off = 0;
+            if (partial)
+                break;
+        } else {
+            /* the requested offset lies beyond this vector; skip it */
+            off -= vec->len;
+        }
+    }
+
+    if (capacity == 0 && vec_index < sb->vecs.size) {
+        *wrote_all = 0;
+    } else {
+        *len = *len - capacity;
+        *wrote_all = 1;
+    }
+}
+
+/* flatten callback for raw (malloc-backed) vectors: a plain memcpy from the owned copy */
+static int flatten_raw(quicly_sendbuf_vec_t *vec, void *dst, size_t off, size_t len)
+{
+    memcpy(dst, (uint8_t *)vec->cbdata + off, len);
+    return 0;
+}
+
+/* discard callback for raw vectors: frees the owned copy */
+static void discard_raw(quicly_sendbuf_vec_t *vec)
+{
+    free(vec->cbdata);
+}
+
+/* Copies `len` bytes from `src` into a freshly allocated raw vector and appends it to the send buffer. Returns 0 on success,
+ * PTLS_ERROR_NO_MEMORY or the error from quicly_sendbuf_write_vec on failure (the copy is freed on all error paths). */
+int quicly_sendbuf_write(quicly_stream_t *stream, quicly_sendbuf_t *sb, const void *src, size_t len)
+{
+    static const quicly_streambuf_sendvec_callbacks_t raw_callbacks = {flatten_raw, discard_raw};
+    quicly_sendbuf_vec_t vec = {&raw_callbacks, len, NULL};
+    int ret;
+
+    assert(quicly_sendstate_is_open(&stream->sendstate));
+
+    if ((vec.cbdata = malloc(len)) == NULL) {
+        ret = PTLS_ERROR_NO_MEMORY;
+        goto Error;
+    }
+    memcpy(vec.cbdata, src, len);
+    if ((ret = quicly_sendbuf_write_vec(stream, sb, &vec)) != 0)
+        goto Error;
+    return 0;
+
+Error:
+    free(vec.cbdata);
+    return ret;
+}
+
+/* Appends a shallow copy of `*vec` to the send buffer, growing the vector array geometrically (4, 8, 16, ...), and notifies
+ * the stream that new data is available. Returns 0 or PTLS_ERROR_NO_MEMORY. */
+int quicly_sendbuf_write_vec(quicly_stream_t *stream, quicly_sendbuf_t *sb, quicly_sendbuf_vec_t *vec)
+{
+    assert(sb->vecs.size <= sb->vecs.capacity);
+
+    if (sb->vecs.size == sb->vecs.capacity) {
+        quicly_sendbuf_vec_t *new_entries;
+        size_t new_capacity = sb->vecs.capacity == 0 ? 4 : sb->vecs.capacity * 2;
+        if ((new_entries = realloc(sb->vecs.entries, new_capacity * sizeof(*sb->vecs.entries))) == NULL)
+            return PTLS_ERROR_NO_MEMORY;
+        sb->vecs.entries = new_entries;
+        sb->vecs.capacity = new_capacity;
+    }
+    sb->vecs.entries[sb->vecs.size++] = *vec;
+    sb->bytes_written += vec->len;
+
+    return quicly_stream_sync_sendbuf(stream, 1);
+}
+
+/* Pops `delta` processed bytes from the front of the receive buffer and reports the consumption so that flow-control credit
+ * can be replenished. */
+void quicly_recvbuf_shift(quicly_stream_t *stream, ptls_buffer_t *rb, size_t delta)
+{
+    assert(delta <= rb->off);
+    rb->off -= delta;
+    memmove(rb->base, rb->base + delta, rb->off);
+
+    quicly_stream_sync_recvbuf(stream, delta);
+}
+
+/* Returns an iovec covering the bytes the application may read: everything buffered once the transfer is complete, otherwise
+ * only the contiguous bytes received so far (up to the first gap). */
+ptls_iovec_t quicly_recvbuf_get(quicly_stream_t *stream, ptls_buffer_t *rb)
+{
+    size_t avail;
+
+    if (quicly_recvstate_transfer_complete(&stream->recvstate)) {
+        avail = rb->off;
+    } else if (stream->recvstate.data_off < stream->recvstate.received.ranges[0].end) {
+        avail = stream->recvstate.received.ranges[0].end - stream->recvstate.data_off;
+    } else {
+        avail = 0;
+    }
+
+    return ptls_iovec_init(rb->base, avail);
+}
+
+/* Stores `len` bytes received at stream offset `off` into the receive buffer, growing it as necessary (out-of-order writes may
+ * extend `rb->off` past previously-unfilled positions). On allocation failure the error is reported via convert_error() and -1
+ * is returned; otherwise returns 0. */
+int quicly_recvbuf_receive(quicly_stream_t *stream, ptls_buffer_t *rb, size_t off, const void *src, size_t len)
+{
+    if (len != 0) {
+        int ret;
+        if ((ret = ptls_buffer_reserve(rb, off + len - rb->off)) != 0) {
+            convert_error(stream, ret);
+            return -1;
+        }
+        memcpy(rb->base + off, src, len);
+        if (rb->off < off + len)
+            rb->off = off + len;
+    }
+    return 0;
+}
+
+/* Allocates a streambuf of `sz` bytes (>= sizeof(quicly_streambuf_t); any trailing space is zeroed for application use) and
+ * attaches it to the stream via stream->data. Returns 0 or PTLS_ERROR_NO_MEMORY. */
+int quicly_streambuf_create(quicly_stream_t *stream, size_t sz)
+{
+    quicly_streambuf_t *sbuf;
+
+    assert(sz >= sizeof(*sbuf));
+    assert(stream->data == NULL);
+
+    if ((sbuf = malloc(sz)) == NULL)
+        return PTLS_ERROR_NO_MEMORY;
+    quicly_sendbuf_init(&sbuf->egress);
+    ptls_buffer_init(&sbuf->ingress, "", 0);
+    if (sz != sizeof(*sbuf))
+        memset((char *)sbuf + sizeof(*sbuf), 0, sz - sizeof(*sbuf));
+
+    stream->data = sbuf;
+    return 0;
+}
+
+/* Disposes of both directions of the streambuf and detaches it from the stream (the `err` parameter is unused here). */
+void quicly_streambuf_destroy(quicly_stream_t *stream, int err)
+{
+    quicly_streambuf_t *sbuf = stream->data;
+
+    quicly_sendbuf_dispose(&sbuf->egress);
+    ptls_buffer_dispose(&sbuf->ingress);
+    free(sbuf);
+    stream->data = NULL;
+}
+
+/* The concrete `on_send_emit` callback; forwards to quicly_sendbuf_emit on the egress buffer. */
+void quicly_streambuf_egress_emit(quicly_stream_t *stream, size_t off, void *dst, size_t *len, int *wrote_all)
+{
+    quicly_streambuf_t *sbuf = stream->data;
+    quicly_sendbuf_emit(stream, &sbuf->egress, off, dst, len, wrote_all);
+}
+
+/* Closes the send side at the current write offset and syncs the stream's send state. */
+int quicly_streambuf_egress_shutdown(quicly_stream_t *stream)
+{
+    quicly_streambuf_t *sbuf = stream->data;
+    quicly_sendstate_shutdown(&stream->sendstate, sbuf->egress.bytes_written);
+    return quicly_stream_sync_sendbuf(stream, 1);
+}
+
+/* The concrete `on_receive` callback; forwards to quicly_recvbuf_receive on the ingress buffer. */
+int quicly_streambuf_ingress_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len)
+{
+    quicly_streambuf_t *sbuf = stream->data;
+    return quicly_recvbuf_receive(stream, &sbuf->ingress, off, src, len);
+}
--- /dev/null
+/*
+ * Copyright (c) 2018 Fastly, Kazuho Oku
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef quicly_streambuf_h
+#define quicly_streambuf_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include "contrib/quicly/picotls/picotls.h"
+#include "contrib/quicly/quicly.h"
+
+typedef struct st_quicly_sendbuf_vec_t quicly_sendbuf_vec_t;
+
+/**
+ * Callback that flattens the contents of an iovec.
+ * @param dst the destination
+ * @param off offset within the iovec from where serialization should happen
+ * @param len number of bytes to serialize
+ * @return 0 if successful, otherwise an error code
+ */
+typedef int (*quicly_sendbuf_flatten_vec_cb)(quicly_sendbuf_vec_t *vec, void *dst, size_t off, size_t len);
+/**
+ * An optional callback that is called when an iovec is discarded.
+ */
+typedef void (*quicly_sendbuf_discard_vec_cb)(quicly_sendbuf_vec_t *vec);
+
+typedef struct st_quicly_streambuf_sendvec_callbacks_t {
+ quicly_sendbuf_flatten_vec_cb flatten_vec;
+ quicly_sendbuf_discard_vec_cb discard_vec;
+} quicly_streambuf_sendvec_callbacks_t;
+
+struct st_quicly_sendbuf_vec_t {
+    const quicly_streambuf_sendvec_callbacks_t *cb; /* flatten / discard callbacks */
+    size_t len;                                     /* number of bytes the vector represents */
+    void *cbdata;                                   /* callback-private data */
+};
+
+/**
+ * A simple stream-level send buffer that can be used to store data to be sent.
+ */
+typedef struct st_quicly_sendbuf_t {
+    struct {
+        quicly_sendbuf_vec_t *entries;
+        size_t size, capacity;
+    } vecs;
+    /**
+     * number of bytes of the first vector that have already been shifted out
+     */
+    size_t off_in_first_vec;
+    /**
+     * total number of bytes ever written to the buffer
+     */
+    uint64_t bytes_written;
+} quicly_sendbuf_t;
+
+/**
+ * Initializes the send buffer.
+ */
+static void quicly_sendbuf_init(quicly_sendbuf_t *sb);
+/**
+ * Disposes of the send buffer.
+ */
+void quicly_sendbuf_dispose(quicly_sendbuf_t *sb);
+/**
+ * The concrete function to be used when `quicly_stream_callbacks_t::on_send_shift` is being invoked (i.e., applications using
+ * `quicly_sendbuf_t` as the stream-level send buffer should call this function from it's `on_send_shift` callback).
+ */
+void quicly_sendbuf_shift(quicly_stream_t *stream, quicly_sendbuf_t *sb, size_t delta);
+/**
+ * The concrete function for `quicly_stream_callbacks_t::on_send_emit`.
+ */
+void quicly_sendbuf_emit(quicly_stream_t *stream, quicly_sendbuf_t *sb, size_t off, void *dst, size_t *len, int *wrote_all);
+/**
+ * Appends some bytes to the send buffer. The data being appended is copied.
+ */
+int quicly_sendbuf_write(quicly_stream_t *stream, quicly_sendbuf_t *sb, const void *src, size_t len);
+/**
+ * Appends a vector to the send buffer. Members of the `quicly_sendbuf_vec_t` are copied.
+ */
+int quicly_sendbuf_write_vec(quicly_stream_t *stream, quicly_sendbuf_t *sb, quicly_sendbuf_vec_t *vec);
+
+/**
+ * Pops the specified amount of bytes at the beginning of the simple stream-level receive buffer (which in fact is `ptls_buffer_t`).
+ */
+void quicly_recvbuf_shift(quicly_stream_t *stream, ptls_buffer_t *rb, size_t delta);
+/**
+ * Returns an iovec that refers to the data available in the receive buffer. Applications are expected to call `quicly_recvbuf_get`
+ * to first peek at the received data, process the bytes they can, then call `quicly_recvbuf_shift` to pop the bytes that have been
+ * processed.
+ */
+ptls_iovec_t quicly_recvbuf_get(quicly_stream_t *stream, ptls_buffer_t *rb);
+/**
+ * The concrete function for `quicly_stream_callbacks_t::on_receive`.
+ */
+int quicly_recvbuf_receive(quicly_stream_t *stream, ptls_buffer_t *rb, size_t off, const void *src, size_t len);
+
+/**
+ * The simple stream buffer. The API assumes that stream->data points to quicly_streambuf_t. Applications can extend the structure
+ * by passing arbitrary size to `quicly_streambuf_create`.
+ */
+typedef struct st_quicly_streambuf_t {
+    quicly_sendbuf_t egress;  /* data queued for sending */
+    ptls_buffer_t ingress;    /* reassembled received data */
+} quicly_streambuf_t;
+
+int quicly_streambuf_create(quicly_stream_t *stream, size_t sz);
+void quicly_streambuf_destroy(quicly_stream_t *stream, int err);
+static void quicly_streambuf_egress_shift(quicly_stream_t *stream, size_t delta);
+void quicly_streambuf_egress_emit(quicly_stream_t *stream, size_t off, void *dst, size_t *len, int *wrote_all);
+static int quicly_streambuf_egress_write(quicly_stream_t *stream, const void *src, size_t len);
+static int quicly_streambuf_egress_write_vec(quicly_stream_t *stream, quicly_sendbuf_vec_t *vec);
+int quicly_streambuf_egress_shutdown(quicly_stream_t *stream);
+static void quicly_streambuf_ingress_shift(quicly_stream_t *stream, size_t delta);
+static ptls_iovec_t quicly_streambuf_ingress_get(quicly_stream_t *stream);
+/**
+ * Writes given data into `quicly_stream_buf_t::ingress` and returns 0 if successful. Upon failure, `quicly_close` is called
+ * automatically, and a non-zero value is returned. Applications can ignore the returned value, or use it to find out if it can use
+ * the information stored in the ingress buffer.
+ */
+int quicly_streambuf_ingress_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len);
+
+/* inline definitions */
+
+/* zero-initializes the send buffer */
+inline void quicly_sendbuf_init(quicly_sendbuf_t *sb)
+{
+    memset(sb, 0, sizeof(*sb));
+}
+
+/* `on_send_shift` convenience wrapper operating on the streambuf stored in stream->data */
+inline void quicly_streambuf_egress_shift(quicly_stream_t *stream, size_t delta)
+{
+    quicly_streambuf_t *sbuf = (quicly_streambuf_t *)stream->data;
+    quicly_sendbuf_shift(stream, &sbuf->egress, delta);
+}
+
+/* appends a copy of `src` to the stream's egress buffer */
+inline int quicly_streambuf_egress_write(quicly_stream_t *stream, const void *src, size_t len)
+{
+    quicly_streambuf_t *sbuf = (quicly_streambuf_t *)stream->data;
+    return quicly_sendbuf_write(stream, &sbuf->egress, src, len);
+}
+
+/* appends a caller-provided vector to the stream's egress buffer */
+inline int quicly_streambuf_egress_write_vec(quicly_stream_t *stream, quicly_sendbuf_vec_t *vec)
+{
+    quicly_streambuf_t *sbuf = (quicly_streambuf_t *)stream->data;
+    return quicly_sendbuf_write_vec(stream, &sbuf->egress, vec);
+}
+
+/* pops `delta` processed bytes from the stream's ingress buffer */
+inline void quicly_streambuf_ingress_shift(quicly_stream_t *stream, size_t delta)
+{
+    quicly_streambuf_t *sbuf = (quicly_streambuf_t *)stream->data;
+    quicly_recvbuf_shift(stream, &sbuf->ingress, delta);
+}
+
+/* peeks at the bytes currently readable from the stream's ingress buffer */
+inline ptls_iovec_t quicly_streambuf_ingress_get(quicly_stream_t *stream)
+{
+    quicly_streambuf_t *sbuf = (quicly_streambuf_t *)stream->data;
+    return quicly_recvbuf_get(stream, &sbuf->ingress);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
-/* Copyright (C) 2015-2019 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+/* Copyright (C) 2015-2020 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
* SPDX-License-Identifier: GPL-3.0-or-later
*/
#include "contrib/base64.h"
#include "daemon/network.h"
#include "daemon/tls.h"
+#include "daemon/quic.h"
#include "daemon/worker.h"
#include <stdlib.h>
/** Listen on an address list represented by the top of lua stack.
* \note kind ownership is not transferred
* \return success */
-static bool net_listen_addrs(lua_State *L, int port, bool tls, const char *kind, bool freebind)
+static bool net_listen_addrs(lua_State *L, int port, bool tls, bool quic, const char *kind, bool freebind)
{
/* Case: table with 'addr' field; only follow that field directly. */
lua_getfield(L, -1, "addr");
if (str != NULL) {
struct engine *engine = engine_luaget(L);
int ret = 0;
- endpoint_flags_t flags = { .tls = tls, .freebind = freebind };
- if (!kind && !flags.tls) { /* normal UDP */
+ endpoint_flags_t flags = { .tls = tls, .quic = quic, .freebind = freebind };
+ if (!kind && !flags.tls) { /* common for normal UDP and QUIC */
flags.sock_type = SOCK_DGRAM;
ret = network_listen(&engine->net, str, port, flags);
}
- if (!kind && ret == 0) { /* common for normal TCP and TLS */
+ if (!kind && !flags.quic && ret == 0) { /* common for normal TCP and TLS */
flags.sock_type = SOCK_STREAM;
ret = network_listen(&engine->net, str, port, flags);
}
lua_error_p(L, "bad type for address");
lua_pushnil(L);
while (lua_next(L, -2)) {
- if (!net_listen_addrs(L, port, tls, kind, freebind))
+ if (!net_listen_addrs(L, port, tls, quic, kind, freebind))
return false;
lua_pop(L, 1);
}
}
}
+ bool quic = (port == KR_DNS_QUIC_PORT);
bool tls = (port == KR_DNS_TLS_PORT);
bool freebind = false;
const char *kind = NULL;
lua_getfield(L, 3, "kind");
const char *k = lua_tostring(L, -1);
if (k && strcasecmp(k, "dns") == 0) {
+ quic = tls = false;
+ } else
+ if (k && strcasecmp(k, "quic") == 0) {
+ quic = true;
tls = false;
} else
if (k && strcasecmp(k, "tls") == 0) {
+ quic = false;
tls = true;
} else
if (k) {
/* Now focus on the first argument. */
lua_settop(L, 1);
- if (!net_listen_addrs(L, port, tls, kind, freebind))
+ if (!net_listen_addrs(L, port, tls, quic, kind, freebind))
lua_error_p(L, "net.listen() failed to bind");
lua_pushboolean(L, true);
return 1;
return 0;
}
+/** net.quic([cert_file, key_file]) — get or set QUIC TLS credentials.
+ *
+ * With no arguments, returns a table {cert_file=..., key_file=...} of the
+ * currently configured credentials, or nothing when unconfigured.
+ * With two string arguments, loads that certificate/key pair.
+ */
+static int net_quic(lua_State *L)
+{
+	struct engine *engine = engine_luaget(L);
+	if (!engine) {
+		return 0;
+	}
+	/* `net` is an embedded member, so its address can never be NULL —
+	 * the previous `if (!net)` check was dead code. */
+	struct network *net = &engine->net;
+
+	/* Only return current credentials. */
+	if (lua_gettop(L) == 0) {
+		/* No credentials configured yet. */
+		if (!net->quic_credentials) {
+			return 0;
+		}
+		lua_newtable(L);
+		lua_pushstring(L, net->quic_credentials->quic_cert);
+		lua_setfield(L, -2, "cert_file");
+		lua_pushstring(L, net->quic_credentials->quic_key);
+		lua_setfield(L, -2, "key_file");
+		return 1;
+	}
+
+	if ((lua_gettop(L) != 2) || !lua_isstring(L, 1) || !lua_isstring(L, 2))
+		lua_error_p(L, "net.quic takes two parameters: (\"cert_file\", \"key_file\")");
+
+	int r = quic_certificate_set(net, lua_tostring(L, 1), lua_tostring(L, 2));
+	lua_error_maybe(L, r);
+
+	lua_pushboolean(L, true);
+	return 1;
+}
+
int kr_bindings_net(lua_State *L)
{
static const luaL_Reg lib[] = {
{ "bpf_set", net_bpf_set },
{ "bpf_clear", net_bpf_clear },
{ "register_endpoint_kind", net_register_endpoint_kind },
+ { "quic", net_quic },
{ NULL, NULL }
};
luaL_register(L, "net", lib);
#include "daemon/network.h"
#include "daemon/worker.h"
#include "daemon/tls.h"
+#include "daemon/quic.h"
#include "daemon/session.h"
#define negotiate_bufsize(func, handle, bufsize_want) do { \
return fd;
}
-int io_listen_udp(uv_loop_t *loop, uv_udp_t *handle, int fd)
+int io_listen_udp(uv_loop_t *loop, uv_udp_t *handle, int fd, bool has_quic)
{
+ uv_udp_recv_cb recv = has_quic ? &quic_recv : &udp_recv;
if (!handle) {
return kr_error(EINVAL);
}
uv_handle_t *h = (uv_handle_t *)handle;
check_bufsize(h);
/* Handle is already created, just create context. */
- struct session *s = session_new(h, false);
+ struct session *s = session_new(h, false, has_quic);
assert(s);
session_flags(s)->outgoing = false;
abort(); /* It might be nontrivial not to leak something here. */
}
- return io_start_read(h);
+ return uv_udp_recv_start((uv_udp_t *)h, &handle_getbuf, recv);
}
void tcp_timeout_trigger(uv_timer_t *timer)
if (ret != 0) {
return ret;
}
- struct session *s = session_new(handle, has_tls);
+ struct session *s = session_new(handle, has_tls, false);
if (s == NULL) {
ret = -1;
}
/** Bind address into a file-descriptor (only, no libuv). type is e.g. SOCK_DGRAM */
int io_bind(const struct sockaddr *addr, int type, const endpoint_flags_t *flags);
/** Initialize a UDP handle and start listening. */
-int io_listen_udp(uv_loop_t *loop, uv_udp_t *handle, int fd);
+int io_listen_udp(uv_loop_t *loop, uv_udp_t *handle, int fd, bool has_quic);
/** Initialize a TCP handle and start listening. */
int io_listen_tcp(uv_loop_t *loop, uv_tcp_t *handle, int fd, int tcp_backlog, bool has_tls);
/** Initialize a pipe handle and start listening. */
'tls.c',
'tls_ephemeral_credentials.c',
'tls_session_ticket-srv.c',
+ 'quic.c',
'udp_queue.c',
'worker.c',
'zimport.c',
if (!ep->handle) {
return kr_error(ENOMEM);
}
- return io_listen_udp(net->loop, ep_handle, ep->fd);
+ return io_listen_udp(net->loop, ep_handle, ep->fd, ep->flags.quic);
} /* else */
if (ep->flags.sock_type == SOCK_STREAM) {
+ if (ep->flags.quic) {
+ assert(!EINVAL);
+ return kr_error(EINVAL);
+ }
uv_tcp_t *ep_handle = malloc(sizeof(uv_tcp_t));
ep->handle = (uv_handle_t *)ep_handle;
if (!ep->handle) {
#pragma once
#include "daemon/tls.h"
+#include "daemon/quic.h"
#include "lib/generic/array.h"
#include "lib/generic/map.h"
typedef struct {
int sock_type; /**< SOCK_DGRAM or SOCK_STREAM */
bool tls; /**< only used together with .kind == NULL and .tcp */
+ bool quic; /**< only used together with .kind == NULL and .udp */
const char *kind; /**< tag for other types than the three usual */
bool freebind; /**< used for binding to non-local address **/
} endpoint_flags_t;
if (f1.kind && f2.kind)
return strcasecmp(f1.kind, f2.kind);
else
- return f1.tls == f2.tls && f1.kind == f2.kind;
+ return f1.tls == f2.tls && f1.kind == f2.kind && f1.quic == f2.quic;
}
/** Wrapper for a single socket to listen on.
bool missing_kind_is_error;
struct tls_credentials *tls_credentials;
+ struct quic_credentials *quic_credentials;
tls_client_params_t *tls_client_params; /**< Use tls_client_params_*() functions. */
struct tls_session_ticket_ctx *tls_session_ticket_ctx;
struct net_tcp_param tcp;
--- /dev/null
+
+#include <stdlib.h>
+#include <openssl/pem.h>
+
+#include "daemon/quic.h"
+#include "daemon/io.h"
+#include "daemon/worker.h"
+#include "daemon/session.h"
+
+#include "contrib/ucw/mempool.h"
+#include "contrib/quicly/defaults.h"
+#include "contrib/quicly/streambuf.h"
+#include "contrib/quicly/picotls/picotls/openssl.h"
+
+/* quicly callback: the peer sent STOP_SENDING for this stream.
+ * NOTE(review): this logs to stderr instead of kr_log_*, and closes the whole
+ * connection with application error 0 rather than handling the one stream —
+ * probably placeholder behavior; confirm before release. */
+static void on_stop_sending(quicly_stream_t *stream, int err)
+{
+	fprintf(stderr, "received STOP_SENDING: %" PRIu16 "\n", QUICLY_ERROR_GET_ERROR_CODE(err));
+	quicly_close(stream->conn, QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(0), "");
+}
+
+/* quicly callback: stream payload arrived.
+ * Redirects the stream's ingress storage into the session wire buffer so the
+ * received DNS message lands where the worker pipeline expects it, then
+ * processes the message once the stream transfer is complete. */
+static void on_receive(quicly_stream_t *stream, size_t off, const void *src, size_t len)
+{
+	struct session *s = quic_get_session(stream->conn);
+	/* Remember which stream carried the query; picked up when the task is created. */
+	session_quic_get_server_ctx(s)->processed_stream = stream;
+
+	/* Point the ingress buffer into the session wirebuf.
+	 * NOTE(review): if quicly ever needs to grow this buffer it would
+	 * realloc memory it does not own — assumes a message always fits into
+	 * the free wirebuf space; TODO confirm. */
+	quicly_streambuf_t *sbuf = (quicly_streambuf_t *)stream->data;
+	if (sbuf->ingress.is_allocated) {
+		free(sbuf->ingress.base);
+		sbuf->ingress.is_allocated = false;
+	}
+	sbuf->ingress.base = session_wirebuf_get_free_start(s);
+	sbuf->ingress.capacity = session_wirebuf_get_free_size(s);
+
+	/* read input to receive buffer */
+	if (quicly_streambuf_ingress_receive(stream, off, src, len) != 0) {
+		return;
+	}
+
+	/* obtain contiguous bytes from the receive buffer */
+	ptls_iovec_t input = quicly_streambuf_ingress_get(stream);
+
+	if (quicly_recvstate_transfer_complete(&stream->recvstate)) {
+		if (quicly_sendstate_is_open(&stream->sendstate)) {
+			/* Whole query received and the stream can still carry an
+			 * answer: hand it to the resolver pipeline. */
+			session_wirebuf_consume(s, input.base, input.len);
+			session_wirebuf_process(s, quicly_get_peername(stream->conn));
+			session_wirebuf_discard(s);
+			//quicly_streambuf_egress_shutdown(stream);
+		}
+		else {
+			//TODO error (not sure)
+			worker_submit(s, NULL, NULL);
+		}
+	}
+
+	/* remove used bytes from receive buffer */
+	quicly_streambuf_ingress_shift(stream, input.len);
+}
+
+/* quicly callback: the peer reset its sending side of this stream.
+ * NOTE(review): same placeholder behavior as on_stop_sending() — stderr
+ * logging and whole-connection teardown with application error 0. */
+static void on_receive_reset(quicly_stream_t *stream, int err)
+{
+	fprintf(stderr, "received RESET_STREAM: %" PRIu16 "\n", QUICLY_ERROR_GET_ERROR_CODE(err));
+	quicly_close(stream->conn, QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(0), "");
+}
+
+/* quicly callback: the peer opened a new stream — attach a streambuf as the
+ * per-stream storage and install our stream callbacks. */
+static int on_stream_open(quicly_stream_open_t *self, quicly_stream_t *stream)
+{
+	static const quicly_stream_callbacks_t stream_callbacks = {
+		.on_destroy = quicly_streambuf_destroy,
+		.on_send_shift = quicly_streambuf_egress_shift,
+		.on_send_emit = quicly_streambuf_egress_emit,
+		.on_send_stop = on_stop_sending,
+		.on_receive = on_receive,
+		.on_receive_reset = on_receive_reset,
+	};
+
+	int ret = quicly_streambuf_create(stream, sizeof(quicly_streambuf_t));
+	if (ret != 0) {
+		return ret;
+	}
+	stream->callbacks = &stream_callbacks;
+	return 0;
+}
+
+static quicly_stream_open_t stream_open = {on_stream_open};
+
+/** Allocate and initialize a per-session QUIC context.
+ * \return new context, or NULL on allocation failure. */
+struct quic_ctx_t* new_quic(void)
+{
+	struct quic_ctx_t *ctx = calloc(1, sizeof(*ctx));
+	if (!ctx) {
+		return NULL;
+	}
+
+	ctx->quicly = quicly_spec_context;
+	ctx->quicly.stream_open = &stream_open;
+	/* conns[], next_cid, processed_stream are already zeroed by calloc();
+	 * the previous explicit memset of conns was redundant. */
+
+	return ctx;
+}
+
+/* Replace *where_ptr with a heap copy of `with` (NULL clears the slot).
+ * On allocation failure the previous value is left untouched.
+ * Returns kr_ok() or kr_error(ENOMEM). */
+static int str_replace(char **where_ptr, const char *with)
+{
+	char *copy = NULL;
+	if (with != NULL) {
+		copy = strdup(with);
+		if (copy == NULL) {
+			return kr_error(ENOMEM);
+		}
+	}
+	free(*where_ptr);
+	*where_ptr = copy;
+	return kr_ok();
+}
+
+/** Load a PEM certificate/key pair for QUIC and swap it into `net`.
+ *
+ * On any failure the previously configured credentials remain in effect and
+ * the partially built new ones are released (the original leaked them).
+ * \param net        network state to update (must not be NULL)
+ * \param quic_cert  path to a PEM certificate chain
+ * \param quic_key   path to the matching PEM private key
+ * \return kr_ok(), or kr_error(EINVAL/EIO/ENOMEM)
+ */
+int quic_certificate_set(struct network *net, const char *quic_cert, const char *quic_key)
+{
+	if (!net || !quic_cert || !quic_key) {
+		return kr_error(EINVAL);
+	}
+	struct quic_credentials *quic_credentials = calloc(1, sizeof(*quic_credentials));
+	if (quic_credentials == NULL) {
+		return kr_error(ENOMEM);
+	}
+	int ret = kr_error(EINVAL);
+
+	quic_credentials->credentials.random_bytes = ptls_openssl_random_bytes;
+	quic_credentials->credentials.get_time = &ptls_get_time;
+	quic_credentials->credentials.key_exchanges = ptls_openssl_key_exchanges;
+	quic_credentials->credentials.cipher_suites = ptls_openssl_cipher_suites;
+
+	quicly_amend_ptls_context(&quic_credentials->credentials);
+
+	int err = ptls_load_certificates(&quic_credentials->credentials, quic_cert);
+	if (err != 0) {
+		/* `err` is a picotls error code; gnutls_strerror_name() (used
+		 * before) belongs to a different library and does not apply. */
+		kr_log_error("[quic] ptls_load_certificates() failed: %d\n", err);
+		goto fail;
+	}
+
+	FILE *fp = fopen(quic_key, "r");
+	if (fp == NULL) {
+		kr_log_error("[quic] failed to open key file %s: %s\n",
+				quic_key, strerror(errno));
+		ret = kr_error(EIO);
+		goto fail;
+	}
+	EVP_PKEY *pkey = PEM_read_PrivateKey(fp, NULL, NULL, NULL);
+	fclose(fp);
+	if (pkey == NULL) {
+		kr_log_error("[quic] failed to load private key from file %s\n", quic_key);
+		ret = kr_error(EIO);
+		goto fail;
+	}
+	ptls_openssl_init_sign_certificate(&quic_credentials->sign_certificate, pkey);
+	EVP_PKEY_free(pkey);
+	quic_credentials->credentials.sign_certificate = &quic_credentials->sign_certificate.super;
+
+	/* A certificate without a signing key (or vice versa) is unusable. */
+	if ((quic_credentials->credentials.certificates.count != 0)
+	    != (quic_credentials->credentials.sign_certificate != NULL)) {
+		goto fail;
+	}
+
+	if (str_replace(&quic_credentials->quic_cert, quic_cert) != 0 ||
+	    str_replace(&quic_credentials->quic_key, quic_key) != 0) {
+		ret = kr_error(ENOMEM);
+		goto fail;
+	}
+
+	/* Exchange the x509 credentials; start using the new ones. */
+	struct quic_credentials *old_credentials = net->quic_credentials;
+	net->quic_credentials = quic_credentials;
+
+	if (old_credentials) {
+		free(old_credentials->quic_cert);
+		free(old_credentials->quic_key);
+		/* TODO: the struct itself (and its picotls state) cannot be freed
+		 * here while live sessions may still point at
+		 * old_credentials->credentials — needs refcounting. */
+	}
+
+	return kr_ok();
+
+fail:
+	/* NOTE(review): certificates picotls may have loaded into `credentials`
+	 * are not released here — a ptls-level cleanup is still TODO. */
+	free(quic_credentials->quic_cert);
+	free(quic_credentials->quic_key);
+	free(quic_credentials);
+	return ret;
+}
+
+/* uv_udp_send completion callback.  Must return void: the previous
+ * `static int` signature did not match uv_udp_send_cb and fell off the end
+ * of a non-void function — undefined behavior on both counts. */
+static void on_send(uv_udp_send_t *req, int status)
+{
+	(void)status; /* best-effort send; errors are ignored for now */
+	free(req);
+}
+
+/** libuv recv callback for QUIC endpoints: decode the UDP datagram into QUIC
+ * packets, dispatch each to its connection (accepting new ones), and transmit
+ * whatever quicly wants to send back. */
+void quic_recv(uv_udp_t *handle, ssize_t nread, const uv_buf_t *buf, const struct sockaddr *addr, unsigned flags)
+{
+	uv_loop_t *loop = handle->loop;
+	struct worker_ctx *worker = loop->data;
+	struct session *s = handle->data;
+	struct quic_ctx_t *ctx = session_quic_get_server_ctx(s);
+
+	/* TODO: set quicly.tls once when the session is created instead of
+	 * lazily on first packet. */
+	if (unlikely(ctx->quicly.tls == NULL)) {
+		ctx->quicly.tls = &worker->engine->net.quic_credentials->credentials;
+	}
+
+	if (session_flags(s)->closing) {
+		return;
+	}
+
+	if (nread <= 0) {
+		if (nread < 0) { /* Error response, notify resolver */
+			worker_submit(s, NULL, NULL);
+		} /* nread == 0 is for freeing buffers, we don't need to do this */
+		return;
+	}
+	if (addr->sa_family == AF_UNSPEC) {
+		return;
+	}
+
+	if (session_flags(s)->outgoing) {
+		const struct sockaddr *peer = session_get_peer(s);
+		assert(peer->sa_family != AF_UNSPEC);
+		if (kr_sockaddr_cmp(peer, addr) != 0) {
+			kr_log_verbose("[io] <= ignoring UDP from unexpected address '%s'\n",
+					kr_straddr(addr));
+			return;
+		}
+	}
+
+	/* Split the UDP datagram into multiple QUIC packets.
+	 * nread > 0 is guaranteed above, so the cast is safe. */
+	size_t packet_len;
+	for (size_t off = 0; off < (size_t)nread; off += packet_len) {
+		quicly_decoded_packet_t decoded;
+		packet_len = quicly_decode_packet(&ctx->quicly, &decoded,
+						  buf->base + off, nread - off);
+		if (packet_len == SIZE_MAX) {
+			return;
+		}
+		/* find the corresponding connection (TODO handle version negotiation, rebinding, retry, etc.) */
+		size_t i;
+		for (i = 0; ctx->conns[i] != NULL; ++i) {
+			if (quicly_is_destination(ctx->conns[i], NULL, addr, &decoded)) {
+				break;
+			}
+		}
+
+		if (ctx->conns[i] != NULL) {
+			/* let the current connection handle ingress packets */
+			quicly_receive(ctx->conns[i], NULL, addr, &decoded);
+		} else {
+			/* Fill slot i.  The previous `&ctx->conns + i` stepped by
+			 * whole 256-element arrays, corrupting memory for i > 0. */
+			quicly_accept(ctx->conns + i, &ctx->quicly, NULL, addr,
+				      &decoded, NULL, &ctx->next_cid, NULL);
+		}
+
+		if (ctx->conns[i] == NULL) {
+			continue;
+		}
+
+		quicly_datagram_t *dgrams[16];
+		size_t num_dgrams = sizeof(dgrams) / sizeof(*dgrams);
+		int ret = quicly_send(ctx->conns[i], dgrams, &num_dgrams);
+		switch (ret) {
+		case 0:
+			for (size_t j = 0; j < num_dgrams; ++j) {
+				uv_udp_send_t *ioreq = malloc(sizeof(*ioreq));
+				if (!ioreq) {
+					break; /* OOM: drop remaining datagrams */
+				}
+				/* Build a proper uv_buf_t instead of aliasing
+				 * the ptls_iovec_t through a pointer cast. */
+				uv_buf_t out = uv_buf_init((char *)dgrams[j]->data.base,
+							   dgrams[j]->data.len);
+				uv_udp_send(ioreq, handle, &out, 1,
+					    &dgrams[j]->dest.sa, on_send);
+				/* TODO: dgrams[j] must stay alive until on_send()
+				 * fires; carry it in ioreq->data and release it
+				 * there via packet_allocator->free_packet(). */
+			}
+			break;
+		case QUICLY_ERROR_FREE_CONNECTION:
+			/* connection has been closed, free and compact the table */
+			quicly_free(ctx->conns[i]);
+			memmove(ctx->conns + i, ctx->conns + i + 1,
+				sizeof(ctx->conns) - sizeof(ctx->conns[0]) * (i + 1));
+			/* (no `--i` needed: i is recomputed for the next packet) */
+			break;
+		default:
+			fprintf(stderr, "quicly_send returned %d\n", ret);
+			return;
+		}
+	}
+	mp_flush(worker->pkt_pool.ctx);
+}
+
+/** Queue answer bytes on the QUIC stream and transmit the resulting packets.
+ *
+ * The payload must travel through quicly: the previous version passed the
+ * caller's raw `buf` to uv_udp_send(), emitting plain DNS bytes on the wire
+ * instead of the encoded QUIC datagrams.
+ * \return 0 on success, kr_error() otherwise.
+ */
+int quic_write(uv_udp_send_t *ioreq, uv_udp_t *handle, const uv_buf_t *buf, unsigned int nbuf, quicly_stream_t *stream)
+{
+	(void)ioreq; /* send requests are allocated internally for now */
+
+	for (size_t i = 0; i < nbuf; ++i) {
+		quicly_streambuf_egress_write(stream, buf[i].base, buf[i].len);
+	}
+	quicly_streambuf_egress_shutdown(stream);
+
+	quicly_datagram_t *dgrams[16];
+	size_t num_dgrams = sizeof(dgrams) / sizeof(*dgrams);
+	int ret = quicly_send(stream->conn, dgrams, &num_dgrams);
+	if (ret != 0) {
+		/* Includes QUICLY_ERROR_FREE_CONNECTION; previously ignored. */
+		return kr_error(EIO);
+	}
+
+	for (size_t j = 0; j < num_dgrams; ++j) {
+		uv_udp_send_t *req = malloc(sizeof(*req));
+		if (!req) {
+			return kr_error(ENOMEM);
+		}
+		/* Send the encoded QUIC datagram, not the caller's raw buffer. */
+		uv_buf_t out = uv_buf_init((char *)dgrams[j]->data.base,
+					   dgrams[j]->data.len);
+		uv_udp_send(req, handle, &out, 1, &dgrams[j]->dest.sa, on_send);
+		/* TODO: dgrams[j] must outlive the async send; carry it in
+		 * req->data and free it from on_send(). */
+	}
+
+	struct worker_ctx *worker = handle->loop->data;
+	mp_flush(worker->pkt_pool.ctx);
+
+	return 0;
+}
+
+/** Recover the owning session from a quicly connection.
+ *
+ * Relies on the layout of struct quic_ctx_t: quicly_get_context() returns
+ * the address of the embedded `quicly` member, and `session` is the very
+ * next member, so "context pointer + 1" lands on it.  Fragile — breaks
+ * silently if any member is ever inserted between the two. */
+struct session *quic_get_session(quicly_conn_t *conn)
+{
+	return *(struct session **)(quicly_get_context(conn) + 1);
+}
--- /dev/null
+
+#pragma once
+
+#include "contrib/quicly/quicly.h"
+#include "contrib/quicly/constants.h"
+#include "contrib/quicly/picotls/picotls.h"
+#include "contrib/quicly/picotls/picotls/openssl.h"
+
+#include "lib/utils.h"
+#include "lib/defines.h"
+
+#include <uv.h>
+#include <sys/socket.h>
+#include <libknot/packet/pkt.h>
+
+struct session;
+
+struct quic_ctx_t;
+
+struct worker_ctx;
+struct network;
+
+/** QUIC TLS credentials: source file paths plus the picotls context built
+ * from them by quic_certificate_set(). */
+struct quic_credentials {
+	char *quic_cert; /**< path to the certificate file (heap copy) */
+	char *quic_key;  /**< path to the private-key file (heap copy) */
+	ptls_context_t credentials; /**< picotls context handed to quicly */
+	ptls_openssl_sign_certificate_t sign_certificate; /**< signer backing credentials.sign_certificate */
+};
+
+/** Per-session QUIC server state. */
+struct quic_ctx_t {
+	quicly_context_t quicly;
+	struct session *session; /*! Must stay immediately after `quicly`: quic_get_session() locates it by pointer offset from the context. */
+	quicly_conn_t *conns[256]; //TODO use some hashmap struct or just malloc this array
+	quicly_cid_plaintext_t next_cid; /*! next connection ID handed to quicly_accept() */
+	quicly_stream_t *processed_stream; /*! stream whose query is currently being processed; consumed by the worker */
+};
+
+/** Allocate a new per-session QUIC context; NULL on allocation failure.
+ * ((void) instead of (): empty parens declare unspecified arguments in C.) */
+struct quic_ctx_t* new_quic(void);
+/** Load a certificate/key pair and swap it into `net`; kr_ok() or kr_error(). */
+int quic_certificate_set(struct network *net, const char *quic_cert, const char *quic_key);
+/** uv_udp recv callback that decodes and answers QUIC datagrams. */
+void quic_recv(uv_udp_t *handle, ssize_t nread, const uv_buf_t *buf, const struct sockaddr *addr, unsigned flags);
+/** Send an answer through the given QUIC stream; 0 or kr_error(). */
+int quic_write(uv_udp_send_t *ioreq, uv_udp_t *handle, const uv_buf_t *buf, unsigned int nbuf, quicly_stream_t *stream);
+/** Map a quicly connection back to its owning session. */
+struct session *quic_get_session(quicly_conn_t *conn);
#include "daemon/session.h"
#include "daemon/engine.h"
#include "daemon/tls.h"
+#include "daemon/quic.h"
#include "daemon/worker.h"
#include "daemon/io.h"
#include "lib/generic/queue.h"
struct tls_ctx_t *tls_ctx; /**< server side tls-related data. */
struct tls_client_ctx_t *tls_client_ctx; /**< client side tls-related data. */
+ struct quic_ctx_t *quic_ctx;
+
trie_t *tasks; /**< list of tasks assotiated with given session. */
queue_t(struct qr_task *) waiting; /**< list of tasks waiting for sending to upstream. */
return tls_ctx;
}
+/** Return the server-side QUIC context attached to the session (may be NULL). */
+struct quic_ctx_t *session_quic_get_server_ctx(const struct session *session)
+{
+	return session->quic_ctx;
+}
+
+/** Attach a QUIC context to the session and link it back.
+ *
+ * Tolerates ctx == NULL (new_quic() returns NULL on OOM and session_new()
+ * passes the result straight in): the session is then left without QUIC
+ * instead of crashing on the `ctx->session` dereference. */
+void session_quic_set_server_ctx(struct session *session, struct quic_ctx_t *ctx)
+{
+	if (!ctx) {
+		return;
+	}
+	session->sflags.has_quic = true;
+	session->quic_ctx = ctx;
+	ctx->session = session;
+}
+
+
uv_handle_t *session_get_handle(struct session *session)
{
return session->handle;
return h->data;
}
-struct session *session_new(uv_handle_t *handle, bool has_tls)
+struct session *session_new(uv_handle_t *handle, bool has_tls, bool has_quic)
{
if (!handle) {
return NULL;
struct worker_ctx *worker = handle->loop->data;
session->wire_buf = worker->wire_buf;
session->wire_buf_size = sizeof(worker->wire_buf);
+
+ if (has_quic) {
+ session_quic_set_server_ctx(session, new_quic());
+ //session->sflags.has_quic = true;
+ //session->quic_ctx = new_quic();
+ }
}
uv_timer_init(handle->loop, &session->timeout);
bool outgoing : 1; /**< True: to upstream; false: from a client. */
bool throttled : 1; /**< True: data reading from peer is temporarily stopped. */
bool has_tls : 1; /**< True: given session uses TLS. */
+ bool has_quic : 1; /**< True: given session uses QUIC. */
bool connected : 1; /**< True: TCP connection is established. */
bool closing : 1; /**< True: session close sequence is in progress. */
bool wirebuf_error : 1; /**< True: last operation with wirebuf ended up with an error. */
/* Allocate new session for a libuv handle.
* If handle->tyoe is UV_UDP, tls parameter will be ignored. */
-struct session *session_new(uv_handle_t *handle, bool has_tls);
+struct session *session_new(uv_handle_t *handle, bool has_tls, bool has_quic);
/* Clear and free given session. */
void session_free(struct session *session);
/* Clear session. */
* server and client. */
struct tls_common_ctx *session_tls_get_common_ctx(const struct session *session);
+
+struct quic_ctx_t *session_quic_get_server_ctx(const struct session *session);
+void session_quic_set_server_ctx(struct session *session, struct quic_ctx_t *ctx);
+//void session_quic_set_credentials(struct session *session, ptls_context_t *credentials);
+
+
/** Get pointer to underlying libuv handle for IO operations. */
uv_handle_t *session_get_handle(struct session *session);
struct session *session_get(uv_handle_t *h);
#include "daemon/io.h"
#include "daemon/session.h"
#include "daemon/tls.h"
+#include "daemon/quic.h"
#include "daemon/udp_queue.h"
#include "daemon/zimport.h"
#include "lib/layer.h"
bool finished : 1;
bool leading : 1;
uint64_t creation_time;
+ quicly_stream_t *quic_stream;
};
req->qsource.dst_addr = session_get_sockname(session);
req->qsource.flags.tcp = session_get_handle(session)->type == UV_TCP;
req->qsource.flags.tls = session_flags(session)->has_tls;
+ req->qsource.flags.quic = session_flags(session)->has_quic;
/* We need to store a copy of peer address. */
memcpy(&ctx->source.addr.ip, peer, kr_sockaddr_len(peer));
req->qsource.addr = &ctx->source.addr.ip;
uv_write_t *write_req = (uv_write_t *)ioreq;
write_req->data = task;
ret = tls_write(write_req, handle, pkt, &on_write);
+ } else if (session_flags(session)->has_quic) {
+ uv_udp_send_t *send_req = (uv_udp_send_t *)ioreq;
+ uv_buf_t buf = { (char *)pkt->wire, pkt->size };
+ send_req->data = task;
+ ret = quic_write(send_req, (uv_udp_t *)handle, &buf, 1, task->quic_stream);
+ //ret = uv_udp_send(send_req, (uv_udp_t *)handle, &buf, 1, addr, &on_send);
} else if (handle->type == UV_UDP) {
uv_udp_send_t *send_req = (uv_udp_send_t *)ioreq;
uv_buf_t buf = { (char *)pkt->wire, pkt->size };
if (src_handle->type != UV_UDP && src_handle->type != UV_TCP) {
assert(false);
ret = kr_error(EINVAL);
- } else if (src_handle->type == UV_UDP && ENABLE_SENDMMSG) {
+ } else if (src_handle->type == UV_UDP && ENABLE_SENDMMSG && !session_flags(source_session)->has_quic) {
int fd;
ret = uv_fileno(src_handle, &fd);
assert(!ret);
return kr_error(ENOMEM);
}
+ if (session_flags(session)->has_quic) {
+ struct quic_ctx_t *ctx = session_quic_get_server_ctx(session);
+ task->quic_stream = ctx->processed_stream;
+ ctx->processed_stream = NULL;
+ }
+
if (handle->type == UV_TCP && qr_task_register(task, session)) {
return kr_error(ENOMEM);
}
* Defines.
*/
#define KR_DNS_PORT 53
+#define KR_DNS_QUIC_PORT 784
#define KR_DNS_TLS_PORT 853
#define KR_EDNS_VERSION 0
#define KR_EDNS_PAYLOAD 4096 /* Default UDP payload (max unfragmented UDP is 1452B) */
bool tcp:1; /**< true if the request is not on UDP; only meaningful if (dst_addr). */
bool tls:1; /**< true if the request is encrypted; only meaningful if (dst_addr). */
bool http:1; /**< true if the request is on HTTP; only meaningful if (dst_addr). */
+ bool quic:1; /**< true if the request is on QUIC; only meaningful if (dst_addr). */
};
/**