int quic_aead_iv_build(unsigned char *iv, size_t ivlen,
unsigned char *aead_iv, size_t aead_ivlen, uint64_t pn);
+/* HP protection (AES) */
+int quic_tls_dec_aes_ctx_init(EVP_CIPHER_CTX **aes_ctx,
+ const EVP_CIPHER *aes, unsigned char *key);
+int quic_tls_enc_aes_ctx_init(EVP_CIPHER_CTX **aes_ctx,
+ const EVP_CIPHER *aes, unsigned char *key);
+int quic_tls_aes_decrypt(unsigned char *out,
+ const unsigned char *in, size_t inlen,
+ EVP_CIPHER_CTX *ctx);
+int quic_tls_aes_encrypt(unsigned char *out,
+ const unsigned char *in, size_t inlen,
+ EVP_CIPHER_CTX *ctx);
+
static inline const EVP_CIPHER *tls_aead(const SSL_CIPHER *cipher)
{
switch (SSL_CIPHER_get_id(cipher)) {
ctx->tx.keylen = 0;
}
+ /* RX HP protection */
+ EVP_CIPHER_CTX_free(ctx->rx.hp_ctx);
+ /* RX AEAD decryption */
EVP_CIPHER_CTX_free(ctx->rx.ctx);
pool_free(pool_head_quic_tls_iv, ctx->rx.iv);
pool_free(pool_head_quic_tls_key, ctx->rx.key);
+ /* TX HP protection */
+ EVP_CIPHER_CTX_free(ctx->tx.hp_ctx);
+ /* TX AEAD encryption */
EVP_CIPHER_CTX_free(ctx->tx.ctx);
pool_free(pool_head_quic_tls_iv, ctx->tx.iv);
pool_free(pool_head_quic_tls_key, ctx->tx.key);
return 0;
}
+/* Initialize <*aes_ctx> AES cipher context with <key> as key for encryption.
+ * <*aes_ctx> is written only on success and must be released by the caller
+ * with EVP_CIPHER_CTX_free() (done in quic_tls_ctx_free for hp_ctx members).
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_enc_aes_ctx_init(EVP_CIPHER_CTX **aes_ctx,
+ const EVP_CIPHER *aes, unsigned char *key)
+{
+ EVP_CIPHER_CTX *ctx;
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx)
+ return 0;
+
+ /* Key the context now; the IV is deliberately NULL here because it is
+ * supplied per call by quic_tls_aes_encrypt() (the HP sample).
+ */
+ if (!EVP_EncryptInit_ex(ctx, aes, NULL, key, NULL))
+ goto err;
+
+ *aes_ctx = ctx;
+ return 1;
+
+ err:
+ /* Do not leak the context on cipher-init failure. */
+ EVP_CIPHER_CTX_free(ctx);
+ return 0;
+}
+
+/* Cipher <inlen> bytes of <out> in place with <ctx> as AES cipher context
+ * already keyed by quic_tls_enc_aes_ctx_init(), using <in> as IV (for QUIC
+ * header protection, <in> is the packet ciphertext sample). It is the
+ * responsibility of the caller to ensure <out> provides at least <inlen>
+ * bytes (current contents are the plaintext, overwritten by the ciphertext).
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_aes_encrypt(unsigned char *out,
+ const unsigned char *in, size_t inlen,
+ EVP_CIPHER_CTX *ctx)
+{
+ /* Only receives the output lengths required by the EVP API; not the
+ * return value of this function.
+ */
+ int ret = 0;
+
+ if (!EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, in) ||
+ !EVP_EncryptUpdate(ctx, out, &ret, out, inlen) ||
+ !EVP_EncryptFinal_ex(ctx, out, &ret))
+ return 0;
+
+ return 1;
+}
+
+/* Initialize <*aes_ctx> AES cipher context with <key> as key for decryption.
+ * <*aes_ctx> is written only on success and must be released by the caller
+ * with EVP_CIPHER_CTX_free() (done in quic_tls_ctx_free for hp_ctx members).
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_dec_aes_ctx_init(EVP_CIPHER_CTX **aes_ctx,
+ const EVP_CIPHER *aes, unsigned char *key)
+{
+ EVP_CIPHER_CTX *ctx;
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx)
+ return 0;
+
+ /* Key the context now; the IV is deliberately NULL here because it is
+ * supplied per call by quic_tls_aes_decrypt() (the HP sample).
+ */
+ if (!EVP_DecryptInit_ex(ctx, aes, NULL, key, NULL))
+ goto err;
+
+ *aes_ctx = ctx;
+ return 1;
+
+ err:
+ /* Do not leak the context on cipher-init failure. */
+ EVP_CIPHER_CTX_free(ctx);
+ return 0;
+}
+
+/* Decipher <inlen> bytes of <out> in place with <ctx> as AES cipher context
+ * already keyed by quic_tls_dec_aes_ctx_init(), using <in> as IV (for QUIC
+ * header protection removal, <in> is the packet ciphertext sample). It is
+ * the responsibility of the caller to ensure <out> provides at least <inlen>
+ * bytes.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_aes_decrypt(unsigned char *out,
+ const unsigned char *in, size_t inlen,
+ EVP_CIPHER_CTX *ctx)
+{
+ /* Only receives the output lengths required by the EVP API; not the
+ * return value of this function.
+ */
+ int ret = 0;
+
+ if (!EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, in) ||
+ !EVP_DecryptUpdate(ctx, out, &ret, out, inlen) ||
+ !EVP_DecryptFinal_ex(ctx, out, &ret))
+ return 0;
+
+ return 1;
+}
+
/* Initialize the cipher context for TX part of <tls_ctx> QUIC TLS context.
* Return 1 if succeeded, 0 if not.
*/
goto leave;
}
+ if (!quic_tls_dec_aes_ctx_init(&rx->hp_ctx, rx->hp, rx->hp_key)) {
+ TRACE_ERROR("could not initial RX TLS cipher context for HP", QUIC_EV_CONN_RWSEC, qc);
+ goto leave;
+ }
+
/* Enqueue this connection asap if we could derive O-RTT secrets as
* listener. Note that a listener derives only RX secrets for this
* level.
goto leave;
}
+ if (!quic_tls_enc_aes_ctx_init(&tx->hp_ctx, tx->hp, tx->hp_key)) {
+ TRACE_ERROR("could not initial TX TLS cipher context for HP", QUIC_EV_CONN_RWSEC, qc);
+ goto leave;
+ }
+
if (level == ssl_encryption_application) {
struct quic_tls_kp *prv_rx = &qc->ku.prv_rx;
struct quic_tls_kp *nxt_rx = &qc->ku.nxt_rx;
struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx,
int64_t largest_pn, unsigned char *pn, unsigned char *byte0)
{
- int ret, outlen, i, pnlen;
+ int ret, i, pnlen;
uint64_t packet_number;
uint32_t truncated_pn = 0;
unsigned char mask[5] = {0};
unsigned char *sample;
EVP_CIPHER_CTX *cctx = NULL;
- unsigned char *hp_key;
TRACE_ENTER(QUIC_EV_CONN_RMHP, qc);
sample = pn + QUIC_PACKET_PN_MAXLEN;
- hp_key = tls_ctx->rx.hp_key;
- if (!EVP_DecryptInit_ex(cctx, tls_ctx->rx.hp, NULL, hp_key, sample) ||
- !EVP_DecryptUpdate(cctx, mask, &outlen, mask, sizeof mask) ||
- !EVP_DecryptFinal_ex(cctx, mask, &outlen)) {
- TRACE_ERROR("decryption failed", QUIC_EV_CONN_RMHP, qc, pkt);
+ if (!quic_tls_aes_decrypt(mask, sample, sizeof mask, tls_ctx->rx.hp_ctx)) {
+ TRACE_ERROR("HP removing failed", QUIC_EV_CONN_RMHP, qc, pkt);
goto leave;
}
char *buf_area = NULL;
struct listener *l = NULL;
struct quic_cc_algo *cc_algo = NULL;
-
+ struct quic_tls_ctx *ictx;
TRACE_ENTER(QUIC_EV_CONN_INIT);
qc = pool_zalloc(pool_head_quic_conn);
if (!qc) {
!quic_conn_init_idle_timer_task(qc))
goto err;
- if (!qc_new_isecs(qc, &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].tls_ctx,
- qc->original_version, dcid->data, dcid->len, 1))
+ ictx = &qc->els[QUIC_TLS_ENC_LEVEL_INITIAL].tls_ctx;
+ if (!qc_new_isecs(qc, ictx,qc->original_version, dcid->data, dcid->len, 1))
goto err;
+ if (!quic_tls_dec_aes_ctx_init(&ictx->rx.hp_ctx, ictx->rx.hp, ictx->rx.hp_key) ||
+ !quic_tls_enc_aes_ctx_init(&ictx->tx.hp_ctx, ictx->tx.hp, ictx->tx.hp_key))
+ goto err;
+
TRACE_LEAVE(QUIC_EV_CONN_INIT, qc);
return qc;
* with <aead> as AEAD cipher and <key> as secret key.
* Returns 1 if succeeded or 0 if failed.
*/
-static int quic_apply_header_protection(struct quic_conn *qc,
- unsigned char *buf, unsigned char *pn, size_t pnlen,
- const EVP_CIPHER *aead, const unsigned char *key)
+static int quic_apply_header_protection(struct quic_conn *qc, unsigned char *buf,
+ unsigned char *pn, size_t pnlen,
+ struct quic_tls_ctx *tls_ctx)
+
{
- int i, outlen, ret = 0;
- EVP_CIPHER_CTX *ctx;
+ int i, ret = 0;
/* We need an IV of at least 5 bytes: one byte for bytes #0
* and at most 4 bytes for the packet number
*/
unsigned char mask[5] = {0};
+ EVP_CIPHER_CTX *aes_ctx = tls_ctx->tx.hp_ctx;
TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
- ctx = EVP_CIPHER_CTX_new();
- if (!ctx) {
- TRACE_ERROR("cipher context allocation failed", QUIC_EV_CONN_TXPKT, qc);
- goto out;
- }
-
- if (!EVP_EncryptInit_ex(ctx, aead, NULL, key, pn + QUIC_PACKET_PN_MAXLEN) ||
- !EVP_EncryptUpdate(ctx, mask, &outlen, mask, sizeof mask) ||
- !EVP_EncryptFinal_ex(ctx, mask, &outlen)) {
- TRACE_ERROR("cipher context allocation failed", QUIC_EV_CONN_TXPKT, qc);
+ if (!quic_tls_aes_encrypt(mask, pn + QUIC_PACKET_PN_MAXLEN, sizeof mask, aes_ctx)) {
+ TRACE_ERROR("could not apply header protection", QUIC_EV_CONN_TXPKT, qc);
goto out;
}
for (i = 0; i < pnlen; i++)
pn[i] ^= mask[i + 1];
- EVP_CIPHER_CTX_free(ctx);
-
ret = 1;
out:
TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
end += QUIC_TLS_TAG_LEN;
pkt->len += QUIC_TLS_TAG_LEN;
- if (!quic_apply_header_protection(qc, beg, buf_pn, pn_len,
- tls_ctx->tx.hp, tls_ctx->tx.hp_key)) {
+ if (!quic_apply_header_protection(qc, beg, buf_pn, pn_len, tls_ctx)) {
// trace already emitted by function above
*err = -2;
goto err;