/*
- * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
#include "internal/quic_channel.h"
#include "internal/quic_error.h"
#include "internal/quic_rx_depack.h"
+#include "internal/quic_lcidm.h"
+#include "internal/quic_srtm.h"
+#include "internal/qlog_event_helpers.h"
#include "../ssl_local.h"
#include "quic_channel_local.h"
+#include "quic_port_local.h"
+#include "quic_engine_local.h"
/*
* NOTE: While this channel implementation currently has basic server support,
* not suitable for network use. In particular, it does not implement address
* validation, anti-amplification or retry logic.
*
- * TODO(QUIC): Implement address validation and anti-amplification
- * TODO(QUIC): Implement retry logic
+ * TODO(QUIC SERVER): Implement address validation and anti-amplification
+ * TODO(QUIC SERVER): Implement retry logic
*/
-#define INIT_DCID_LEN 8
-#define INIT_CRYPTO_BUF_LEN 8192
-#define INIT_APP_BUF_LEN 8192
+#define INIT_CRYPTO_RECV_BUF_LEN 16384
+#define INIT_CRYPTO_SEND_BUF_LEN 16384
+#define INIT_APP_BUF_LEN 8192
/*
* Interval before we force a PING to ensure NATs don't timeout. This is based
*/
#define MAX_NAT_INTERVAL (ossl_ms2time(25000))
-static void ch_rx_pre(QUIC_CHANNEL *ch);
-static int ch_rx(QUIC_CHANNEL *ch);
+/*
+ * Our maximum ACK delay on the TX side. This is up to us to choose. Note that
+ * this could differ from QUIC_DEFAULT_MAX_DELAY in future as that is a protocol
+ * value which determines the value of the maximum ACK delay if the
+ * max_ack_delay transport parameter is not set.
+ */
+#define DEFAULT_MAX_ACK_DELAY QUIC_DEFAULT_MAX_ACK_DELAY
+
+DEFINE_LIST_OF_IMPL(ch, QUIC_CHANNEL);
+
+static void ch_save_err_state(QUIC_CHANNEL *ch);
+static int ch_rx(QUIC_CHANNEL *ch, int channel_only);
static int ch_tx(QUIC_CHANNEL *ch);
-static void ch_tick(QUIC_TICK_RESULT *res, void *arg, uint32_t flags);
-static void ch_rx_handle_packet(QUIC_CHANNEL *ch);
+static int ch_tick_tls(QUIC_CHANNEL *ch, int channel_only);
+static void ch_rx_handle_packet(QUIC_CHANNEL *ch, int channel_only);
static OSSL_TIME ch_determine_next_tick_deadline(QUIC_CHANNEL *ch);
static int ch_retry(QUIC_CHANNEL *ch,
const unsigned char *retry_token,
size_t *consumed, void *arg);
static OSSL_TIME get_time(void *arg);
static uint64_t get_stream_limit(int uni, void *arg);
-static int rx_early_validate(QUIC_PN pn, int pn_space, void *arg);
+static int rx_late_validate(QUIC_PN pn, int pn_space, void *arg);
static void rxku_detected(QUIC_PN pn, void *arg);
static int ch_retry(QUIC_CHANNEL *ch,
const unsigned char *retry_token,
static void ch_on_idle_timeout(QUIC_CHANNEL *ch);
static void ch_update_idle(QUIC_CHANNEL *ch);
static void ch_update_ping_deadline(QUIC_CHANNEL *ch);
-static void ch_raise_net_error(QUIC_CHANNEL *ch);
static void ch_on_terminating_timeout(QUIC_CHANNEL *ch);
static void ch_start_terminating(QUIC_CHANNEL *ch,
const QUIC_TERMINATE_CAUSE *tcause,
int force_immediate);
-static void ch_default_packet_handler(QUIC_URXE *e, void *arg);
-static int ch_server_on_new_conn(QUIC_CHANNEL *ch, const BIO_ADDR *peer,
- const QUIC_CONN_ID *peer_scid,
- const QUIC_CONN_ID *peer_dcid);
static void ch_on_txp_ack_tx(const OSSL_QUIC_FRAME_ACK *ack, uint32_t pn_space,
void *arg);
+static void ch_rx_handle_version_neg(QUIC_CHANNEL *ch, OSSL_QRX_PKT *pkt);
+static void ch_raise_version_neg_failure(QUIC_CHANNEL *ch);
+static void ch_record_state_transition(QUIC_CHANNEL *ch, uint32_t new_state);
-static int gen_rand_conn_id(OSSL_LIB_CTX *libctx, size_t len, QUIC_CONN_ID *cid)
+DEFINE_LHASH_OF_EX(QUIC_SRT_ELEM);
+
+/*
+ * Lazily creates and caches this channel's qlog instance, configured from the
+ * environment. Returns NULL if qlog support is compiled out, qlog use is
+ * disabled for this channel, or creation fails (a failure disables further
+ * attempts so we do not retry on every call).
+ */
+QUIC_NEEDS_LOCK
+static QLOG *ch_get_qlog(QUIC_CHANNEL *ch)
{
-    if (len > QUIC_MAX_CONN_ID_LEN)
-        return 0;
+#ifndef OPENSSL_NO_QLOG
+    QLOG_TRACE_INFO qti = {0};
-    cid->id_len = (unsigned char)len;
+    if (ch->qlog != NULL)
+        return ch->qlog;
-    if (RAND_bytes_ex(libctx, cid->id, len, len * 8) != 1) {
-        cid->id_len = 0;
-        return 0;
+    if (!ch->use_qlog)
+        return NULL;
+
+    /* The trace is identified by the original destination CID. */
+    qti.odcid = ch->init_dcid;
+    qti.title = NULL;
+    qti.description = NULL;
+    qti.group_id = NULL;
+    qti.is_server = ch->is_server;
+    qti.now_cb = get_time;
+    qti.now_cb_arg = ch;
+    if ((ch->qlog = ossl_qlog_new_from_env(&qti)) == NULL) {
+        ch->use_qlog = 0; /* don't try again */
+        return NULL;
    }
-    return 1;
+    return ch->qlog;
+#else
+    return NULL;
+#endif
}
/*
* QUIC Channel Initialization and Teardown
* ========================================
*/
-#define DEFAULT_INIT_CONN_RXFC_WND (2 * 1024 * 1024)
-#define DEFAULT_CONN_RXFC_MAX_WND_MUL 5
+#define DEFAULT_INIT_CONN_RXFC_WND (768 * 1024)
+#define DEFAULT_CONN_RXFC_MAX_WND_MUL 20
-#define DEFAULT_INIT_STREAM_RXFC_WND (2 * 1024 * 1024)
-#define DEFAULT_STREAM_RXFC_MAX_WND_MUL 5
+#define DEFAULT_INIT_STREAM_RXFC_WND (512 * 1024)
+#define DEFAULT_STREAM_RXFC_MAX_WND_MUL 12
#define DEFAULT_INIT_CONN_MAX_STREAMS 100
OSSL_QRX_ARGS qrx_args = {0};
QUIC_TLS_ARGS tls_args = {0};
uint32_t pn_space;
- size_t rx_short_cid_len = ch->is_server ? INIT_DCID_LEN : 0;
+ size_t rx_short_dcid_len;
+ size_t tx_init_dcid_len;
+
+ if (ch->port == NULL || ch->lcidm == NULL || ch->srtm == NULL)
+ goto err;
+
+ rx_short_dcid_len = ossl_quic_port_get_rx_short_dcid_len(ch->port);
+ tx_init_dcid_len = ossl_quic_port_get_tx_init_dcid_len(ch->port);
/* For clients, generate our initial DCID. */
if (!ch->is_server
- && !gen_rand_conn_id(ch->libctx, INIT_DCID_LEN, &ch->init_dcid))
+ && !ossl_quic_gen_rand_conn_id(ch->port->engine->libctx, tx_init_dcid_len,
+ &ch->init_dcid))
goto err;
/* We plug in a network write BIO to the QTX later when we get one. */
- qtx_args.libctx = ch->libctx;
+ qtx_args.libctx = ch->port->engine->libctx;
qtx_args.mdpl = QUIC_MIN_INITIAL_DGRAM_LEN;
ch->rx_max_udp_payload_size = qtx_args.mdpl;
get_time, ch))
goto err;
- if (!ossl_quic_rxfc_init_for_stream_count(&ch->max_streams_bidi_rxfc,
- DEFAULT_INIT_CONN_MAX_STREAMS,
- get_time, ch))
+ for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space)
+ if (!ossl_quic_rxfc_init_standalone(&ch->crypto_rxfc[pn_space],
+ INIT_CRYPTO_RECV_BUF_LEN,
+ get_time, ch))
+ goto err;
+
+ if (!ossl_quic_rxfc_init_standalone(&ch->max_streams_bidi_rxfc,
+ DEFAULT_INIT_CONN_MAX_STREAMS,
+ get_time, ch))
goto err;
- if (!ossl_quic_rxfc_init_for_stream_count(&ch->max_streams_uni_rxfc,
- DEFAULT_INIT_CONN_MAX_STREAMS,
- get_time, ch))
+ if (!ossl_quic_rxfc_init_standalone(&ch->max_streams_uni_rxfc,
+ DEFAULT_INIT_CONN_MAX_STREAMS,
+ get_time, ch))
goto err;
if (!ossl_statm_init(&ch->statm))
ch->have_qsm = 1;
+ if (!ch->is_server
+ && !ossl_quic_lcidm_generate_initial(ch->lcidm, ch, &txp_args.cur_scid))
+ goto err;
+
/* We use a zero-length SCID. */
txp_args.cur_dcid = ch->init_dcid;
txp_args.ack_delay_exponent = 3;
txp_args.cc_data = ch->cc_data;
txp_args.now = get_time;
txp_args.now_arg = ch;
+ txp_args.qlog = ch_get_qlog(ch);
for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space) {
- ch->crypto_send[pn_space] = ossl_quic_sstream_new(INIT_CRYPTO_BUF_LEN);
+ ch->crypto_send[pn_space] = ossl_quic_sstream_new(INIT_CRYPTO_SEND_BUF_LEN);
if (ch->crypto_send[pn_space] == NULL)
goto err;
ossl_quic_tx_packetiser_set_ack_tx_cb(ch->txp, ch_on_txp_ack_tx, ch);
- if ((ch->demux = ossl_quic_demux_new(/*BIO=*/NULL,
- /*Short CID Len=*/rx_short_cid_len,
- get_time, ch)) == NULL)
- goto err;
-
- /*
- * If we are a server, setup our handler for packets not corresponding to
- * any known DCID on our end. This is for handling clients establishing new
- * connections.
- */
- if (ch->is_server)
- ossl_quic_demux_set_default_handler(ch->demux,
- ch_default_packet_handler,
- ch);
-
- qrx_args.libctx = ch->libctx;
- qrx_args.demux = ch->demux;
- qrx_args.short_conn_id_len = rx_short_cid_len;
+ qrx_args.libctx = ch->port->engine->libctx;
+ qrx_args.demux = ch->port->demux;
+ qrx_args.short_conn_id_len = rx_short_dcid_len;
qrx_args.max_deferred = 32;
if ((ch->qrx = ossl_qrx_new(&qrx_args)) == NULL)
goto err;
- if (!ossl_qrx_set_early_validation_cb(ch->qrx,
- rx_early_validate,
- ch))
+ if (!ossl_qrx_set_late_validation_cb(ch->qrx,
+ rx_late_validate,
+ ch))
goto err;
if (!ossl_qrx_set_key_update_cb(ch->qrx,
ch))
goto err;
- if (!ch->is_server && !ossl_qrx_add_dst_conn_id(ch->qrx, &txp_args.cur_scid))
- goto err;
-
for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space) {
ch->crypto_recv[pn_space] = ossl_quic_rstream_new(NULL, NULL, 0);
if (ch->crypto_recv[pn_space] == NULL)
if ((ch->qtls = ossl_quic_tls_new(&tls_args)) == NULL)
goto err;
+ ch->tx_max_ack_delay = DEFAULT_MAX_ACK_DELAY;
ch->rx_max_ack_delay = QUIC_DEFAULT_MAX_ACK_DELAY;
ch->rx_ack_delay_exp = QUIC_DEFAULT_ACK_DELAY_EXP;
ch->rx_active_conn_id_limit = QUIC_MIN_ACTIVE_CONN_ID_LIMIT;
ch->rx_enc_level = QUIC_ENC_LEVEL_INITIAL;
ch->txku_threshold_override = UINT64_MAX;
+ ossl_ackm_set_tx_max_ack_delay(ch->ackm, ossl_ms2time(ch->tx_max_ack_delay));
+ ossl_ackm_set_rx_max_ack_delay(ch->ackm, ossl_ms2time(ch->rx_max_ack_delay));
+
/*
* Determine the QUIC Transport Parameters and serialize the transport
* parameters block. (For servers, we do this later as we must defer
goto err;
ch_update_idle(ch);
- ossl_quic_reactor_init(&ch->rtor, ch_tick, ch,
- ch_determine_next_tick_deadline(ch));
+ ossl_list_ch_insert_tail(&ch->port->channel_list, ch);
+ ch->on_port_list = 1;
return 1;
err:
++pn_space)
ossl_ackm_on_pkt_space_discarded(ch->ackm, pn_space);
+ ossl_quic_lcidm_cull(ch->lcidm, ch);
+ ossl_quic_srtm_cull(ch->srtm, ch);
ossl_quic_tx_packetiser_free(ch->txp);
ossl_quic_txpim_free(ch->txpim);
ossl_quic_cfq_free(ch->cfq);
ossl_quic_tls_free(ch->qtls);
ossl_qrx_free(ch->qrx);
- ossl_quic_demux_free(ch->demux);
OPENSSL_free(ch->local_transport_params);
+ OPENSSL_free((char *)ch->terminate_cause.reason);
OSSL_ERR_STATE_free(ch->err_state);
+ OPENSSL_free(ch->ack_range_scratch);
+
+ if (ch->on_port_list) {
+ ossl_list_ch_remove(&ch->port->channel_list, ch);
+ ch->on_port_list = 0;
+ }
+
+#ifndef OPENSSL_NO_QLOG
+ if (ch->qlog != NULL)
+ ossl_qlog_flush(ch->qlog); /* best effort */
+
+ ossl_qlog_free(ch->qlog);
+#endif
}
QUIC_CHANNEL *ossl_quic_channel_new(const QUIC_CHANNEL_ARGS *args)
if ((ch = OPENSSL_zalloc(sizeof(*ch))) == NULL)
return NULL;
- ch->libctx = args->libctx;
- ch->propq = args->propq;
+ ch->port = args->port;
ch->is_server = args->is_server;
ch->tls = args->tls;
- ch->mutex = args->mutex;
- ch->now_cb = args->now_cb;
- ch->now_cb_arg = args->now_cb_arg;
+ ch->lcidm = args->lcidm;
+ ch->srtm = args->srtm;
+#ifndef OPENSSL_NO_QLOG
+ ch->use_qlog = args->use_qlog;
+#endif
if (!ch_init(ch)) {
OPENSSL_free(ch);
int ossl_quic_channel_get_peer_addr(QUIC_CHANNEL *ch, BIO_ADDR *peer_addr)
{
+    /* Fail if no peer address has been set on this channel yet. */
+    if (!ch->addressed_mode)
+        return 0;
+
*peer_addr = ch->cur_peer_addr;
return 1;
}
int ossl_quic_channel_set_peer_addr(QUIC_CHANNEL *ch, const BIO_ADDR *peer_addr)
{
-    ch->cur_peer_addr = *peer_addr;
+    /* The peer address may only be changed before the channel is used. */
+    if (ch->state != QUIC_CHANNEL_STATE_IDLE)
+        return 0;
+
+    /* NULL or an AF_UNSPEC address clears the address and addressed mode. */
+    if (peer_addr == NULL || BIO_ADDR_family(peer_addr) == AF_UNSPEC) {
+        BIO_ADDR_clear(&ch->cur_peer_addr);
+        ch->addressed_mode = 0;
+        return 1;
+    }
+
+    ch->cur_peer_addr = *peer_addr;
+    ch->addressed_mode = 1;
return 1;
}
QUIC_REACTOR *ossl_quic_channel_get_reactor(QUIC_CHANNEL *ch)
{
- return &ch->rtor;
+ return ossl_quic_port_get0_reactor(ch->port);
}
QUIC_STREAM_MAP *ossl_quic_channel_get_qsm(QUIC_CHANNEL *ch)
return ch != NULL && ch->state == QUIC_CHANNEL_STATE_ACTIVE;
}
-int ossl_quic_channel_is_terminating(const QUIC_CHANNEL *ch)
+/* True iff the channel is in the TERMINATING-CLOSING state. */
+int ossl_quic_channel_is_closing(const QUIC_CHANNEL *ch)
{
-    if (ch->state == QUIC_CHANNEL_STATE_TERMINATING_CLOSING
-        || ch->state == QUIC_CHANNEL_STATE_TERMINATING_DRAINING)
-        return 1;
+    return ch->state == QUIC_CHANNEL_STATE_TERMINATING_CLOSING;
+}
-    return 0;
+/* True iff the channel is in the TERMINATING-DRAINING state. */
+static int ossl_quic_channel_is_draining(const QUIC_CHANNEL *ch)
+{
+    return ch->state == QUIC_CHANNEL_STATE_TERMINATING_DRAINING;
}
-int ossl_quic_channel_is_terminated(const QUIC_CHANNEL *ch)
+/* True iff the channel is terminating, i.e. either closing or draining. */
+static int ossl_quic_channel_is_terminating(const QUIC_CHANNEL *ch)
{
-    if (ch->state == QUIC_CHANNEL_STATE_TERMINATED)
-        return 1;
+    return ossl_quic_channel_is_closing(ch)
+        || ossl_quic_channel_is_draining(ch);
+}
-    return 0;
+/* True iff termination has fully completed. */
+int ossl_quic_channel_is_terminated(const QUIC_CHANNEL *ch)
+{
+    return ch->state == QUIC_CHANNEL_STATE_TERMINATED;
}
int ossl_quic_channel_is_term_any(const QUIC_CHANNEL *ch)
+/* Returns the demuxer, which is now owned by the channel's port. */
QUIC_DEMUX *ossl_quic_channel_get0_demux(QUIC_CHANNEL *ch)
{
-    return ch->demux;
+    return ch->port->demux;
+}
+
+/* Returns the QUIC_PORT this channel belongs to. */
+QUIC_PORT *ossl_quic_channel_get0_port(QUIC_CHANNEL *ch)
+{
+    return ch->port;
+}
+
+/* Returns the engine owning this channel's port. */
+QUIC_ENGINE *ossl_quic_channel_get0_engine(QUIC_CHANNEL *ch)
+{
+    return ossl_quic_port_get0_engine(ch->port);
}
+/* Returns the channel mutex, which is provided by the port. */
CRYPTO_MUTEX *ossl_quic_channel_get_mutex(QUIC_CHANNEL *ch)
{
-    return ch->mutex;
+    return ossl_quic_port_get0_mutex(ch->port);
}
+/* True iff there is unprocessed RX data queued in the demux or QRX. */
int ossl_quic_channel_has_pending(const QUIC_CHANNEL *ch)
{
-    return ossl_quic_demux_has_pending(ch->demux)
+    return ossl_quic_demux_has_pending(ch->port->demux)
|| ossl_qrx_processed_read_pending(ch->qrx);
}
{
QUIC_CHANNEL *ch = arg;
- if (ch->now_cb == NULL)
- return ossl_time_now();
-
- return ch->now_cb(ch->now_cb_arg);
+ return ossl_quic_port_get_time(ch->port);
}
/* Used by QSM. */
* Called by QRX to determine if a packet is potentially invalid before trying
* to decrypt it.
*/
-static int rx_early_validate(QUIC_PN pn, int pn_space, void *arg)
+static int rx_late_validate(QUIC_PN pn, int pn_space, void *arg)
{
QUIC_CHANNEL *ch = arg;
{
QUIC_CHANNEL *ch = arg;
QUIC_RSTREAM *rstream;
+ OSSL_RTT_INFO rtt_info;
+ uint32_t rx_pn_space = ossl_quic_enc_level_to_pn_space(ch->rx_enc_level);
- rstream = ch->crypto_recv[ossl_quic_enc_level_to_pn_space(ch->rx_enc_level)];
+ rstream = ch->crypto_recv[rx_pn_space];
if (rstream == NULL)
return 0;
+ ossl_statm_get_rtt_info(ossl_quic_channel_get_statm(ch), &rtt_info);
+ if (!ossl_quic_rxfc_on_retire(&ch->crypto_rxfc[rx_pn_space], bytes_read,
+ rtt_info.smoothed_rtt))
+ return 0;
+
return ossl_quic_rstream_release_record(rstream, bytes_read);
}
* Was not a valid QUIC handshake if we did not get valid transport
* params.
*/
- ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
+ ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_CRYPTO_MISSING_EXT,
OSSL_QUIC_FRAME_TYPE_CRYPTO,
"no transport parameters received");
return 0;
OPENSSL_free(ch->local_transport_params);
ch->local_transport_params = NULL;
+ /* Tell the QRX it can now process 1-RTT packets. */
+ ossl_qrx_allow_1rtt_processing(ch->qrx);
+
/* Tell TXP the handshake is complete. */
ossl_quic_tx_packetiser_notify_handshake_complete(ch->txp);
ossl_quic_tx_packetiser_schedule_handshake_done(ch->txp);
}
+ ch_record_state_transition(ch, ch->state);
return 1;
}
{
QUIC_CHANNEL *ch = arg;
- ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_CRYPTO_ERR_BEGIN + alert_code,
- 0, "handshake alert");
+ /*
+ * RFC 9001 s. 4.4: More specifically, servers MUST NOT send post-handshake
+ * TLS CertificateRequest messages, and clients MUST treat receipt of such
+ * messages as a connection error of type PROTOCOL_VIOLATION.
+ */
+ if (alert_code == SSL_AD_UNEXPECTED_MESSAGE
+ && ch->handshake_complete
+ && ossl_quic_tls_is_cert_request(ch->qtls))
+ ossl_quic_channel_raise_protocol_error(ch,
+ QUIC_ERR_PROTOCOL_VIOLATION,
+ 0,
+ "Post-handshake TLS "
+ "CertificateRequest received");
+ /*
+ * RFC 9001 s. 4.6.1: Servers MUST NOT send the early_data extension with a
+ * max_early_data_size field set to any value other than 0xffffffff. A
+ * client MUST treat receipt of a NewSessionTicket that contains an
+ * early_data extension with any other value as a connection error of type
+ * PROTOCOL_VIOLATION.
+ */
+ else if (alert_code == SSL_AD_ILLEGAL_PARAMETER
+ && ch->handshake_complete
+ && ossl_quic_tls_has_bad_max_early_data(ch->qtls))
+ ossl_quic_channel_raise_protocol_error(ch,
+ QUIC_ERR_PROTOCOL_VIOLATION,
+ 0,
+ "Bad max_early_data received");
+ else
+ ossl_quic_channel_raise_protocol_error(ch,
+ QUIC_ERR_CRYPTO_ERR_BEGIN
+ + alert_code,
+ 0, "handshake alert");
+
return 1;
}
x " sent when not performing a retry"
#define TP_REASON_REQUIRED(x) \
x " was not sent but is required"
+#define TP_REASON_INTERNAL_ERROR(x) \
+ x " encountered internal error"
static void txfc_bump_cwm_bidi(QUIC_STREAM *s, void *arg)
{
int got_initial_max_stream_data_uni = 0;
int got_initial_max_streams_bidi = 0;
int got_initial_max_streams_uni = 0;
+ int got_stateless_reset_token = 0;
+ int got_preferred_addr = 0;
int got_ack_delay_exp = 0;
int got_max_ack_delay = 0;
int got_max_udp_payload_size = 0;
int got_disable_active_migration = 0;
QUIC_CONN_ID cid;
const char *reason = "bad transport parameter";
+ ossl_unused uint64_t rx_max_idle_timeout = 0;
+ ossl_unused const void *stateless_reset_token_p = NULL;
+ QUIC_PREFERRED_ADDR pfa;
- if (ch->got_remote_transport_params)
+ if (ch->got_remote_transport_params) {
+ reason = "multiple transport parameter extensions";
goto malformed;
+ }
- if (!PACKET_buf_init(&pkt, params, params_len))
+ if (!PACKET_buf_init(&pkt, params, params_len)) {
+ ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR, 0,
+ "internal error (packet buf init)");
return 0;
+ }
while (PACKET_remaining(&pkt) > 0) {
if (!ossl_quic_wire_peek_transport_param(&pkt, &id))
goto malformed;
}
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
/* Must match our initial DCID. */
if (!ossl_quic_conn_id_eq(&ch->init_dcid, &cid)) {
reason = TP_REASON_EXPECTED_VALUE("ORIG_DCID");
goto malformed;
}
+#endif
got_orig_dcid = 1;
break;
if (got_max_ack_delay) {
/* must not appear more than once */
reason = TP_REASON_DUP("MAX_ACK_DELAY");
- return 0;
+ goto malformed;
}
if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
}
ch->rx_max_ack_delay = v;
+ ossl_ackm_set_rx_max_ack_delay(ch->ackm,
+ ossl_ms2time(ch->rx_max_ack_delay));
+
got_max_ack_delay = 1;
break;
if (got_initial_max_streams_bidi) {
/* must not appear more than once */
reason = TP_REASON_DUP("INITIAL_MAX_STREAMS_BIDI");
- return 0;
+ goto malformed;
}
if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
ch_update_idle(ch);
got_max_idle_timeout = 1;
+ rx_max_idle_timeout = v;
break;
case QUIC_TPARAM_MAX_UDP_PAYLOAD_SIZE:
break;
case QUIC_TPARAM_STATELESS_RESET_TOKEN:
- /* TODO(QUIC): Handle stateless reset tokens. */
+ if (got_stateless_reset_token) {
+ reason = TP_REASON_DUP("STATELESS_RESET_TOKEN");
+ goto malformed;
+ }
+
/*
- * We ignore these for now, but we must ensure a client doesn't
- * send them.
+ * We must ensure a client doesn't send them because we don't have
+ * processing for them.
+ *
+ * TODO(QUIC SERVER): remove this restriction
*/
if (ch->is_server) {
reason = TP_REASON_SERVER_ONLY("STATELESS_RESET_TOKEN");
reason = TP_REASON_MALFORMED("STATELESS_RESET_TOKEN");
goto malformed;
}
+ if (!ossl_quic_srtm_add(ch->srtm, ch, ch->cur_remote_seq_num,
+ (const QUIC_STATELESS_RESET_TOKEN *)body)) {
+ reason = TP_REASON_INTERNAL_ERROR("STATELESS_RESET_TOKEN");
+ goto malformed;
+ }
+ stateless_reset_token_p = body;
+ got_stateless_reset_token = 1;
break;
case QUIC_TPARAM_PREFERRED_ADDR:
- /* TODO(QUIC): Handle preferred address. */
- if (ch->is_server) {
- reason = TP_REASON_SERVER_ONLY("PREFERRED_ADDR");
- goto malformed;
- }
-
- body = ossl_quic_wire_decode_transport_param_bytes(&pkt, &id, &len);
- if (body == NULL) {
- reason = TP_REASON_MALFORMED("PREFERRED_ADDR");
- goto malformed;
+ {
+ /* TODO(QUIC FUTURE): Handle preferred address. */
+ if (got_preferred_addr) {
+ reason = TP_REASON_DUP("PREFERRED_ADDR");
+ goto malformed;
+ }
+
+ /*
+ * RFC 9000 s. 18.2: "A server that chooses a zero-length
+ * connection ID MUST NOT provide a preferred address.
+ * Similarly, a server MUST NOT include a zero-length connection
+ * ID in this transport parameter. A client MUST treat a
+ * violation of these requirements as a connection error of type
+ * TRANSPORT_PARAMETER_ERROR."
+ */
+ if (ch->is_server) {
+ reason = TP_REASON_SERVER_ONLY("PREFERRED_ADDR");
+ goto malformed;
+ }
+
+ if (ch->cur_remote_dcid.id_len == 0) {
+ reason = "PREFERRED_ADDR provided for zero-length CID";
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_preferred_addr(&pkt, &pfa)) {
+ reason = TP_REASON_MALFORMED("PREFERRED_ADDR");
+ goto malformed;
+ }
+
+ if (pfa.cid.id_len == 0) {
+ reason = "zero-length CID in PREFERRED_ADDR";
+ goto malformed;
+ }
+
+ got_preferred_addr = 1;
}
-
break;
case QUIC_TPARAM_DISABLE_ACTIVE_MIGRATION:
ch->got_remote_transport_params = 1;
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(ch_get_qlog(ch), transport, parameters_set)
+ QLOG_STR("owner", "remote");
+
+ if (got_orig_dcid)
+ QLOG_CID("original_destination_connection_id",
+ &ch->init_dcid);
+ if (got_initial_scid)
+ QLOG_CID("original_source_connection_id",
+ &ch->init_dcid);
+ if (got_retry_scid)
+ QLOG_CID("retry_source_connection_id",
+ &ch->retry_scid);
+ if (got_initial_max_data)
+ QLOG_U64("initial_max_data",
+ ossl_quic_txfc_get_cwm(&ch->conn_txfc));
+ if (got_initial_max_stream_data_bidi_local)
+ QLOG_U64("initial_max_stream_data_bidi_local",
+ ch->rx_init_max_stream_data_bidi_local);
+ if (got_initial_max_stream_data_bidi_remote)
+ QLOG_U64("initial_max_stream_data_bidi_remote",
+ ch->rx_init_max_stream_data_bidi_remote);
+ if (got_initial_max_stream_data_uni)
+ QLOG_U64("initial_max_stream_data_uni",
+ ch->rx_init_max_stream_data_uni);
+ if (got_initial_max_streams_bidi)
+ QLOG_U64("initial_max_streams_bidi",
+ ch->max_local_streams_bidi);
+ if (got_initial_max_streams_uni)
+ QLOG_U64("initial_max_streams_uni",
+ ch->max_local_streams_uni);
+ if (got_ack_delay_exp)
+ QLOG_U64("ack_delay_exponent", ch->rx_ack_delay_exp);
+ if (got_max_ack_delay)
+ QLOG_U64("max_ack_delay", ch->rx_max_ack_delay);
+ if (got_max_udp_payload_size)
+ QLOG_U64("max_udp_payload_size", ch->rx_max_udp_payload_size);
+ if (got_max_idle_timeout)
+ QLOG_U64("max_idle_timeout", rx_max_idle_timeout);
+ if (got_active_conn_id_limit)
+ QLOG_U64("active_connection_id_limit", ch->rx_active_conn_id_limit);
+ if (got_stateless_reset_token)
+ QLOG_BIN("stateless_reset_token", stateless_reset_token_p,
+ QUIC_STATELESS_RESET_TOKEN_LEN);
+ if (got_preferred_addr) {
+ QLOG_BEGIN("preferred_addr")
+ QLOG_U64("port_v4", pfa.ipv4_port);
+ QLOG_U64("port_v6", pfa.ipv6_port);
+ QLOG_BIN("ip_v4", pfa.ipv4, sizeof(pfa.ipv4));
+ QLOG_BIN("ip_v6", pfa.ipv6, sizeof(pfa.ipv6));
+ QLOG_BIN("stateless_reset_token", pfa.stateless_reset.token,
+ sizeof(pfa.stateless_reset.token));
+ QLOG_CID("connection_id", &pfa.cid);
+ QLOG_END()
+ }
+ QLOG_BOOL("disable_active_migration", got_disable_active_migration);
+ QLOG_EVENT_END()
+#endif
+
if (got_initial_max_data || got_initial_max_stream_data_bidi_remote
|| got_initial_max_streams_bidi || got_initial_max_streams_uni)
/*
goto err;
if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_ACTIVE_CONN_ID_LIMIT,
- 2))
+ QUIC_MIN_ACTIVE_CONN_ID_LIMIT))
+ goto err;
+
+ if (ch->tx_max_ack_delay != QUIC_DEFAULT_MAX_ACK_DELAY
+ && !ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_MAX_ACK_DELAY,
+ ch->tx_max_ack_delay))
goto err;
if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_DATA,
buf_len))
goto err;
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(ch_get_qlog(ch), transport, parameters_set)
+ QLOG_STR("owner", "local");
+ QLOG_BOOL("disable_active_migration", 1);
+ if (ch->is_server) {
+ QLOG_CID("original_destination_connection_id", &ch->init_dcid);
+ QLOG_CID("initial_source_connection_id", &ch->cur_local_cid);
+ } else {
+ QLOG_STR("initial_source_connection_id", "");
+ }
+ QLOG_U64("max_idle_timeout", ch->max_idle_timeout);
+ QLOG_U64("max_udp_payload_size", QUIC_MIN_INITIAL_DGRAM_LEN);
+ QLOG_U64("active_connection_id_limit", QUIC_MIN_ACTIVE_CONN_ID_LIMIT);
+ QLOG_U64("max_ack_delay", ch->tx_max_ack_delay);
+ QLOG_U64("initial_max_data", ossl_quic_rxfc_get_cwm(&ch->conn_rxfc));
+ QLOG_U64("initial_max_stream_data_bidi_local",
+ ch->tx_init_max_stream_data_bidi_local);
+ QLOG_U64("initial_max_stream_data_bidi_remote",
+ ch->tx_init_max_stream_data_bidi_remote);
+ QLOG_U64("initial_max_stream_data_uni",
+ ch->tx_init_max_stream_data_uni);
+ QLOG_U64("initial_max_streams_bidi",
+ ossl_quic_rxfc_get_cwm(&ch->max_streams_bidi_rxfc));
+ QLOG_U64("initial_max_streams_uni",
+ ossl_quic_rxfc_get_cwm(&ch->max_streams_uni_rxfc));
+ QLOG_EVENT_END()
+#endif
+
ok = 1;
err:
if (wpkt_valid)
* at least everything network I/O related. Best effort - not allowed to fail
* "loudly".
*/
-static void ch_tick(QUIC_TICK_RESULT *res, void *arg, uint32_t flags)
+void ossl_quic_channel_subtick(QUIC_CHANNEL *ch, QUIC_TICK_RESULT *res,
+ uint32_t flags)
{
OSSL_TIME now, deadline;
- QUIC_CHANNEL *ch = arg;
int channel_only = (flags & QUIC_REACTOR_TICK_FLAG_CHANNEL_ONLY) != 0;
/*
* When we tick the QUIC connection, we do everything we need to do
- * periodically. In order, we:
+ * periodically. Network I/O handling will already have been performed
+ * as necessary by the QUIC port. Thus, in order, we:
*
- * - handle any incoming data from the network;
- * - handle any timer events which are due to fire (ACKM, etc.)
- * - write any data to the network due to be sent, to the extent
- * possible;
+ * - handle any packets the DEMUX has queued up for us;
+ * - handle any timer events which are due to fire (ACKM, etc.);
+ * - generate any packets which need to be sent;
* - determine the time at which we should next be ticked.
*/
}
}
- /* Handle RXKU timeouts. */
- ch_rxku_tick(ch);
+ if (!ch->port->engine->inhibit_tick) {
+ /* Handle RXKU timeouts. */
+ ch_rxku_tick(ch);
- /* Handle any incoming data from network. */
- ch_rx_pre(ch);
-
- do {
- /* Process queued incoming packets. */
- ch_rx(ch);
+ do {
+ /* Process queued incoming packets. */
+ ch->did_tls_tick = 0;
+ ch->have_new_rx_secret = 0;
+ ch_rx(ch, channel_only);
- /*
- * Allow the handshake layer to check for any new incoming data and generate
- * new outgoing data.
- */
- ch->have_new_rx_secret = 0;
- if (!channel_only)
- ossl_quic_tls_tick(ch->qtls);
+ /*
+ * Allow the handshake layer to check for any new incoming data and
+ * generate new outgoing data.
+ */
+ if (!ch->did_tls_tick)
+ ch_tick_tls(ch, channel_only);
- /*
- * If the handshake layer gave us a new secret, we need to do RX again
- * because packets that were not previously processable and were
- * deferred might now be processable.
- *
- * TODO(QUIC): Consider handling this in the yield_secret callback.
- */
- } while (ch->have_new_rx_secret);
+ /*
+ * If the handshake layer gave us a new secret, we need to do RX
+ * again because packets that were not previously processable and
+ * were deferred might now be processable.
+ *
+ * TODO(QUIC FUTURE): Consider handling this in the yield_secret callback.
+ */
+ } while (ch->have_new_rx_secret);
+ }
/*
- * Handle any timer events which are due to fire; namely, the loss detection
- * deadline and the idle timeout.
+ * Handle any timer events which are due to fire; namely, the loss
+ * detection deadline and the idle timeout.
*
- * ACKM ACK generation deadline is polled by TXP, so we don't need to handle
- * it here.
+ * ACKM ACK generation deadline is polled by TXP, so we don't need to
+ * handle it here.
*/
now = get_time(ch);
if (ossl_time_compare(now, ch->idle_deadline) >= 0) {
/*
- * Idle timeout differs from normal protocol violation because we do not
- * send a CONN_CLOSE frame; go straight to TERMINATED.
+ * Idle timeout differs from normal protocol violation because we do
+ * not send a CONN_CLOSE frame; go straight to TERMINATED.
*/
- ch_on_idle_timeout(ch);
+ if (!ch->port->engine->inhibit_tick)
+ ch_on_idle_timeout(ch);
+
res->net_read_desired = 0;
res->net_write_desired = 0;
res->tick_deadline = ossl_time_infinite();
return;
}
- deadline = ossl_ackm_get_loss_detection_deadline(ch->ackm);
- if (!ossl_time_is_zero(deadline) && ossl_time_compare(now, deadline) >= 0)
- ossl_ackm_on_timeout(ch->ackm);
+ if (!ch->port->engine->inhibit_tick) {
+ deadline = ossl_ackm_get_loss_detection_deadline(ch->ackm);
+ if (!ossl_time_is_zero(deadline)
+ && ossl_time_compare(now, deadline) >= 0)
+ ossl_ackm_on_timeout(ch->ackm);
- /* If a ping is due, inform TXP. */
- if (ossl_time_compare(now, ch->ping_deadline) >= 0) {
- int pn_space = ossl_quic_enc_level_to_pn_space(ch->tx_enc_level);
+ /* If a ping is due, inform TXP. */
+ if (ossl_time_compare(now, ch->ping_deadline) >= 0) {
+ int pn_space = ossl_quic_enc_level_to_pn_space(ch->tx_enc_level);
- ossl_quic_tx_packetiser_schedule_ack_eliciting(ch->txp, pn_space);
- }
+ ossl_quic_tx_packetiser_schedule_ack_eliciting(ch->txp, pn_space);
- /* Write any data to the network due to be sent. */
- ch_tx(ch);
+ /*
+ * If we have no CC budget at this time we cannot process the above
+ * PING request immediately. In any case we have scheduled the
+ * request so bump the ping deadline. If we don't do this we will
+ * busy-loop endlessly as the above deadline comparison condition
+ * will still be met.
+ */
+ ch_update_ping_deadline(ch);
+ }
+
+ /* Queue any data to be sent for transmission. */
+ ch_tx(ch);
- /* Do stream GC. */
- ossl_quic_stream_map_gc(&ch->qsm);
+ /* Do stream GC. */
+ ossl_quic_stream_map_gc(&ch->qsm);
+ }
/* Determine the time at which we should next be ticked. */
res->tick_deadline = ch_determine_next_tick_deadline(ch);
/*
- * Always process network input unless we are now terminated.
- * Although we had not terminated at the beginning of this tick, network
- * errors in ch_rx_pre() or ch_tx() may have caused us to transition to the
- * Terminated state.
+ * Always process network input unless we are now terminated. Although we
+ * had not terminated at the beginning of this tick, network errors in
+ * ch_tx() may have caused us to transition to the Terminated state.
*/
res->net_read_desired = !ossl_quic_channel_is_terminated(ch);
- /* We want to write to the network if we have any in our queue. */
+ /* We want to write to the network if we have any data in our TX queue. */
res->net_write_desired
= (!ossl_quic_channel_is_terminated(ch)
&& ossl_qtx_get_queue_len_datagrams(ch->qtx) > 0);
}
-/* Process incoming datagrams, if any. */
-static void ch_rx_pre(QUIC_CHANNEL *ch)
+/*
+ * Ticks the TLS handshake layer, unless channel_only is set (in which case
+ * this is a no-op reporting success). On a TLS-reported error the error is
+ * converted into a QUIC protocol error on the channel and 0 is returned;
+ * otherwise returns 1. Sets ch->did_tls_tick so the caller's tick loop does
+ * not tick TLS twice in one pass.
+ */
+static int ch_tick_tls(QUIC_CHANNEL *ch, int channel_only)
{
-    int ret;
+    uint64_t error_code;
+    const char *error_msg;
+    ERR_STATE *error_state = NULL;
-    if (!ch->is_server && !ch->have_sent_any_pkt)
-        return;
+    if (channel_only)
+        return 1;
-    /*
-     * Get DEMUX to BIO_recvmmsg from the network and queue incoming datagrams
-     * to the appropriate QRX instance.
-     */
-    ret = ossl_quic_demux_pump(ch->demux);
-    if (ret == QUIC_DEMUX_PUMP_RES_PERMANENT_FAIL)
-        /*
-         * We don't care about transient failure, but permanent failure means we
-         * should tear down the connection as though a protocol violation
-         * occurred. Skip straight to the Terminating state as there is no point
-         * trying to send CONNECTION_CLOSE frames if the network BIO is not
-         * operating correctly.
-         */
-        ch_raise_net_error(ch);
+    ch->did_tls_tick = 1;
+    ossl_quic_tls_tick(ch->qtls);
+
+    if (ossl_quic_tls_get_error(ch->qtls, &error_code, &error_msg,
+                                &error_state)) {
+        ossl_quic_channel_raise_protocol_error_state(ch, error_code, 0,
+                                                     error_msg, error_state);
+        return 0;
+    }
+
+    return 1;
}
/* Check incoming forged packet limit and terminate connection if needed. */
}
/* Process queued incoming packets and handle frames, if any. */
-static int ch_rx(QUIC_CHANNEL *ch)
+static int ch_rx(QUIC_CHANNEL *ch, int channel_only)
{
int handled_any = 0;
+ const int closing = ossl_quic_channel_is_closing(ch);
if (!ch->is_server && !ch->have_sent_any_pkt)
/*
if (!ossl_qrx_read_pkt(ch->qrx, &ch->qrx_pkt))
break;
- if (!handled_any)
+ /* Track the amount of data received while in the closing state */
+ if (closing)
+ ossl_quic_tx_packetiser_record_received_closing_bytes(
+ ch->txp, ch->qrx_pkt->hdr->len);
+
+ if (!handled_any) {
ch_update_idle(ch);
+ ch_update_ping_deadline(ch);
+ }
- ch_rx_handle_packet(ch); /* best effort */
+ ch_rx_handle_packet(ch, channel_only); /* best effort */
/*
* Regardless of the outcome of frame handling, unref the packet.
* When in TERMINATING - CLOSING, generate a CONN_CLOSE frame whenever we
* process one or more incoming packets.
*/
- if (handled_any && ch->state == QUIC_CHANNEL_STATE_TERMINATING_CLOSING)
+ if (handled_any && closing)
ch->conn_close_queued = 1;
return 1;
}
+/*
+ * Compare two BIO_ADDRs for equality of address family, network address and
+ * port. Returns 1 if equal, 0 if they differ. Only AF_INET (and AF_INET6 when
+ * built with IPv6 support) are supported; any other family compares unequal.
+ */
+static int bio_addr_eq(const BIO_ADDR *a, const BIO_ADDR *b)
+{
+ if (BIO_ADDR_family(a) != BIO_ADDR_family(b))
+ return 0;
+
+ switch (BIO_ADDR_family(a)) {
+ case AF_INET:
+ return !memcmp(&a->s_in.sin_addr,
+ &b->s_in.sin_addr,
+ sizeof(a->s_in.sin_addr))
+ && a->s_in.sin_port == b->s_in.sin_port;
+#if OPENSSL_USE_IPV6
+ case AF_INET6:
+ return !memcmp(&a->s_in6.sin6_addr,
+ &b->s_in6.sin6_addr,
+ sizeof(a->s_in6.sin6_addr))
+ && a->s_in6.sin6_port == b->s_in6.sin6_port;
+#endif
+ default:
+ return 0; /* not supported */
+ }
+
+ /* NOTE(review): unreachable - every switch path above returns. */
+ return 1;
+}
+
/* Handles the packet currently in ch->qrx_pkt->hdr. */
-static void ch_rx_handle_packet(QUIC_CHANNEL *ch)
+static void ch_rx_handle_packet(QUIC_CHANNEL *ch, int channel_only)
{
uint32_t enc_level;
+ int old_have_processed_any_pkt = ch->have_processed_any_pkt;
assert(ch->qrx_pkt != NULL);
+ /*
+ * RFC 9000 s. 10.2.1 Closing Connection State:
+ * An endpoint that is closing is not required to process any
+ * received frame.
+ */
+ if (!ossl_quic_channel_is_active(ch))
+ return;
+
if (ossl_quic_pkt_type_is_encrypted(ch->qrx_pkt->hdr->type)) {
if (!ch->have_received_enc_pkt) {
ch->cur_remote_dcid = ch->init_scid = ch->qrx_pkt->hdr->src_conn_id;
return;
}
+ /*
+ * RFC 9000 s. 9.6: "If a client receives packets from a new server address
+ * when the client has not initiated a migration to that address, the client
+ * SHOULD discard these packets."
+ *
+ * We need to be a bit careful here as due to the BIO abstraction layer an
+ * application is liable to be weird and lie to us about peer addresses.
+ * Only apply this check if we actually are using a real AF_INET or AF_INET6
+ * address.
+ */
+ if (!ch->is_server
+ && ch->qrx_pkt->peer != NULL
+ && (
+ BIO_ADDR_family(&ch->cur_peer_addr) == AF_INET
+#if OPENSSL_USE_IPV6
+ || BIO_ADDR_family(&ch->cur_peer_addr) == AF_INET6
+#endif
+ )
+ && !bio_addr_eq(ch->qrx_pkt->peer, &ch->cur_peer_addr))
+ return;
+
if (!ch->is_server
&& ch->have_received_enc_pkt
&& ossl_quic_pkt_type_has_scid(ch->qrx_pkt->hdr->type)) {
/*
- * RFC 9000 s. 7.2. "Once a client has received a valid Initial packet
+ * RFC 9000 s. 7.2: "Once a client has received a valid Initial packet
* from the server, it MUST discard any subsequent packet it receives on
* that connection with a different SCID."
*/
*/
return;
+ ch->have_processed_any_pkt = 1;
+
+ /*
+ * RFC 9000 s. 17.2: "An endpoint MUST treat receipt of a packet that has a
+ * non-zero value for [the reserved bits] after removing both packet and
+ * header protection as a connection error of type PROTOCOL_VIOLATION."
+ */
+ if (ossl_quic_pkt_type_is_encrypted(ch->qrx_pkt->hdr->type)
+ && ch->qrx_pkt->hdr->reserved != 0) {
+ ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
+ 0, "packet header reserved bits");
+ return;
+ }
+
/* Handle incoming packet. */
switch (ch->qrx_pkt->hdr->type) {
case QUIC_PKT_TYPE_RETRY:
*/
return;
+ /*
+ * RFC 9000 s. 17.2.5.2: After the client has received and processed an
+ * Initial or Retry packet from the server, it MUST discard any
+ * subsequent Retry packets that it receives.
+ */
+ if (ch->have_received_enc_pkt)
+ return;
+
if (ch->qrx_pkt->hdr->len <= QUIC_RETRY_INTEGRITY_TAG_LEN)
/* Packets with zero-length Retry Tokens are invalid. */
return;
/*
- * TODO(QUIC): Theoretically this should probably be in the QRX.
+ * TODO(QUIC FUTURE): Theoretically this should probably be in the QRX.
* However because validation is dependent on context (namely the
* client's initial DCID) we can't do this cleanly. In the future we
* should probably add a callback to the QRX to let it call us (via
* than allow the QRX to emit a potentially malformed packet to the
* upper layers. However, special casing this will do for now.
*/
- if (!ossl_quic_validate_retry_integrity_tag(ch->libctx,
- ch->propq,
+ if (!ossl_quic_validate_retry_integrity_tag(ch->port->engine->libctx,
+ ch->port->engine->propq,
ch->qrx_pkt->hdr,
&ch->init_dcid))
/* Malformed retry packet, ignore. */
return;
- ch_retry(ch, ch->qrx_pkt->hdr->data,
- ch->qrx_pkt->hdr->len - QUIC_RETRY_INTEGRITY_TAG_LEN,
- &ch->qrx_pkt->hdr->src_conn_id);
+ if (!ch_retry(ch, ch->qrx_pkt->hdr->data,
+ ch->qrx_pkt->hdr->len - QUIC_RETRY_INTEGRITY_TAG_LEN,
+ &ch->qrx_pkt->hdr->src_conn_id))
+ ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR,
+ 0, "handling retry packet");
break;
case QUIC_PKT_TYPE_0RTT:
return;
/*
- * TODO(QUIC): Implement 0-RTT on the server side. We currently do
- * not need to implement this as a client can only do 0-RTT if we
+ * TODO(QUIC 0RTT): Implement 0-RTT on the server side. We currently
+ * do not need to implement this as a client can only do 0-RTT if we
* have given it permission to in a previous session.
*/
break;
case QUIC_PKT_TYPE_INITIAL:
case QUIC_PKT_TYPE_HANDSHAKE:
case QUIC_PKT_TYPE_1RTT:
- if (ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_HANDSHAKE)
+ if (ch->is_server && ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_HANDSHAKE)
/*
* We automatically drop INITIAL EL keys when first successfully
* decrypting a HANDSHAKE packet, as per the RFC.
break;
}
+ if (!ch->is_server
+ && ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_INITIAL
+ && ch->qrx_pkt->hdr->token_len > 0) {
+ /*
+ * RFC 9000 s. 17.2.2: Clients that receive an Initial packet with a
+ * non-zero Token Length field MUST either discard the packet or
+ * generate a connection error of type PROTOCOL_VIOLATION.
+ *
+ * TODO(QUIC FUTURE): consider the implications of RFC 9000 s. 10.2.3
+ * Immediate Close during the Handshake:
+ * However, at the cost of reducing feedback about
+ * errors for legitimate peers, some forms of denial of
+ * service can be made more difficult for an attacker
+ * if endpoints discard illegal packets rather than
+ * terminating a connection with CONNECTION_CLOSE. For
+ * this reason, endpoints MAY discard packets rather
+ * than immediately close if errors are detected in
+ * packets that lack authentication.
+ * I.e. should we drop this packet instead of closing the connection?
+ */
+ ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
+ 0, "client received initial token");
+ break;
+ }
+
/* This packet contains frames, pass to the RXDP. */
ossl_quic_handle_frames(ch, ch->qrx_pkt); /* best effort */
+
+ if (ch->did_crypto_frame)
+ ch_tick_tls(ch, channel_only);
+
+ break;
+
+ case QUIC_PKT_TYPE_VERSION_NEG:
+ /*
+ * "A client MUST discard any Version Negotiation packet if it has
+ * received and successfully processed any other packet."
+ */
+ if (!old_have_processed_any_pkt)
+ ch_rx_handle_version_neg(ch, ch->qrx_pkt);
+
break;
default:
}
}
-/*
- * This is called by the demux when we get a packet not destined for any known
- * DCID.
- */
-static void ch_default_packet_handler(QUIC_URXE *e, void *arg)
+static void ch_rx_handle_version_neg(QUIC_CHANNEL *ch, OSSL_QRX_PKT *pkt)
{
- QUIC_CHANNEL *ch = arg;
- PACKET pkt;
- QUIC_PKT_HDR hdr;
-
- if (!ossl_assert(ch->is_server))
- goto undesirable;
-
/*
- * We only support one connection to our server currently, so if we already
- * started one, ignore any new connection attempts.
+ * We do not support version negotiation at this time. As per RFC 9000 s.
+ * 6.2, we MUST abandon the connection attempt if we receive a Version
+ * Negotiation packet, unless we have already successfully processed another
+ * incoming packet, or the packet lists the QUIC version we want to use.
*/
- if (ch->state != QUIC_CHANNEL_STATE_IDLE)
- goto undesirable;
+ PACKET vpkt;
+ unsigned long v;
- /*
- * We have got a packet for an unknown DCID. This might be an attempt to
- * open a new connection.
- */
- if (e->data_len < QUIC_MIN_INITIAL_DGRAM_LEN)
- goto undesirable;
-
- if (!PACKET_buf_init(&pkt, ossl_quic_urxe_data(e), e->data_len))
- goto err;
-
- /*
- * We set short_conn_id_len to SIZE_MAX here which will cause the decode
- * operation to fail if we get a 1-RTT packet. This is fine since we only
- * care about Initial packets.
- */
- if (!ossl_quic_wire_decode_pkt_hdr(&pkt, SIZE_MAX, 1, 0, &hdr, NULL))
- goto undesirable;
+ if (!PACKET_buf_init(&vpkt, pkt->hdr->data, pkt->hdr->len))
+ return;
- switch (hdr.version) {
- case QUIC_VERSION_1:
+ while (PACKET_remaining(&vpkt) > 0) {
+ if (!PACKET_get_net_4(&vpkt, &v))
break;
- case QUIC_VERSION_NONE:
- default:
- /* Unknown version or proactive version negotiation request, bail. */
- /* TODO(QUIC): Handle version negotiation on server side */
- goto undesirable;
+ if ((uint32_t)v == QUIC_VERSION_1)
+ return;
}
- /*
- * We only care about Initial packets which might be trying to establish a
- * connection.
- */
- if (hdr.type != QUIC_PKT_TYPE_INITIAL)
- goto undesirable;
+ /* No match, this is a failure case. */
+ ch_raise_version_neg_failure(ch);
+}
- /*
- * Assume this is a valid attempt to initiate a connection.
- *
- * We do not register the DCID in the initial packet we received and that
- * DCID is not actually used again, thus after provisioning the correct
- * Initial keys derived from it (which is done in the call below) we pass
- * the received packet directly to the QRX so that it can process it as a
- * one-time thing, instead of going through the usual DEMUX DCID-based
- * routing.
- */
- if (!ch_server_on_new_conn(ch, &e->peer,
- &hdr.src_conn_id,
- &hdr.dst_conn_id))
- goto err;
+static void ch_raise_version_neg_failure(QUIC_CHANNEL *ch)
+{
+ QUIC_TERMINATE_CAUSE tcause = {0};
- ossl_qrx_inject_urxe(ch->qrx, e);
- return;
+ tcause.error_code = QUIC_ERR_CONNECTION_REFUSED;
+ tcause.reason = "version negotiation failure";
+ tcause.reason_len = strlen(tcause.reason);
-err:
- ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR, 0,
- "internal error");
-undesirable:
- ossl_quic_demux_release_urxe(ch->demux, e);
+ /*
+ * Skip TERMINATING state; this is not considered a protocol error and we do
+ * not send CONNECTION_CLOSE.
+ */
+ ch_start_terminating(ch, &tcause, 1);
}
/* Try to generate packets and if possible, flush them to the network. */
static int ch_tx(QUIC_CHANNEL *ch)
{
QUIC_TXP_STATUS status;
+ int res;
+
+ /*
+ * RFC 9000 s. 10.2.2: Draining Connection State:
+ * While otherwise identical to the closing state, an endpoint
+ * in the draining state MUST NOT send any packets.
+ * and:
+ * An endpoint MUST NOT send further packets.
+ */
+ if (ossl_quic_channel_is_draining(ch))
+ return 0;
- if (ch->state == QUIC_CHANNEL_STATE_TERMINATING_CLOSING) {
+ if (ossl_quic_channel_is_closing(ch)) {
/*
* While closing, only send CONN_CLOSE if we've received more traffic
* from the peer. Once we tell the TXP to generate CONN_CLOSE, all
* future calls to it generate CONN_CLOSE frames, so otherwise we would
* just constantly generate CONN_CLOSE frames.
+ *
+ * Conforming to RFC 9000 s. 10.2.1 Closing Connection State:
+ * An endpoint SHOULD limit the rate at which it generates
+ * packets in the closing state.
*/
if (!ch->conn_close_queued)
return 0;
ch->rxku_pending_confirm_done = 0;
- /*
- * Send a packet, if we need to. Best effort. The TXP consults the CC and
- * applies any limitations imposed by it, so we don't need to do it here.
- *
- * Best effort. In particular if TXP fails for some reason we should still
- * flush any queued packets which we already generated.
- */
- switch (ossl_quic_tx_packetiser_generate(ch->txp,
- TX_PACKETISER_ARCHETYPE_NORMAL,
- &status)) {
- case TX_PACKETISER_RES_SENT_PKT:
- ch->have_sent_any_pkt = 1; /* Packet was sent */
-
+ /* Loop until we stop generating packets to send */
+ do {
/*
- * RFC 9000 s. 10.1. 'An endpoint also restarts its idle timer when
- * sending an ack-eliciting packet if no other ack-eliciting packets
- * have been sent since last receiving and processing a packet.'
- */
- if (status.sent_ack_eliciting && !ch->have_sent_ack_eliciting_since_rx) {
- ch_update_idle(ch);
- ch->have_sent_ack_eliciting_since_rx = 1;
- }
+ * Send a packet, if we need to. Best effort. The TXP consults the CC and
+ * applies any limitations imposed by it, so we don't need to do it here.
+ *
+ * Best effort. In particular if TXP fails for some reason we should
+ * still flush any queued packets which we already generated.
+ */
+ res = ossl_quic_tx_packetiser_generate(ch->txp, &status);
+ if (status.sent_pkt > 0) {
+ ch->have_sent_any_pkt = 1; /* Packet(s) were sent */
+ ch->port->have_sent_any_pkt = 1;
+
+ /*
+ * RFC 9000 s. 10.1. 'An endpoint also restarts its idle timer when
+ * sending an ack-eliciting packet if no other ack-eliciting packets
+ * have been sent since last receiving and processing a packet.'
+ */
+ if (status.sent_ack_eliciting
+ && !ch->have_sent_ack_eliciting_since_rx) {
+ ch_update_idle(ch);
+ ch->have_sent_ack_eliciting_since_rx = 1;
+ }
- if (ch->rxku_pending_confirm_done)
- ch->rxku_pending_confirm = 0;
+ if (!ch->is_server && status.sent_handshake)
+ /*
+ * RFC 9001 s. 4.9.1: A client MUST discard Initial keys when it
+ * first sends a Handshake packet.
+ */
+ ch_discard_el(ch, QUIC_ENC_LEVEL_INITIAL);
- ch_update_ping_deadline(ch);
- break;
+ if (ch->rxku_pending_confirm_done)
+ ch->rxku_pending_confirm = 0;
- case TX_PACKETISER_RES_NO_PKT:
- break; /* No packet was sent */
- default:
- ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR, 0,
- "internal error");
- break; /* Internal failure (e.g. allocation, assertion) */
- }
+ ch_update_ping_deadline(ch);
+ }
+
+ if (!res) {
+ /*
+ * One case where TXP can fail is if we reach a TX PN of 2**62 - 1.
+ * As per RFC 9000 s. 12.3, if this happens we MUST close the
+ * connection without sending a CONNECTION_CLOSE frame. This is
+ * actually handled as an emergent consequence of our design, as the
+ * TX packetiser will never transmit another packet when the TX PN
+ * reaches the limit.
+ *
+ * Calling the below function terminates the connection; its attempt
+ * to schedule a CONNECTION_CLOSE frame will not actually cause a
+ * packet to be transmitted for this reason.
+ */
+ ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR,
+ 0,
+ "internal error (txp generate)");
+ break;
+ }
+ } while (status.sent_pkt > 0);
/* Flush packets to network. */
switch (ossl_qtx_flush_net(ch->qtx)) {
case QTX_FLUSH_NET_RES_PERMANENT_FAIL:
default:
/* Permanent underlying network BIO, start terminating. */
- ch_raise_net_error(ch);
+ ossl_quic_port_raise_net_error(ch->port, ch);
break;
}
deadline = ossl_time_infinite();
/*
- * If the CC will let us send acks, check the ack deadline for all
- * enc_levels that are actually provisioned
+ * Check the ack deadline for all enc_levels that are actually provisioned.
+ * ACKs aren't restricted by CC.
*/
- if (ch->cc_method->get_tx_allowance(ch->cc_data) > 0) {
- for (i = 0; i < QUIC_ENC_LEVEL_NUM; i++) {
- if (ossl_qtx_is_enc_level_provisioned(ch->qtx, i)) {
- deadline = ossl_time_min(deadline,
- ossl_ackm_get_ack_deadline(ch->ackm,
- ossl_quic_enc_level_to_pn_space(i)));
- }
+ for (i = 0; i < QUIC_ENC_LEVEL_NUM; i++) {
+ if (ossl_qtx_is_enc_level_provisioned(ch->qtx, i)) {
+ deadline = ossl_time_min(deadline,
+ ossl_ackm_get_ack_deadline(ch->ackm,
+ ossl_quic_enc_level_to_pn_space(i)));
}
}
- /* When will CC let us send more? */
- if (ossl_quic_tx_packetiser_has_pending(ch->txp, TX_PACKETISER_ARCHETYPE_NORMAL,
- TX_PACKETISER_BYPASS_CC))
- deadline = ossl_time_min(deadline,
- ch->cc_method->get_wakeup_deadline(ch->cc_data));
+ /*
+ * When do we need to send an ACK-eliciting packet to reset the idle
+ * deadline timer for the peer?
+ */
+ if (!ossl_time_is_infinite(ch->ping_deadline))
+ deadline = ossl_time_min(deadline, ch->ping_deadline);
+
+ /* Apply TXP wakeup deadline. */
+ deadline = ossl_time_min(deadline,
+ ossl_quic_tx_packetiser_get_deadline(ch->txp));
/* Is the terminating timer armed? */
if (ossl_quic_channel_is_terminating(ch))
deadline = ossl_time_min(deadline,
ch->idle_deadline);
- /*
- * When do we need to send an ACK-eliciting packet to reset the idle
- * deadline timer for the peer?
- */
- if (!ossl_time_is_infinite(ch->ping_deadline))
- deadline = ossl_time_min(deadline,
- ch->ping_deadline);
-
/* When does the RXKU process complete? */
if (ch->rxku_in_progress)
deadline = ossl_time_min(deadline, ch->rxku_update_end_deadline);
}
/*
- * QUIC Channel: Network BIO Configuration
- * =======================================
+ * QUIC Channel: Lifecycle Events
+ * ==============================
*/
-/* Determines whether we can support a given poll descriptor. */
-static int validate_poll_descriptor(const BIO_POLL_DESCRIPTOR *d)
-{
- if (d->type == BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD && d->value.fd < 0)
- return 0;
-
- return 1;
-}
-
-BIO *ossl_quic_channel_get_net_rbio(QUIC_CHANNEL *ch)
-{
- return ch->net_rbio;
-}
-
-BIO *ossl_quic_channel_get_net_wbio(QUIC_CHANNEL *ch)
-{
- return ch->net_wbio;
-}
-
/*
- * QUIC_CHANNEL does not ref any BIO it is provided with, nor is any ref
- * transferred to it. The caller (i.e., QUIC_CONNECTION) is responsible for
- * ensuring the BIO lasts until the channel is freed or the BIO is switched out
- * for another BIO by a subsequent successful call to this function.
+ * Record a state transition. This is not necessarily a change to ch->state but
+ * also includes the handshake becoming complete or confirmed, etc.
*/
-int ossl_quic_channel_set_net_rbio(QUIC_CHANNEL *ch, BIO *net_rbio)
+static void ch_record_state_transition(QUIC_CHANNEL *ch, uint32_t new_state)
{
- BIO_POLL_DESCRIPTOR d = {0};
-
- if (ch->net_rbio == net_rbio)
- return 1;
+ uint32_t old_state = ch->state;
- if (net_rbio != NULL) {
- if (!BIO_get_rpoll_descriptor(net_rbio, &d))
- /* Non-pollable BIO */
- d.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
-
- if (!validate_poll_descriptor(&d))
- return 0;
- }
+ ch->state = new_state;
- ossl_quic_reactor_set_poll_r(&ch->rtor, &d);
- ossl_quic_demux_set_bio(ch->demux, net_rbio);
- ch->net_rbio = net_rbio;
- return 1;
+ ossl_qlog_event_connectivity_connection_state_updated(ch_get_qlog(ch),
+ old_state,
+ new_state,
+ ch->handshake_complete,
+ ch->handshake_confirmed);
}
-int ossl_quic_channel_set_net_wbio(QUIC_CHANNEL *ch, BIO *net_wbio)
-{
- BIO_POLL_DESCRIPTOR d = {0};
-
- if (ch->net_wbio == net_wbio)
- return 1;
-
- if (net_wbio != NULL) {
- if (!BIO_get_wpoll_descriptor(net_wbio, &d))
- /* Non-pollable BIO */
- d.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
-
- if (!validate_poll_descriptor(&d))
- return 0;
- }
-
- ossl_quic_reactor_set_poll_w(&ch->rtor, &d);
- ossl_qtx_set_bio(ch->qtx, net_wbio);
- ch->net_wbio = net_wbio;
- return 1;
-}
-
-/*
- * QUIC Channel: Lifecycle Events
- * ==============================
- */
int ossl_quic_channel_start(QUIC_CHANNEL *ch)
{
if (ch->is_server)
return 0;
/* Plug in secrets for the Initial EL. */
- if (!ossl_quic_provide_initial_secret(ch->libctx,
- ch->propq,
+ if (!ossl_quic_provide_initial_secret(ch->port->engine->libctx,
+ ch->port->engine->propq,
&ch->init_dcid,
ch->is_server,
ch->qrx, ch->qtx))
return 0;
/* Change state. */
- ch->state = QUIC_CHANNEL_STATE_ACTIVE;
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_ACTIVE);
ch->doing_proactive_ver_neg = 0; /* not currently supported */
+ ossl_qlog_event_connectivity_connection_started(ch_get_qlog(ch),
+ &ch->init_dcid);
+
/* Handshake layer: start (e.g. send CH). */
- if (!ossl_quic_tls_tick(ch->qtls))
+ if (!ch_tick_tls(ch, /*channel_only=*/0))
return 0;
- ossl_quic_reactor_tick(&ch->rtor, 0); /* best effort */
+ ossl_quic_reactor_tick(ossl_quic_port_get0_reactor(ch->port), 0); /* best effort */
return 1;
}
/* Start a locally initiated connection shutdown. */
-void ossl_quic_channel_local_close(QUIC_CHANNEL *ch, uint64_t app_error_code)
+void ossl_quic_channel_local_close(QUIC_CHANNEL *ch, uint64_t app_error_code,
+ const char *app_reason)
{
QUIC_TERMINATE_CAUSE tcause = {0};
tcause.app = 1;
tcause.error_code = app_error_code;
+ tcause.reason = app_reason;
+ tcause.reason_len = app_reason != NULL ? strlen(app_reason) : 0;
ch_start_terminating(ch, &tcause, 0);
}
{
void *buf;
+ /*
+ * RFC 9000 s. 17.2.5.1: "A client MUST discard a Retry packet that contains
+ * a SCID field that is identical to the DCID field of its initial packet."
+ */
+ if (ossl_quic_conn_id_eq(&ch->init_dcid, retry_scid))
+ return 1;
+
/* We change to using the SCID in the Retry packet as the DCID. */
if (!ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, retry_scid))
return 0;
if ((buf = OPENSSL_memdup(retry_token, retry_token_len)) == NULL)
return 0;
- ossl_quic_tx_packetiser_set_initial_token(ch->txp, buf, retry_token_len,
- free_token, NULL);
+ if (!ossl_quic_tx_packetiser_set_initial_token(ch->txp, buf,
+ retry_token_len,
+ free_token, NULL)) {
+ /*
+ * This may fail if the token we receive is too big for us to ever be
+ * able to transmit in an outgoing Initial packet.
+ */
+ ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INVALID_TOKEN, 0,
+ "received oversize token");
+ OPENSSL_free(buf);
+ return 0;
+ }
ch->retry_scid = *retry_scid;
ch->doing_retry = 1;
* Plug in new secrets for the Initial EL. This is the only time we change
* the secrets for an EL after we already provisioned it.
*/
- if (!ossl_quic_provide_initial_secret(ch->libctx,
- ch->propq,
+ if (!ossl_quic_provide_initial_secret(ch->port->engine->libctx,
+ ch->port->engine->propq,
&ch->retry_scid,
/*is_server=*/0,
ch->qrx, ch->qtx))
ch_discard_el(ch, QUIC_ENC_LEVEL_HANDSHAKE);
ch->handshake_confirmed = 1;
+ ch_record_state_transition(ch, ch->state);
ossl_ackm_on_handshake_confirmed(ch->ackm);
return 1;
}
* Any successive calls have their termination cause data discarded;
* once we start sending a CONNECTION_CLOSE frame, we don't change the details
* in it.
+ *
+ * This conforms to RFC 9000 s. 10.2.1: Closing Connection State:
+ * To minimize the state that an endpoint maintains for a closing
+ * connection, endpoints MAY send the exact same packet in response
+ * to any received packet.
+ *
+ * We don't drop any connection state (specifically packet protection keys)
+ * even though we are permitted to. This conforms to RFC 9000 s. 10.2.1:
+ * Closing Connection State:
+ * An endpoint MAY retain packet protection keys for incoming
+ * packets to allow it to read and process a CONNECTION_CLOSE frame.
+ *
+ * Note that we do not conform to these two from the same section:
+ * An endpoint's selected connection ID and the QUIC version
+ * are sufficient information to identify packets for a closing
+ * connection; the endpoint MAY discard all other connection state.
+ * and:
+ * An endpoint MAY drop packet protection keys when entering the
+ * closing state and send a packet containing a CONNECTION_CLOSE
+ * frame in response to any UDP datagram that is received.
*/
+/*
+ * Deep-copy a termination cause. The reason string, if any, is duplicated
+ * with a guaranteed NUL terminator; on allocation failure dst->reason is left
+ * NULL and dst->reason_len 0 so that termination itself cannot fail.
+ */
+static void copy_tcause(QUIC_TERMINATE_CAUSE *dst,
+ const QUIC_TERMINATE_CAUSE *src)
+{
+ dst->error_code = src->error_code;
+ dst->frame_type = src->frame_type;
+ dst->app = src->app;
+ dst->remote = src->remote;
+
+ dst->reason = NULL;
+ dst->reason_len = 0;
+
+ if (src->reason != NULL && src->reason_len > 0) {
+ size_t l = src->reason_len;
+ char *r;
+
+ /* Clamp so that l + 1 below cannot overflow size_t. */
+ if (l >= SIZE_MAX)
+ --l;
+
+ /*
+ * If this fails, dst->reason becomes NULL and we simply do not use a
+ * reason. This ensures termination is infallible.
+ */
+ dst->reason = r = OPENSSL_memdup(src->reason, l + 1);
+ if (r == NULL)
+ return;
+
+ /* Ensure the duplicated reason is always NUL-terminated. */
+ r[l] = '\0';
+ dst->reason_len = l;
+ }
+}
+
static void ch_start_terminating(QUIC_CHANNEL *ch,
const QUIC_TERMINATE_CAUSE *tcause,
int force_immediate)
{
+ /* No point sending anything if we haven't sent anything yet. */
+ if (!ch->have_sent_any_pkt)
+ force_immediate = 1;
+
switch (ch->state) {
default:
case QUIC_CHANNEL_STATE_IDLE:
- ch->terminate_cause = *tcause;
+ copy_tcause(&ch->terminate_cause, tcause);
ch_on_terminating_timeout(ch);
break;
case QUIC_CHANNEL_STATE_ACTIVE:
- ch->terminate_cause = *tcause;
+ copy_tcause(&ch->terminate_cause, tcause);
+
+ ossl_qlog_event_connectivity_connection_closed(ch_get_qlog(ch), tcause);
if (!force_immediate) {
- ch->state = tcause->remote ? QUIC_CHANNEL_STATE_TERMINATING_DRAINING
- : QUIC_CHANNEL_STATE_TERMINATING_CLOSING;
+ ch_record_state_transition(ch, tcause->remote
+ ? QUIC_CHANNEL_STATE_TERMINATING_DRAINING
+ : QUIC_CHANNEL_STATE_TERMINATING_CLOSING);
+ /*
+ * RFC 9000 s. 10.2 Immediate Close
+ * These states SHOULD persist for at least three times
+ * the current PTO interval as defined in [QUIC-RECOVERY].
+ */
ch->terminate_deadline
= ossl_time_add(get_time(ch),
ossl_time_multiply(ossl_ackm_get_pto_duration(ch->ackm),
f.error_code = ch->terminate_cause.error_code;
f.frame_type = ch->terminate_cause.frame_type;
f.is_app = ch->terminate_cause.app;
+ f.reason = (char *)ch->terminate_cause.reason;
+ f.reason_len = ch->terminate_cause.reason_len;
ossl_quic_tx_packetiser_schedule_conn_close(ch->txp, &f);
+ /*
+ * RFC 9000 s. 10.2.2 Draining Connection State:
+ * An endpoint that receives a CONNECTION_CLOSE frame MAY
+ * send a single packet containing a CONNECTION_CLOSE
+ * frame before entering the draining state, using a
+ * NO_ERROR code if appropriate
+ */
ch->conn_close_queued = 1;
}
} else {
if (force_immediate)
ch_on_terminating_timeout(ch);
else if (tcause->remote)
- ch->state = QUIC_CHANNEL_STATE_TERMINATING_DRAINING;
+ /*
+ * RFC 9000 s. 10.2.2 Draining Connection State:
+ * An endpoint MAY enter the draining state from the
+ * closing state if it receives a CONNECTION_CLOSE frame,
+ * which indicates that the peer is also closing or draining.
+ */
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_TERMINATING_DRAINING);
break;
tcause.app = f->is_app;
tcause.error_code = f->error_code;
tcause.frame_type = f->frame_type;
-
+ tcause.reason = f->reason;
+ tcause.reason_len = f->reason_len;
ch_start_terminating(ch, &tcause, 0);
}
static int ch_enqueue_retire_conn_id(QUIC_CHANNEL *ch, uint64_t seq_num)
{
- BUF_MEM *buf_mem;
+ BUF_MEM *buf_mem = NULL;
WPACKET wpkt;
size_t l;
+ ossl_quic_srtm_remove(ch->srtm, ch, seq_num);
+
if ((buf_mem = BUF_MEM_new()) == NULL)
- return 0;
+ goto err;
if (!WPACKET_init(&wpkt, buf_mem))
goto err;
goto err;
if (ossl_quic_cfq_add_frame(ch->cfq, 1, QUIC_PN_SPACE_APP,
- OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID,
+ OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID, 0,
(unsigned char *)buf_mem->data, l,
free_frame_data, NULL) == NULL)
goto err;
return;
/* We allow only two active connection ids; first check some constraints */
-
if (ch->cur_remote_dcid.id_len == 0) {
/* Changing from 0 length connection id is disallowed */
ossl_quic_channel_raise_protocol_error(ch,
}
if (new_remote_seq_num > ch->cur_remote_seq_num) {
+ /* Add new stateless reset token */
+ if (!ossl_quic_srtm_add(ch->srtm, ch, new_remote_seq_num,
+ &f->stateless_reset)) {
+ ossl_quic_channel_raise_protocol_error(
+ ch, QUIC_ERR_CONNECTION_ID_LIMIT_ERROR,
+ OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
+ "unable to store stateless reset token");
+
+ return;
+ }
ch->cur_remote_seq_num = new_remote_seq_num;
ch->cur_remote_dcid = f->conn_id;
ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, &ch->cur_remote_dcid);
}
+
/*
* RFC 9000-5.1.2: Upon receipt of an increased Retire Prior To
* field, the peer MUST stop using the corresponding connection IDs
* and retire them with RETIRE_CONNECTION_ID frames before adding the
* newly provided connection ID to the set of active connection IDs.
*/
+
+ /*
+ * Note: RFC 9000 s. 19.15 says:
+ * "An endpoint that receives a NEW_CONNECTION_ID frame with a sequence
+ * number smaller than the Retire Prior To field of a previously received
+ * NEW_CONNECTION_ID frame MUST send a corresponding
+ * RETIRE_CONNECTION_ID frame that retires the newly received connection
+ * ID, unless it has already done so for that sequence number."
+ *
+ * Since we currently always queue RETIRE_CONN_ID frames based on the Retire
+ * Prior To field of a NEW_CONNECTION_ID frame immediately upon receiving
+ * that NEW_CONNECTION_ID frame, by definition this will always be met.
+ * This may change in future when we change our CID handling.
+ */
while (new_retire_prior_to > ch->cur_retire_prior_to) {
if (!ch_enqueue_retire_conn_id(ch, ch->cur_retire_prior_to))
break;
OSSL_ERR_STATE_save(ch->err_state);
}
-static void ch_raise_net_error(QUIC_CHANNEL *ch)
+/*
+ * Inject a received datagram (URXE) directly into this channel's QRX for
+ * decryption and subsequent processing.
+ */
+void ossl_quic_channel_inject(QUIC_CHANNEL *ch, QUIC_URXE *e)
+{
+ ossl_qrx_inject_urxe(ch->qrx, e);
+}
+
+/*
+ * Called when a stateless reset is detected for this channel. Treats it as a
+ * remote-initiated termination with NO_ERROR and begins the (non-immediate)
+ * termination process.
+ */
+void ossl_quic_channel_on_stateless_reset(QUIC_CHANNEL *ch)
+{
+ QUIC_TERMINATE_CAUSE tcause = {0};
+
+ tcause.error_code = QUIC_ERR_NO_ERROR;
+ tcause.remote = 1;
+ ch_start_terminating(ch, &tcause, 0);
+}
+
+void ossl_quic_channel_raise_net_error(QUIC_CHANNEL *ch)
{
QUIC_TERMINATE_CAUSE tcause = {0};
+ if (ch->net_error)
+ return;
+
ch->net_error = 1;
- ch_save_err_state(ch);
tcause.error_code = QUIC_ERR_INTERNAL_ERROR;
+ tcause.reason = "network BIO I/O error";
+ tcause.reason_len = strlen(tcause.reason);
/*
* Skip Terminating state and go directly to Terminated, no point trying to
if (ch == NULL)
return;
- OSSL_ERR_STATE_restore(ch->err_state);
+ if (!ossl_quic_port_is_running(ch->port))
+ ossl_quic_port_restore_err_state(ch->port);
+ else
+ OSSL_ERR_STATE_restore(ch->err_state);
}
-void ossl_quic_channel_raise_protocol_error(QUIC_CHANNEL *ch,
- uint64_t error_code,
- uint64_t frame_type,
- const char *reason)
+void ossl_quic_channel_raise_protocol_error_loc(QUIC_CHANNEL *ch,
+ uint64_t error_code,
+ uint64_t frame_type,
+ const char *reason,
+ ERR_STATE *err_state,
+ const char *src_file,
+ int src_line,
+ const char *src_func)
{
QUIC_TERMINATE_CAUSE tcause = {0};
+ int err_reason = error_code == QUIC_ERR_INTERNAL_ERROR
+ ? ERR_R_INTERNAL_ERROR : SSL_R_QUIC_PROTOCOL_ERROR;
+ const char *err_str = ossl_quic_err_to_string(error_code);
+ const char *err_str_pfx = " (", *err_str_sfx = ")";
+ const char *ft_str = NULL;
+ const char *ft_str_pfx = " (", *ft_str_sfx = ")";
+
+ if (ch->protocol_error)
+ /* Only the first call to this function matters. */
+ return;
+
+ if (err_str == NULL) {
+ err_str = "";
+ err_str_pfx = "";
+ err_str_sfx = "";
+ }
+
+ /*
+ * If we were provided an underlying error state, restore it and then append
+ * our ERR on top as a "cover letter" error.
+ */
+ if (err_state != NULL)
+ OSSL_ERR_STATE_restore(err_state);
+
+ if (frame_type != 0) {
+ ft_str = ossl_quic_frame_type_to_string(frame_type);
+ if (ft_str == NULL) {
+ ft_str = "";
+ ft_str_pfx = "";
+ ft_str_sfx = "";
+ }
+
+ ERR_raise_data(ERR_LIB_SSL, err_reason,
+ "QUIC error code: 0x%llx%s%s%s "
+ "(triggered by frame type: 0x%llx%s%s%s), reason: \"%s\"",
+ (unsigned long long) error_code,
+ err_str_pfx, err_str, err_str_sfx,
+ (unsigned long long) frame_type,
+ ft_str_pfx, ft_str, ft_str_sfx,
+ reason);
+ } else {
+ ERR_raise_data(ERR_LIB_SSL, err_reason,
+ "QUIC error code: 0x%llx%s%s%s, reason: \"%s\"",
+ (unsigned long long) error_code,
+ err_str_pfx, err_str, err_str_sfx,
+ reason);
+ }
- if (error_code == QUIC_ERR_INTERNAL_ERROR)
- /* Internal errors might leave some errors on the stack. */
- ch_save_err_state(ch);
+ if (src_file != NULL)
+ ERR_set_debug(src_file, src_line, src_func);
+
+ ch_save_err_state(ch);
tcause.error_code = error_code;
tcause.frame_type = frame_type;
+ tcause.reason = reason;
+ tcause.reason_len = strlen(reason);
+ ch->protocol_error = 1;
ch_start_terminating(ch, &tcause, 0);
}
*/
static void ch_on_terminating_timeout(QUIC_CHANNEL *ch)
{
- ch->state = QUIC_CHANNEL_STATE_TERMINATED;
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_TERMINATED);
+}
+
+/*
+ * Determines the effective idle timeout duration. This is based on the idle
+ * timeout values that we and our peer signalled in transport parameters,
+ * but with some limits applied.
+ */
+static OSSL_TIME ch_get_effective_idle_timeout_duration(QUIC_CHANNEL *ch)
+{
+ OSSL_TIME pto;
+
+ if (ch->max_idle_timeout == 0)
+ return ossl_time_infinite();
+
+ /*
+ * RFC 9000 s. 10.1: Idle Timeout
+ * To avoid excessively small idle timeout periods, endpoints
+ * MUST increase the idle timeout period to be at least three
+ * times the current Probe Timeout (PTO). This allows for
+ * multiple PTOs to expire, and therefore multiple probes to
+ * be sent and lost, prior to idle timeout.
+ */
+ pto = ossl_ackm_get_pto_duration(ch->ackm);
+ return ossl_time_max(ossl_ms2time(ch->max_idle_timeout),
+ ossl_time_multiply(pto, 3));
}
/*
*/
static void ch_update_idle(QUIC_CHANNEL *ch)
{
- if (ch->max_idle_timeout == 0)
- ch->idle_deadline = ossl_time_infinite();
- else
- ch->idle_deadline = ossl_time_add(get_time(ch),
- ossl_ms2time(ch->max_idle_timeout));
+ ch->idle_deadline = ossl_time_add(get_time(ch),
+ ch_get_effective_idle_timeout_duration(ch));
}
/*
*/
static void ch_update_ping_deadline(QUIC_CHANNEL *ch)
{
- if (ch->max_idle_timeout > 0) {
- /*
- * Maximum amount of time without traffic before we send a PING to keep
- * the connection open. Usually we use max_idle_timeout/2, but ensure
- * the period never exceeds the assumed NAT interval to ensure NAT
- * devices don't have their state time out (RFC 9000 s. 10.1.2).
- */
- OSSL_TIME max_span
- = ossl_time_divide(ossl_ms2time(ch->max_idle_timeout), 2);
-
- max_span = ossl_time_min(max_span, MAX_NAT_INTERVAL);
+ OSSL_TIME max_span, idle_duration;
- ch->ping_deadline = ossl_time_add(get_time(ch), max_span);
- } else {
+ idle_duration = ch_get_effective_idle_timeout_duration(ch);
+ if (ossl_time_is_infinite(idle_duration)) {
ch->ping_deadline = ossl_time_infinite();
+ return;
}
+
+ /*
+ * Maximum amount of time without traffic before we send a PING to keep
+ * the connection open. Usually we use max_idle_timeout/2, but ensure
+ * the period never exceeds the assumed NAT interval to ensure NAT
+ * devices don't have their state time out (RFC 9000 s. 10.1.2).
+ */
+ max_span = ossl_time_divide(idle_duration, 2);
+ max_span = ossl_time_min(max_span, MAX_NAT_INTERVAL);
+ ch->ping_deadline = ossl_time_add(get_time(ch), max_span);
}
/* Called when the idle timeout expires. */
ch->terminate_cause.error_code = UINT64_MAX;
ch->terminate_cause.frame_type = 0;
- ch->state = QUIC_CHANNEL_STATE_TERMINATED;
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_TERMINATED);
}
/* Called when we, as a server, get a new incoming connection. */
-static int ch_server_on_new_conn(QUIC_CHANNEL *ch, const BIO_ADDR *peer,
- const QUIC_CONN_ID *peer_scid,
- const QUIC_CONN_ID *peer_dcid)
+int ossl_quic_channel_on_new_conn(QUIC_CHANNEL *ch, const BIO_ADDR *peer,
+ const QUIC_CONN_ID *peer_scid,
+ const QUIC_CONN_ID *peer_dcid)
{
if (!ossl_assert(ch->state == QUIC_CHANNEL_STATE_IDLE && ch->is_server))
return 0;
- /* Generate a SCID we will use for the connection. */
- if (!gen_rand_conn_id(ch->libctx, INIT_DCID_LEN,
- &ch->cur_local_cid))
+ /* Generate an Initial LCID we will use for the connection. */
+ if (!ossl_quic_lcidm_generate_initial(ch->lcidm, ch, &ch->cur_local_cid))
return 0;
/* Note our newly learnt peer address and CIDs. */
return 0;
/* Plug in secrets for the Initial EL. */
- if (!ossl_quic_provide_initial_secret(ch->libctx,
- ch->propq,
+ if (!ossl_quic_provide_initial_secret(ch->port->engine->libctx,
+ ch->port->engine->propq,
&ch->init_dcid,
/*is_server=*/1,
ch->qrx, ch->qtx))
return 0;
- /* Register our local CID in the DEMUX. */
- if (!ossl_qrx_add_dst_conn_id(ch->qrx, &ch->cur_local_cid))
+ /* Register the peer ODCID in the LCIDM. */
+ if (!ossl_quic_lcidm_enrol_odcid(ch->lcidm, ch, &ch->init_dcid))
return 0;
/* Change state. */
- ch->state = QUIC_CHANNEL_STATE_ACTIVE;
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_ACTIVE);
ch->doing_proactive_ver_neg = 0; /* not currently supported */
return 1;
}
return 0;
}
+static uint64_t *ch_get_local_stream_next_ordinal_ptr(QUIC_CHANNEL *ch,
+ int is_uni)
+{
+ return is_uni ? &ch->next_local_stream_ordinal_uni
+ : &ch->next_local_stream_ordinal_bidi;
+}
+
+int ossl_quic_channel_is_new_local_stream_admissible(QUIC_CHANNEL *ch,
+ int is_uni)
+{
+ uint64_t *p_next_ordinal = ch_get_local_stream_next_ordinal_ptr(ch, is_uni);
+
+ return ossl_quic_stream_map_is_local_allowed_by_stream_limit(&ch->qsm,
+ *p_next_ordinal,
+ is_uni);
+}
+
QUIC_STREAM *ossl_quic_channel_new_stream_local(QUIC_CHANNEL *ch, int is_uni)
{
QUIC_STREAM *qs;
type = ch->is_server ? QUIC_STREAM_INITIATOR_SERVER
: QUIC_STREAM_INITIATOR_CLIENT;
- if (is_uni) {
- p_next_ordinal = &ch->next_local_stream_ordinal_uni;
+ p_next_ordinal = ch_get_local_stream_next_ordinal_ptr(ch, is_uni);
+
+ if (is_uni)
type |= QUIC_STREAM_DIR_UNI;
- } else {
- p_next_ordinal = &ch->next_local_stream_ordinal_bidi;
+ else
type |= QUIC_STREAM_DIR_BIDI;
- }
if (*p_next_ordinal >= ((uint64_t)1) << 62)
return NULL;
if ((qs = ossl_quic_stream_map_alloc(&ch->qsm, stream_id, type)) == NULL)
return NULL;
-
/* Locally-initiated stream, so we always want a send buffer. */
if (!ch_init_new_stream(ch, qs, /*can_send=*/1, /*can_recv=*/!is_uni))
goto err;
int ossl_quic_channel_replace_local_cid(QUIC_CHANNEL *ch,
const QUIC_CONN_ID *conn_id)
{
- /* Remove the current local CID from the DEMUX. */
- if (!ossl_qrx_remove_dst_conn_id(ch->qrx, &ch->cur_local_cid))
+ /* Remove the current LCID from the LCIDM. */
+ if (!ossl_quic_lcidm_debug_remove(ch->lcidm, &ch->cur_local_cid))
return 0;
ch->cur_local_cid = *conn_id;
/* Set in the TXP, used only for long header packets. */
if (!ossl_quic_tx_packetiser_set_cur_scid(ch->txp, &ch->cur_local_cid))
return 0;
- /* Register our new local CID in the DEMUX. */
- if (!ossl_qrx_add_dst_conn_id(ch->qrx, &ch->cur_local_cid))
+ /* Add the new LCID to the LCIDM. */
+ if (!ossl_quic_lcidm_debug_add(ch->lcidm, ch, &ch->cur_local_cid,
+ 100))
return 0;
return 1;
}
ch_trigger_txku(ch);
return 1;
}
+
+int ossl_quic_channel_ping(QUIC_CHANNEL *ch)
+{
+ int pn_space = ossl_quic_enc_level_to_pn_space(ch->tx_enc_level);
+
+ ossl_quic_tx_packetiser_schedule_ack_eliciting(ch->txp, pn_space);
+
+ return 1;
+}
+
+uint16_t ossl_quic_channel_get_diag_num_rx_ack(QUIC_CHANNEL *ch)
+{
+ return ch->diag_num_rx_ack;
+}
+
+void ossl_quic_channel_get_diag_local_cid(QUIC_CHANNEL *ch, QUIC_CONN_ID *cid)
+{
+ *cid = ch->cur_local_cid;
+}