1 /*
2 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10 #include <openssl/rand.h>
11 #include <openssl/err.h>
12 #include "internal/quic_channel.h"
13 #include "internal/quic_error.h"
14 #include "internal/quic_rx_depack.h"
15 #include "../ssl_local.h"
16 #include "quic_channel_local.h"
17
18 /*
19 * NOTE: While this channel implementation currently has basic server support,
20 * this functionality has been implemented for internal testing purposes and is
21 * not suitable for network use. In particular, it does not implement address
22 * validation, anti-amplification or retry logic.
23 *
24 * TODO(QUIC): Implement address validation and anti-amplification
25 * TODO(QUIC): Implement retry logic
26 */
27
28 #define INIT_DCID_LEN 8
29 #define INIT_CRYPTO_BUF_LEN 8192
30 #define INIT_APP_BUF_LEN 8192
31
32 /*
33 * Interval before we force a PING to ensure NATs don't time out. This is based
34 * on the lowest commonly seen value of 30 seconds cited in RFC 9000 s. 10.1.2,
35 * less a five-second safety margin.
36 */
37 #define MAX_NAT_INTERVAL (ossl_ms2time(25000))
38
39 static void ch_rx_pre(QUIC_CHANNEL *ch);
40 static int ch_rx(QUIC_CHANNEL *ch);
41 static int ch_tx(QUIC_CHANNEL *ch);
42 static void ch_tick(QUIC_TICK_RESULT *res, void *arg, uint32_t flags);
43 static void ch_rx_handle_packet(QUIC_CHANNEL *ch);
44 static OSSL_TIME ch_determine_next_tick_deadline(QUIC_CHANNEL *ch);
45 static int ch_retry(QUIC_CHANNEL *ch,
46 const unsigned char *retry_token,
47 size_t retry_token_len,
48 const QUIC_CONN_ID *retry_scid);
49 static void ch_cleanup(QUIC_CHANNEL *ch);
50 static int ch_generate_transport_params(QUIC_CHANNEL *ch);
51 static int ch_on_transport_params(const unsigned char *params,
52 size_t params_len,
53 void *arg);
54 static int ch_on_handshake_alert(void *arg, unsigned char alert_code);
55 static int ch_on_handshake_complete(void *arg);
56 static int ch_on_handshake_yield_secret(uint32_t enc_level, int direction,
57 uint32_t suite_id, EVP_MD *md,
58 const unsigned char *secret,
59 size_t secret_len,
60 void *arg);
61 static int ch_on_crypto_recv_record(const unsigned char **buf,
62 size_t *bytes_read, void *arg);
63 static int ch_on_crypto_release_record(size_t bytes_read, void *arg);
64 static int crypto_ensure_empty(QUIC_RSTREAM *rstream);
65 static int ch_on_crypto_send(const unsigned char *buf, size_t buf_len,
66 size_t *consumed, void *arg);
67 static OSSL_TIME get_time(void *arg);
68 static uint64_t get_stream_limit(int uni, void *arg);
69 static int rx_late_validate(QUIC_PN pn, int pn_space, void *arg);
70 static void rxku_detected(QUIC_PN pn, void *arg);
75 static void ch_update_idle(QUIC_CHANNEL *ch);
76 static int ch_discard_el(QUIC_CHANNEL *ch,
77 uint32_t enc_level);
78 static void ch_on_idle_timeout(QUIC_CHANNEL *ch);
80 static void ch_update_ping_deadline(QUIC_CHANNEL *ch);
81 static void ch_raise_net_error(QUIC_CHANNEL *ch);
82 static void ch_on_terminating_timeout(QUIC_CHANNEL *ch);
83 static void ch_start_terminating(QUIC_CHANNEL *ch,
84 const QUIC_TERMINATE_CAUSE *tcause,
85 int force_immediate);
86 static void ch_default_packet_handler(QUIC_URXE *e, void *arg);
87 static int ch_server_on_new_conn(QUIC_CHANNEL *ch, const BIO_ADDR *peer,
88 const QUIC_CONN_ID *peer_scid,
89 const QUIC_CONN_ID *peer_dcid);
90 static void ch_on_txp_ack_tx(const OSSL_QUIC_FRAME_ACK *ack, uint32_t pn_space,
91 void *arg);
92
93 static int gen_rand_conn_id(OSSL_LIB_CTX *libctx, size_t len, QUIC_CONN_ID *cid)
94 {
95 if (len > QUIC_MAX_CONN_ID_LEN)
96 return 0;
97
98 cid->id_len = (unsigned char)len;
99
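/*
 * The final argument to RAND_bytes_ex() requests an entropy strength of
 * len * 8 bits, i.e. 64 bits for the default 8-byte DCID.
 */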
100 if (RAND_bytes_ex(libctx, cid->id, len, len * 8) != 1) {
101 cid->id_len = 0;
102 return 0;
103 }
104
105 return 1;
106 }
107
108 /*
109 * QUIC Channel Initialization and Teardown
110 * ========================================
111 */
112 #define DEFAULT_INIT_CONN_RXFC_WND (2 * 1024 * 1024)
113 #define DEFAULT_CONN_RXFC_MAX_WND_MUL 5
114
115 #define DEFAULT_INIT_STREAM_RXFC_WND (2 * 1024 * 1024)
116 #define DEFAULT_STREAM_RXFC_MAX_WND_MUL 5
117
118 #define DEFAULT_INIT_CONN_MAX_STREAMS 100
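/*
 * For example, with the defaults above the connection-level RX flow control
 * window starts at 2 MiB and is permitted to grow to at most
 * 5 * 2 MiB = 10 MiB; the per-stream defaults follow the same pattern.
 */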
119
120 static int ch_init(QUIC_CHANNEL *ch)
121 {
122 OSSL_QUIC_TX_PACKETISER_ARGS txp_args = {0};
123 OSSL_QTX_ARGS qtx_args = {0};
124 OSSL_QRX_ARGS qrx_args = {0};
125 QUIC_TLS_ARGS tls_args = {0};
126 uint32_t pn_space;
127 size_t rx_short_cid_len = ch->is_server ? INIT_DCID_LEN : 0;
128
129 /* For clients, generate our initial DCID. */
130 if (!ch->is_server
131 && !gen_rand_conn_id(ch->libctx, INIT_DCID_LEN, &ch->init_dcid))
132 goto err;
133
134 /* We plug in a network write BIO to the QTX later when we get one. */
135 qtx_args.libctx = ch->libctx;
136 qtx_args.mdpl = QUIC_MIN_INITIAL_DGRAM_LEN;
137 ch->rx_max_udp_payload_size = qtx_args.mdpl;
138
139 ch->ping_deadline = ossl_time_infinite();
140
141 ch->qtx = ossl_qtx_new(&qtx_args);
142 if (ch->qtx == NULL)
143 goto err;
144
145 ch->txpim = ossl_quic_txpim_new();
146 if (ch->txpim == NULL)
147 goto err;
148
149 ch->cfq = ossl_quic_cfq_new();
150 if (ch->cfq == NULL)
151 goto err;
152
153 if (!ossl_quic_txfc_init(&ch->conn_txfc, NULL))
154 goto err;
155
156 /*
157 * Note: The TP we transmit governs what the peer can transmit and thus
158 * applies to the RXFC.
159 */
160 ch->tx_init_max_stream_data_bidi_local = DEFAULT_INIT_STREAM_RXFC_WND;
161 ch->tx_init_max_stream_data_bidi_remote = DEFAULT_INIT_STREAM_RXFC_WND;
162 ch->tx_init_max_stream_data_uni = DEFAULT_INIT_STREAM_RXFC_WND;
163
164 if (!ossl_quic_rxfc_init(&ch->conn_rxfc, NULL,
165 DEFAULT_INIT_CONN_RXFC_WND,
166 DEFAULT_CONN_RXFC_MAX_WND_MUL *
167 DEFAULT_INIT_CONN_RXFC_WND,
168 get_time, ch))
169 goto err;
170
171 if (!ossl_quic_rxfc_init_for_stream_count(&ch->max_streams_bidi_rxfc,
172 DEFAULT_INIT_CONN_MAX_STREAMS,
173 get_time, ch))
174 goto err;
175
176 if (!ossl_quic_rxfc_init_for_stream_count(&ch->max_streams_uni_rxfc,
177 DEFAULT_INIT_CONN_MAX_STREAMS,
178 get_time, ch))
179 goto err;
180
181 if (!ossl_statm_init(&ch->statm))
182 goto err;
183
184 ch->have_statm = 1;
185 ch->cc_method = &ossl_cc_newreno_method;
186 if ((ch->cc_data = ch->cc_method->new(get_time, ch)) == NULL)
187 goto err;
188
189 if ((ch->ackm = ossl_ackm_new(get_time, ch, &ch->statm,
190 ch->cc_method, ch->cc_data)) == NULL)
191 goto err;
192
193 if (!ossl_quic_stream_map_init(&ch->qsm, get_stream_limit, ch,
194 &ch->max_streams_bidi_rxfc,
195 &ch->max_streams_uni_rxfc,
196 ch->is_server))
197 goto err;
198
199 ch->have_qsm = 1;
200
201 /* We use a zero-length SCID. */
202 txp_args.cur_dcid = ch->init_dcid;
203 txp_args.ack_delay_exponent = 3;
204 txp_args.qtx = ch->qtx;
205 txp_args.txpim = ch->txpim;
206 txp_args.cfq = ch->cfq;
207 txp_args.ackm = ch->ackm;
208 txp_args.qsm = &ch->qsm;
209 txp_args.conn_txfc = &ch->conn_txfc;
210 txp_args.conn_rxfc = &ch->conn_rxfc;
211 txp_args.max_streams_bidi_rxfc = &ch->max_streams_bidi_rxfc;
212 txp_args.max_streams_uni_rxfc = &ch->max_streams_uni_rxfc;
213 txp_args.cc_method = ch->cc_method;
214 txp_args.cc_data = ch->cc_data;
215 txp_args.now = get_time;
216 txp_args.now_arg = ch;
217
218 for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space) {
219 ch->crypto_send[pn_space] = ossl_quic_sstream_new(INIT_CRYPTO_BUF_LEN);
220 if (ch->crypto_send[pn_space] == NULL)
221 goto err;
222
223 txp_args.crypto[pn_space] = ch->crypto_send[pn_space];
224 }
225
226 ch->txp = ossl_quic_tx_packetiser_new(&txp_args);
227 if (ch->txp == NULL)
228 goto err;
229
230 ossl_quic_tx_packetiser_set_ack_tx_cb(ch->txp, ch_on_txp_ack_tx, ch);
231
232 if ((ch->demux = ossl_quic_demux_new(/*BIO=*/NULL,
233 /*Short CID Len=*/rx_short_cid_len,
234 get_time, ch)) == NULL)
235 goto err;
236
237 /*
238 * If we are a server, set up our handler for packets not corresponding to
239 * any known DCID on our end. This is for handling clients establishing new
240 * connections.
241 */
242 if (ch->is_server)
243 ossl_quic_demux_set_default_handler(ch->demux,
244 ch_default_packet_handler,
245 ch);
246
247 qrx_args.libctx = ch->libctx;
248 qrx_args.demux = ch->demux;
249 qrx_args.short_conn_id_len = rx_short_cid_len;
250 qrx_args.max_deferred = 32;
251
252 if ((ch->qrx = ossl_qrx_new(&qrx_args)) == NULL)
253 goto err;
254
255 if (!ossl_qrx_set_late_validation_cb(ch->qrx,
256 rx_late_validate,
257 ch))
258 goto err;
259
260 if (!ossl_qrx_set_key_update_cb(ch->qrx,
261 rxku_detected,
262 ch))
263 goto err;
264
265 if (!ch->is_server && !ossl_qrx_add_dst_conn_id(ch->qrx, &txp_args.cur_scid))
266 goto err;
267
268 for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space) {
269 ch->crypto_recv[pn_space] = ossl_quic_rstream_new(NULL, NULL, 0);
270 if (ch->crypto_recv[pn_space] == NULL)
271 goto err;
272 }
273
274 /* Plug in the TLS handshake layer. */
275 tls_args.s = ch->tls;
276 tls_args.crypto_send_cb = ch_on_crypto_send;
277 tls_args.crypto_send_cb_arg = ch;
278 tls_args.crypto_recv_rcd_cb = ch_on_crypto_recv_record;
279 tls_args.crypto_recv_rcd_cb_arg = ch;
280 tls_args.crypto_release_rcd_cb = ch_on_crypto_release_record;
281 tls_args.crypto_release_rcd_cb_arg = ch;
282 tls_args.yield_secret_cb = ch_on_handshake_yield_secret;
283 tls_args.yield_secret_cb_arg = ch;
284 tls_args.got_transport_params_cb = ch_on_transport_params;
285 tls_args.got_transport_params_cb_arg = ch;
286 tls_args.handshake_complete_cb = ch_on_handshake_complete;
287 tls_args.handshake_complete_cb_arg = ch;
288 tls_args.alert_cb = ch_on_handshake_alert;
289 tls_args.alert_cb_arg = ch;
290 tls_args.is_server = ch->is_server;
291
292 if ((ch->qtls = ossl_quic_tls_new(&tls_args)) == NULL)
293 goto err;
294
295 ch->rx_max_ack_delay = QUIC_DEFAULT_MAX_ACK_DELAY;
296 ch->rx_ack_delay_exp = QUIC_DEFAULT_ACK_DELAY_EXP;
297 ch->rx_active_conn_id_limit = QUIC_MIN_ACTIVE_CONN_ID_LIMIT;
298 ch->max_idle_timeout = QUIC_DEFAULT_IDLE_TIMEOUT;
299 ch->tx_enc_level = QUIC_ENC_LEVEL_INITIAL;
300 ch->rx_enc_level = QUIC_ENC_LEVEL_INITIAL;
301 ch->txku_threshold_override = UINT64_MAX;
302
303 /*
304 * Determine the QUIC Transport Parameters and serialize the transport
305 * parameters block. (For servers, we do this later as we must defer
306 * generation until we have received the client's transport parameters.)
307 */
308 if (!ch->is_server && !ch_generate_transport_params(ch))
309 goto err;
310
311 ch_update_idle(ch);
312 ossl_quic_reactor_init(&ch->rtor, ch_tick, ch,
313 ch_determine_next_tick_deadline(ch));
314 return 1;
315
316 err:
317 ch_cleanup(ch);
318 return 0;
319 }
320
321 static void ch_cleanup(QUIC_CHANNEL *ch)
322 {
323 uint32_t pn_space;
324
325 if (ch->ackm != NULL)
326 for (pn_space = QUIC_PN_SPACE_INITIAL;
327 pn_space < QUIC_PN_SPACE_NUM;
328 ++pn_space)
329 ossl_ackm_on_pkt_space_discarded(ch->ackm, pn_space);
330
331 ossl_quic_tx_packetiser_free(ch->txp);
332 ossl_quic_txpim_free(ch->txpim);
333 ossl_quic_cfq_free(ch->cfq);
334 ossl_qtx_free(ch->qtx);
335 if (ch->cc_data != NULL)
336 ch->cc_method->free(ch->cc_data);
337 if (ch->have_statm)
338 ossl_statm_destroy(&ch->statm);
339 ossl_ackm_free(ch->ackm);
340
341 if (ch->have_qsm)
342 ossl_quic_stream_map_cleanup(&ch->qsm);
343
344 for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space) {
345 ossl_quic_sstream_free(ch->crypto_send[pn_space]);
346 ossl_quic_rstream_free(ch->crypto_recv[pn_space]);
347 }
348
349 ossl_qrx_pkt_release(ch->qrx_pkt);
350 ch->qrx_pkt = NULL;
351
352 ossl_quic_tls_free(ch->qtls);
353 ossl_qrx_free(ch->qrx);
354 ossl_quic_demux_free(ch->demux);
355 OPENSSL_free(ch->local_transport_params);
356 OSSL_ERR_STATE_free(ch->err_state);
357 }
358
359 QUIC_CHANNEL *ossl_quic_channel_new(const QUIC_CHANNEL_ARGS *args)
360 {
361 QUIC_CHANNEL *ch = NULL;
362
363 if ((ch = OPENSSL_zalloc(sizeof(*ch))) == NULL)
364 return NULL;
365
366 ch->libctx = args->libctx;
367 ch->propq = args->propq;
368 ch->is_server = args->is_server;
369 ch->tls = args->tls;
370 ch->mutex = args->mutex;
371 ch->now_cb = args->now_cb;
372 ch->now_cb_arg = args->now_cb_arg;
373
374 if (!ch_init(ch)) {
375 OPENSSL_free(ch);
376 return NULL;
377 }
378
379 return ch;
380 }
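/*
 * Illustrative sketch only (hypothetical caller, not part of this file): an
 * internal user which already owns an SSL object, a mutex and so on would
 * construct a channel roughly as follows:
 *
 *     QUIC_CHANNEL_ARGS args = {0};
 *     QUIC_CHANNEL *ch;
 *
 *     args.libctx    = libctx;
 *     args.propq     = propq;
 *     args.is_server = 0;
 *     args.tls       = tls;
 *     args.mutex     = mutex;
 *     args.now_cb    = NULL;      (NULL means get_time() uses ossl_time_now())
 *
 *     if ((ch = ossl_quic_channel_new(&args)) == NULL)
 *         goto err;
 */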
381
382 void ossl_quic_channel_free(QUIC_CHANNEL *ch)
383 {
384 if (ch == NULL)
385 return;
386
387 ch_cleanup(ch);
388 OPENSSL_free(ch);
389 }
390
391 /* Set mutator callbacks for test framework support */
392 int ossl_quic_channel_set_mutator(QUIC_CHANNEL *ch,
393 ossl_mutate_packet_cb mutatecb,
394 ossl_finish_mutate_cb finishmutatecb,
395 void *mutatearg)
396 {
397 if (ch->qtx == NULL)
398 return 0;
399
400 ossl_qtx_set_mutator(ch->qtx, mutatecb, finishmutatecb, mutatearg);
401 return 1;
402 }
403
404 int ossl_quic_channel_get_peer_addr(QUIC_CHANNEL *ch, BIO_ADDR *peer_addr)
405 {
406 *peer_addr = ch->cur_peer_addr;
407 return 1;
408 }
409
410 int ossl_quic_channel_set_peer_addr(QUIC_CHANNEL *ch, const BIO_ADDR *peer_addr)
411 {
412 ch->cur_peer_addr = *peer_addr;
413 return 1;
414 }
415
416 QUIC_REACTOR *ossl_quic_channel_get_reactor(QUIC_CHANNEL *ch)
417 {
418 return &ch->rtor;
419 }
420
421 QUIC_STREAM_MAP *ossl_quic_channel_get_qsm(QUIC_CHANNEL *ch)
422 {
423 return &ch->qsm;
424 }
425
426 OSSL_STATM *ossl_quic_channel_get_statm(QUIC_CHANNEL *ch)
427 {
428 return &ch->statm;
429 }
430
431 QUIC_STREAM *ossl_quic_channel_get_stream_by_id(QUIC_CHANNEL *ch,
432 uint64_t stream_id)
433 {
434 return ossl_quic_stream_map_get_by_id(&ch->qsm, stream_id);
435 }
436
437 int ossl_quic_channel_is_active(const QUIC_CHANNEL *ch)
438 {
439 return ch != NULL && ch->state == QUIC_CHANNEL_STATE_ACTIVE;
440 }
441
442 int ossl_quic_channel_is_terminating(const QUIC_CHANNEL *ch)
443 {
444 if (ch->state == QUIC_CHANNEL_STATE_TERMINATING_CLOSING
445 || ch->state == QUIC_CHANNEL_STATE_TERMINATING_DRAINING)
446 return 1;
447
448 return 0;
449 }
450
451 int ossl_quic_channel_is_terminated(const QUIC_CHANNEL *ch)
452 {
453 if (ch->state == QUIC_CHANNEL_STATE_TERMINATED)
454 return 1;
455
456 return 0;
457 }
458
459 int ossl_quic_channel_is_term_any(const QUIC_CHANNEL *ch)
460 {
461 return ossl_quic_channel_is_terminating(ch)
462 || ossl_quic_channel_is_terminated(ch);
463 }
464
465 const QUIC_TERMINATE_CAUSE *
466 ossl_quic_channel_get_terminate_cause(const QUIC_CHANNEL *ch)
467 {
468 return ossl_quic_channel_is_term_any(ch) ? &ch->terminate_cause : NULL;
469 }
470
471 int ossl_quic_channel_is_handshake_complete(const QUIC_CHANNEL *ch)
472 {
473 return ch->handshake_complete;
474 }
475
476 int ossl_quic_channel_is_handshake_confirmed(const QUIC_CHANNEL *ch)
477 {
478 return ch->handshake_confirmed;
479 }
480
481 QUIC_DEMUX *ossl_quic_channel_get0_demux(QUIC_CHANNEL *ch)
482 {
483 return ch->demux;
484 }
485
486 CRYPTO_MUTEX *ossl_quic_channel_get_mutex(QUIC_CHANNEL *ch)
487 {
488 return ch->mutex;
489 }
490
491 int ossl_quic_channel_has_pending(const QUIC_CHANNEL *ch)
492 {
493 return ossl_quic_demux_has_pending(ch->demux)
494 || ossl_qrx_processed_read_pending(ch->qrx);
495 }
496
497 /*
498 * QUIC Channel: Callbacks from Miscellaneous Subsidiary Components
499 * ================================================================
500 */
501
502 /* Used by various components. */
503 static OSSL_TIME get_time(void *arg)
504 {
505 QUIC_CHANNEL *ch = arg;
506
507 if (ch->now_cb == NULL)
508 return ossl_time_now();
509
510 return ch->now_cb(ch->now_cb_arg);
511 }
512
513 /* Used by QSM. */
514 static uint64_t get_stream_limit(int uni, void *arg)
515 {
516 QUIC_CHANNEL *ch = arg;
517
518 return uni ? ch->max_local_streams_uni : ch->max_local_streams_bidi;
519 }
520
521 /*
522 * Called by QRX to determine if a packet is potentially invalid before trying
523 * to decrypt it.
524 */
525 static int rx_late_validate(QUIC_PN pn, int pn_space, void *arg)
526 {
527 QUIC_CHANNEL *ch = arg;
528
529 /* Potential duplicates should not be processed. */
530 if (!ossl_ackm_is_rx_pn_processable(ch->ackm, pn, pn_space))
531 return 0;
532
533 return 1;
534 }
535
536 /*
537 * Triggers a TXKU (whether spontaneous or solicited). Does not check whether
538 * spontaneous TXKU is currently allowed.
539 */
540 QUIC_NEEDS_LOCK
541 static void ch_trigger_txku(QUIC_CHANNEL *ch)
542 {
543 uint64_t next_pn
544 = ossl_quic_tx_packetiser_get_next_pn(ch->txp, QUIC_PN_SPACE_APP);
545
546 if (!ossl_quic_pn_valid(next_pn)
547 || !ossl_qtx_trigger_key_update(ch->qtx)) {
548 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR, 0,
549 "key update");
550 return;
551 }
552
553 ch->txku_in_progress = 1;
554 ch->txku_pn = next_pn;
555 ch->rxku_expected = ch->ku_locally_initiated;
556 }
557
558 QUIC_NEEDS_LOCK
559 static int txku_in_progress(QUIC_CHANNEL *ch)
560 {
561 if (ch->txku_in_progress
562 && ossl_ackm_get_largest_acked(ch->ackm, QUIC_PN_SPACE_APP) >= ch->txku_pn) {
563 OSSL_TIME pto = ossl_ackm_get_pto_duration(ch->ackm);
564
565 /*
566 * RFC 9001 s. 6.5: Endpoints SHOULD wait three times the PTO before
567 * initiating a key update after receiving an acknowledgment that
568 * confirms that the previous key update was received.
569 *
570 * Note that by the above wording, this period starts from when we get
571 * the ack for a TXKU-triggering packet, not when the TXKU is initiated.
572 * So we defer TXKU cooldown deadline calculation to this point.
573 */
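/*
 * For example, with a PTO of 200 ms the cooldown set below ends
 * 3 * 200 ms = 600 ms after the confirming acknowledgment is processed.
 */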
574 ch->txku_in_progress = 0;
575 ch->txku_cooldown_deadline = ossl_time_add(get_time(ch),
576 ossl_time_multiply(pto, 3));
577 }
578
579 return ch->txku_in_progress;
580 }
581
582 QUIC_NEEDS_LOCK
583 static int txku_allowed(QUIC_CHANNEL *ch)
584 {
585 return ch->tx_enc_level == QUIC_ENC_LEVEL_1RTT /* Sanity check. */
586 /* Strict RFC 9001 criterion for TXKU. */
587 && ch->handshake_confirmed
588 && !txku_in_progress(ch);
589 }
590
591 QUIC_NEEDS_LOCK
592 static int txku_recommendable(QUIC_CHANNEL *ch)
593 {
594 if (!txku_allowed(ch))
595 return 0;
596
597 return
598 /* Recommended RFC 9001 criterion for TXKU. */
599 ossl_time_compare(get_time(ch), ch->txku_cooldown_deadline) >= 0
600 /* Some additional sensible criteria. */
601 && !ch->rxku_in_progress
602 && !ch->rxku_pending_confirm;
603 }
604
605 QUIC_NEEDS_LOCK
606 static int txku_desirable(QUIC_CHANNEL *ch)
607 {
608 uint64_t cur_pkt_count, max_pkt_count, thresh_pkt_count;
609 const uint32_t enc_level = QUIC_ENC_LEVEL_1RTT;
610
611 /* Check AEAD limit to determine if we should perform a spontaneous TXKU. */
612 cur_pkt_count = ossl_qtx_get_cur_epoch_pkt_count(ch->qtx, enc_level);
613 max_pkt_count = ossl_qtx_get_max_epoch_pkt_count(ch->qtx, enc_level);
614
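/*
 * By default we aim to update once half of the AEAD confidentiality limit
 * has been consumed; e.g. for AES-GCM, whose limit is 2^23 packets
 * (RFC 9001 s. 6.6), that is roughly 2^22 packets. The threshold can also
 * be overridden via txku_threshold_override (e.g. for testing).
 */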
615 thresh_pkt_count = max_pkt_count / 2;
616 if (ch->txku_threshold_override != UINT64_MAX)
617 thresh_pkt_count = ch->txku_threshold_override;
618
619 return cur_pkt_count >= thresh_pkt_count;
620 }
621
622 QUIC_NEEDS_LOCK
623 static void ch_maybe_trigger_spontaneous_txku(QUIC_CHANNEL *ch)
624 {
625 if (!txku_recommendable(ch) || !txku_desirable(ch))
626 return;
627
628 ch->ku_locally_initiated = 1;
629 ch_trigger_txku(ch);
630 }
631
632 QUIC_NEEDS_LOCK
633 static int rxku_allowed(QUIC_CHANNEL *ch)
634 {
635 /*
636 * RFC 9001 s. 6.1: An endpoint MUST NOT initiate a key update prior to
637 * having confirmed the handshake (Section 4.1.2).
638 *
639 * RFC 9001 s. 6.1: An endpoint MUST NOT initiate a subsequent key update
640 * unless it has received an acknowledgment for a packet that was sent
641 * protected with keys from the current key phase.
642 *
643 * RFC 9001 s. 6.2: If an endpoint detects a second update before it has
644 * sent any packets with updated keys containing an acknowledgment for the
645 * packet that initiated the key update, it indicates that its peer has
646 * updated keys twice without awaiting confirmation. An endpoint MAY treat
647 * such consecutive key updates as a connection error of type
648 * KEY_UPDATE_ERROR.
649 */
650 return ch->handshake_confirmed && !ch->rxku_pending_confirm;
651 }
652
653 /*
654 * Decisions we can take when the QRX detects a new RX key update event.
655 */
656 enum rxku_decision {
657 DECISION_RXKU_ONLY,
658 DECISION_PROTOCOL_VIOLATION,
659 DECISION_SOLICITED_TXKU
660 };
661
662 /* Called when the QRX detects a key update has occurred. */
663 QUIC_NEEDS_LOCK
664 static void rxku_detected(QUIC_PN pn, void *arg)
665 {
666 QUIC_CHANNEL *ch = arg;
667 enum rxku_decision decision;
668 OSSL_TIME pto;
669
670 /*
671 * Note: rxku_in_progress is always 0 here as an RXKU cannot be detected
672 * when we are still in UPDATING or COOLDOWN (see quic_record_rx.h).
673 */
674 assert(!ch->rxku_in_progress);
675
676 if (!rxku_allowed(ch))
677 /* Is RXKU even allowed at this time? */
678 decision = DECISION_PROTOCOL_VIOLATION;
679
680 else if (ch->ku_locally_initiated)
681 /*
682 * If this key update was locally initiated (meaning that this detected
683 * RXKU event is a result of our own spontaneous TXKU), we do not
684 * trigger another TXKU; after all, to do so would result in an infinite
685 * ping-pong of key updates. We still process it as an RXKU.
686 */
687 decision = DECISION_RXKU_ONLY;
688
689 else
690 /*
691 * Otherwise, a peer triggering a KU means we have to trigger a KU also.
692 */
693 decision = DECISION_SOLICITED_TXKU;
694
695 if (decision == DECISION_PROTOCOL_VIOLATION) {
696 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_KEY_UPDATE_ERROR,
697 0, "RX key update again too soon");
698 return;
699 }
700
701 pto = ossl_ackm_get_pto_duration(ch->ackm);
702
703 ch->ku_locally_initiated = 0;
704 ch->rxku_in_progress = 1;
705 ch->rxku_pending_confirm = 1;
706 ch->rxku_trigger_pn = pn;
707 ch->rxku_update_end_deadline = ossl_time_add(get_time(ch), pto);
708 ch->rxku_expected = 0;
709
710 if (decision == DECISION_SOLICITED_TXKU)
711 /* NOT gated by usual txku_allowed() */
712 ch_trigger_txku(ch);
713
714 /*
715 * Ordinarily, we only generate an ACK once an ACK-eliciting frame has been
716 * received. In some cases this may not occur for a long time, for example
717 * if transmission of application data is going in only one direction and
718 * nothing else is happening with the connection. However, since the peer
719 * cannot initiate a subsequent (spontaneous) TXKU until its prior
720 * (spontaneous or solicited) TXKU has completed (meaning that the prior
721 * TXKU's trigger packet, or a subsequent packet, has been acknowledged),
722 * this can lead to a very long time before a TXKU is considered
723 * 'completed'. Optimise this by forcing ACK generation after triggering a
724 * TXKU. (Basically, we consider an RXKU event something that is
725 * 'ACK-eliciting', which it more or less should be; it is necessarily
726 * handled separately from ordinary processing of ACK-eliciting frames, as
727 * a key update is not indicated via a frame.)
728 */
729 ossl_quic_tx_packetiser_schedule_ack(ch->txp, QUIC_PN_SPACE_APP);
730 }
731
732 /* Called per tick to handle RXKU timer events. */
733 QUIC_NEEDS_LOCK
734 static void ch_rxku_tick(QUIC_CHANNEL *ch)
735 {
736 if (!ch->rxku_in_progress
737 || ossl_time_compare(get_time(ch), ch->rxku_update_end_deadline) < 0)
738 return;
739
740 ch->rxku_update_end_deadline = ossl_time_infinite();
741 ch->rxku_in_progress = 0;
742
743 if (!ossl_qrx_key_update_timeout(ch->qrx, /*normal=*/1))
744 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR, 0,
745 "RXKU cooldown internal error");
746 }
747
748 QUIC_NEEDS_LOCK
749 static void ch_on_txp_ack_tx(const OSSL_QUIC_FRAME_ACK *ack, uint32_t pn_space,
750 void *arg)
751 {
752 QUIC_CHANNEL *ch = arg;
753
754 if (pn_space != QUIC_PN_SPACE_APP || !ch->rxku_pending_confirm
755 || !ossl_quic_frame_ack_contains_pn(ack, ch->rxku_trigger_pn))
756 return;
757
758 /*
759 * Defer clearing rxku_pending_confirm until TXP generate call returns
760 * successfully.
761 */
762 ch->rxku_pending_confirm_done = 1;
763 }
764
765 /*
766 * QUIC Channel: Handshake Layer Event Handling
767 * ============================================
768 */
769 static int ch_on_crypto_send(const unsigned char *buf, size_t buf_len,
770 size_t *consumed, void *arg)
771 {
772 int ret;
773 QUIC_CHANNEL *ch = arg;
774 uint32_t enc_level = ch->tx_enc_level;
775 uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
776 QUIC_SSTREAM *sstream = ch->crypto_send[pn_space];
777
778 if (!ossl_assert(sstream != NULL))
779 return 0;
780
781 ret = ossl_quic_sstream_append(sstream, buf, buf_len, consumed);
782 return ret;
783 }
784
785 static int crypto_ensure_empty(QUIC_RSTREAM *rstream)
786 {
787 size_t avail = 0;
788 int is_fin = 0;
789
790 if (rstream == NULL)
791 return 1;
792
793 if (!ossl_quic_rstream_available(rstream, &avail, &is_fin))
794 return 0;
795
796 return avail == 0;
797 }
798
799 static int ch_on_crypto_recv_record(const unsigned char **buf,
800 size_t *bytes_read, void *arg)
801 {
802 QUIC_CHANNEL *ch = arg;
803 QUIC_RSTREAM *rstream;
804 int is_fin = 0; /* crypto stream is never finished, so we don't use this */
805 uint32_t i;
806
807 /*
808 * After we move to a later EL we must not allow our peer to send any new
809 * bytes in the crypto stream on a previous EL. Retransmissions of old bytes
810 * are allowed.
811 *
812 * In practice we will only move to a new EL when we have consumed all bytes
813 * which should be sent on the crypto stream at a previous EL. For example,
814 * the Handshake EL should not be provisioned until we have completely
815 * consumed a TLS 1.3 ServerHello. Thus when we provision an EL the output
816 * of ossl_quic_rstream_available() should be 0 for all lower ELs. Thus if a
817 * given EL is available we simply ensure we have not received any further
818 * bytes at a lower EL.
819 */
820 for (i = QUIC_ENC_LEVEL_INITIAL; i < ch->rx_enc_level; ++i)
821 if (i != QUIC_ENC_LEVEL_0RTT &&
822 !crypto_ensure_empty(ch->crypto_recv[ossl_quic_enc_level_to_pn_space(i)])) {
823 /* Protocol violation (RFC 9001 s. 4.1.3) */
824 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
825 OSSL_QUIC_FRAME_TYPE_CRYPTO,
826 "crypto stream data in wrong EL");
827 return 0;
828 }
829
830 rstream = ch->crypto_recv[ossl_quic_enc_level_to_pn_space(ch->rx_enc_level)];
831 if (rstream == NULL)
832 return 0;
833
834 return ossl_quic_rstream_get_record(rstream, buf, bytes_read,
835 &is_fin);
836 }
837
838 static int ch_on_crypto_release_record(size_t bytes_read, void *arg)
839 {
840 QUIC_CHANNEL *ch = arg;
841 QUIC_RSTREAM *rstream;
842
843 rstream = ch->crypto_recv[ossl_quic_enc_level_to_pn_space(ch->rx_enc_level)];
844 if (rstream == NULL)
845 return 0;
846
847 return ossl_quic_rstream_release_record(rstream, bytes_read);
848 }
849
850 static int ch_on_handshake_yield_secret(uint32_t enc_level, int direction,
851 uint32_t suite_id, EVP_MD *md,
852 const unsigned char *secret,
853 size_t secret_len,
854 void *arg)
855 {
856 QUIC_CHANNEL *ch = arg;
857 uint32_t i;
858
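/*
 * Note: the Initial EL's secrets are derived directly from the client's
 * initial DCID (RFC 9001 s. 5.2) rather than yielded by the handshake
 * layer, and 0-RTT is not currently supported, so only Handshake and 1-RTT
 * secrets are expected here.
 */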
859 if (enc_level < QUIC_ENC_LEVEL_HANDSHAKE || enc_level >= QUIC_ENC_LEVEL_NUM)
860 /* Invalid EL. */
861 return 0;
862
863
864 if (direction) {
865 /* TX */
866 if (enc_level <= ch->tx_enc_level)
867 /*
868 * Does not make sense for us to try and provision an EL we have already
869 * attained.
870 */
871 return 0;
872
873 if (!ossl_qtx_provide_secret(ch->qtx, enc_level,
874 suite_id, md,
875 secret, secret_len))
876 return 0;
877
878 ch->tx_enc_level = enc_level;
879 } else {
880 /* RX */
881 if (enc_level <= ch->rx_enc_level)
882 /*
883 * Does not make sense for us to try and provision an EL we have already
884 * attained.
885 */
886 return 0;
887
888 /*
889 * Ensure all crypto streams for previous ELs are now empty of available
890 * data.
891 */
892 for (i = QUIC_ENC_LEVEL_INITIAL; i < enc_level; ++i)
893 if (!crypto_ensure_empty(ch->crypto_recv[ossl_quic_enc_level_to_pn_space(i)])) {
894 /* Protocol violation (RFC 9001 s. 4.1.3) */
895 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
896 OSSL_QUIC_FRAME_TYPE_CRYPTO,
897 "crypto stream data in wrong EL");
898 return 0;
899 }
900
901 if (!ossl_qrx_provide_secret(ch->qrx, enc_level,
902 suite_id, md,
903 secret, secret_len))
904 return 0;
905
906 ch->have_new_rx_secret = 1;
907 ch->rx_enc_level = enc_level;
908 }
909
910 return 1;
911 }
912
913 static int ch_on_handshake_complete(void *arg)
914 {
915 QUIC_CHANNEL *ch = arg;
916
917 if (!ossl_assert(!ch->handshake_complete))
918 return 0; /* this should not happen twice */
919
920 if (!ossl_assert(ch->tx_enc_level == QUIC_ENC_LEVEL_1RTT))
921 return 0;
922
923 if (!ch->got_remote_transport_params) {
924 /*
925 * Was not a valid QUIC handshake if we did not get valid transport
926 * params.
927 */
928 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
929 OSSL_QUIC_FRAME_TYPE_CRYPTO,
930 "no transport parameters received");
931 return 0;
932 }
933
934 /* Don't need transport parameters anymore. */
935 OPENSSL_free(ch->local_transport_params);
936 ch->local_transport_params = NULL;
937
938 /* Tell TXP the handshake is complete. */
939 ossl_quic_tx_packetiser_notify_handshake_complete(ch->txp);
940
941 ch->handshake_complete = 1;
942
943 if (ch->is_server) {
944 /*
945 * On the server, the handshake is confirmed as soon as it is complete.
946 */
947 ossl_quic_channel_on_handshake_confirmed(ch);
948
949 ossl_quic_tx_packetiser_schedule_handshake_done(ch->txp);
950 }
951
952 return 1;
953 }
954
955 static int ch_on_handshake_alert(void *arg, unsigned char alert_code)
956 {
957 QUIC_CHANNEL *ch = arg;
958
959 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_CRYPTO_ERR_BEGIN + alert_code,
960 0, "handshake alert");
961 return 1;
962 }
963
964 /*
965 * QUIC Channel: Transport Parameter Handling
966 * ==========================================
967 */
968
969 /*
970 * Called by handshake layer when we receive QUIC Transport Parameters from the
971 * peer. Note that these are not authenticated until the handshake is marked
972 * as complete.
973 */
974 #define TP_REASON_SERVER_ONLY(x) \
975 x " may not be sent by a client"
976 #define TP_REASON_DUP(x) \
977 x " appears multiple times"
978 #define TP_REASON_MALFORMED(x) \
979 x " is malformed"
980 #define TP_REASON_EXPECTED_VALUE(x) \
981 x " does not match expected value"
982 #define TP_REASON_NOT_RETRY(x) \
983 x " sent when not performing a retry"
984 #define TP_REASON_REQUIRED(x) \
985 x " was not sent but is required"
986
987 static void txfc_bump_cwm_bidi(QUIC_STREAM *s, void *arg)
988 {
989 if (!ossl_quic_stream_is_bidi(s)
990 || ossl_quic_stream_is_server_init(s))
991 return;
992
993 ossl_quic_txfc_bump_cwm(&s->txfc, *(uint64_t *)arg);
994 }
995
996 static void txfc_bump_cwm_uni(QUIC_STREAM *s, void *arg)
997 {
998 if (ossl_quic_stream_is_bidi(s)
999 || ossl_quic_stream_is_server_init(s))
1000 return;
1001
1002 ossl_quic_txfc_bump_cwm(&s->txfc, *(uint64_t *)arg);
1003 }
1004
1005 static void do_update(QUIC_STREAM *s, void *arg)
1006 {
1007 QUIC_CHANNEL *ch = arg;
1008
1009 ossl_quic_stream_map_update_state(&ch->qsm, s);
1010 }
1011
1012 static int ch_on_transport_params(const unsigned char *params,
1013 size_t params_len,
1014 void *arg)
1015 {
1016 QUIC_CHANNEL *ch = arg;
1017 PACKET pkt;
1018 uint64_t id, v;
1019 size_t len;
1020 const unsigned char *body;
1021 int got_orig_dcid = 0;
1022 int got_initial_scid = 0;
1023 int got_retry_scid = 0;
1024 int got_initial_max_data = 0;
1025 int got_initial_max_stream_data_bidi_local = 0;
1026 int got_initial_max_stream_data_bidi_remote = 0;
1027 int got_initial_max_stream_data_uni = 0;
1028 int got_initial_max_streams_bidi = 0;
1029 int got_initial_max_streams_uni = 0;
1030 int got_ack_delay_exp = 0;
1031 int got_max_ack_delay = 0;
1032 int got_max_udp_payload_size = 0;
1033 int got_max_idle_timeout = 0;
1034 int got_active_conn_id_limit = 0;
1035 int got_disable_active_migration = 0;
1036 QUIC_CONN_ID cid;
1037 const char *reason = "bad transport parameter";
1038
1039 if (ch->got_remote_transport_params)
1040 goto malformed;
1041
1042 if (!PACKET_buf_init(&pkt, params, params_len))
1043 return 0;
1044
1045 while (PACKET_remaining(&pkt) > 0) {
1046 if (!ossl_quic_wire_peek_transport_param(&pkt, &id))
1047 goto malformed;
1048
1049 switch (id) {
1050 case QUIC_TPARAM_ORIG_DCID:
1051 if (got_orig_dcid) {
1052 reason = TP_REASON_DUP("ORIG_DCID");
1053 goto malformed;
1054 }
1055
1056 if (ch->is_server) {
1057 reason = TP_REASON_SERVER_ONLY("ORIG_DCID");
1058 goto malformed;
1059 }
1060
1061 if (!ossl_quic_wire_decode_transport_param_cid(&pkt, NULL, &cid)) {
1062 reason = TP_REASON_MALFORMED("ORIG_DCID");
1063 goto malformed;
1064 }
1065
1066 /* Must match our initial DCID. */
1067 if (!ossl_quic_conn_id_eq(&ch->init_dcid, &cid)) {
1068 reason = TP_REASON_EXPECTED_VALUE("ORIG_DCID");
1069 goto malformed;
1070 }
1071
1072 got_orig_dcid = 1;
1073 break;
1074
1075 case QUIC_TPARAM_RETRY_SCID:
1076 if (ch->is_server) {
1077 reason = TP_REASON_SERVER_ONLY("RETRY_SCID");
1078 goto malformed;
1079 }
1080
1081 if (got_retry_scid) {
1082 reason = TP_REASON_DUP("RETRY_SCID");
1083 goto malformed;
1084 }
1085
1086 if (!ch->doing_retry) {
1087 reason = TP_REASON_NOT_RETRY("RETRY_SCID");
1088 goto malformed;
1089 }
1090
1091 if (!ossl_quic_wire_decode_transport_param_cid(&pkt, NULL, &cid)) {
1092 reason = TP_REASON_MALFORMED("RETRY_SCID");
1093 goto malformed;
1094 }
1095
1096 /* Must match Retry packet SCID. */
1097 if (!ossl_quic_conn_id_eq(&ch->retry_scid, &cid)) {
1098 reason = TP_REASON_EXPECTED_VALUE("RETRY_SCID");
1099 goto malformed;
1100 }
1101
1102 got_retry_scid = 1;
1103 break;
1104
1105 case QUIC_TPARAM_INITIAL_SCID:
1106 if (got_initial_scid) {
1107 /* must not appear more than once */
1108 reason = TP_REASON_DUP("INITIAL_SCID");
1109 goto malformed;
1110 }
1111
1112 if (!ossl_quic_wire_decode_transport_param_cid(&pkt, NULL, &cid)) {
1113 reason = TP_REASON_MALFORMED("INITIAL_SCID");
1114 goto malformed;
1115 }
1116
1117 /* Must match SCID of first Initial packet from server. */
1118 if (!ossl_quic_conn_id_eq(&ch->init_scid, &cid)) {
1119 reason = TP_REASON_EXPECTED_VALUE("INITIAL_SCID");
1120 goto malformed;
1121 }
1122
1123 got_initial_scid = 1;
1124 break;
1125
1126 case QUIC_TPARAM_INITIAL_MAX_DATA:
1127 if (got_initial_max_data) {
1128 /* must not appear more than once */
1129 reason = TP_REASON_DUP("INITIAL_MAX_DATA");
1130 goto malformed;
1131 }
1132
1133 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
1134 reason = TP_REASON_MALFORMED("INITIAL_MAX_DATA");
1135 goto malformed;
1136 }
1137
1138 ossl_quic_txfc_bump_cwm(&ch->conn_txfc, v);
1139 got_initial_max_data = 1;
1140 break;
1141
1142 case QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL:
1143 if (got_initial_max_stream_data_bidi_local) {
1144 /* must not appear more than once */
1145 reason = TP_REASON_DUP("INITIAL_MAX_STREAM_DATA_BIDI_LOCAL");
1146 goto malformed;
1147 }
1148
1149 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
1150 reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAM_DATA_BIDI_LOCAL");
1151 goto malformed;
1152 }
1153
1154 /*
1155 * This is correct; the BIDI_LOCAL TP governs streams created by
1156 * the endpoint which sends the TP, i.e., our peer.
1157 */
1158 ch->rx_init_max_stream_data_bidi_remote = v;
1159 got_initial_max_stream_data_bidi_local = 1;
1160 break;
1161
1162 case QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE:
1163 if (got_initial_max_stream_data_bidi_remote) {
1164 /* must not appear more than once */
1165 reason = TP_REASON_DUP("INITIAL_MAX_STREAM_DATA_BIDI_REMOTE");
1166 goto malformed;
1167 }
1168
1169 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
1170 reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAM_DATA_BIDI_REMOTE");
1171 goto malformed;
1172 }
1173
1174 /*
1175 * This is correct; the BIDI_REMOTE TP governs streams created
1176 * by the endpoint which receives the TP, i.e., us.
1177 */
1178 ch->rx_init_max_stream_data_bidi_local = v;
1179
1180 /* Apply to all existing streams. */
1181 ossl_quic_stream_map_visit(&ch->qsm, txfc_bump_cwm_bidi, &v);
1182 got_initial_max_stream_data_bidi_remote = 1;
1183 break;
1184
1185 case QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_UNI:
1186 if (got_initial_max_stream_data_uni) {
1187 /* must not appear more than once */
1188 reason = TP_REASON_DUP("INITIAL_MAX_STREAM_DATA_UNI");
1189 goto malformed;
1190 }
1191
1192 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
1193 reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAM_DATA_UNI");
1194 goto malformed;
1195 }
1196
1197 ch->rx_init_max_stream_data_uni = v;
1198
1199 /* Apply to all existing streams. */
1200 ossl_quic_stream_map_visit(&ch->qsm, txfc_bump_cwm_uni, &v);
1201 got_initial_max_stream_data_uni = 1;
1202 break;
1203
1204 case QUIC_TPARAM_ACK_DELAY_EXP:
1205 if (got_ack_delay_exp) {
1206 /* must not appear more than once */
1207 reason = TP_REASON_DUP("ACK_DELAY_EXP");
1208 goto malformed;
1209 }
1210
1211 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
1212 || v > QUIC_MAX_ACK_DELAY_EXP) {
1213 reason = TP_REASON_MALFORMED("ACK_DELAY_EXP");
1214 goto malformed;
1215 }
1216
1217 ch->rx_ack_delay_exp = (unsigned char)v;
1218 got_ack_delay_exp = 1;
1219 break;
1220
1221 case QUIC_TPARAM_MAX_ACK_DELAY:
1222 if (got_max_ack_delay) {
1223 /* must not appear more than once */
1224 reason = TP_REASON_DUP("MAX_ACK_DELAY");
1225 goto malformed;
1226 }
1227
1228 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
1229 || v >= (((uint64_t)1) << 14)) {
1230 reason = TP_REASON_MALFORMED("MAX_ACK_DELAY");
1231 goto malformed;
1232 }
1233
1234 ch->rx_max_ack_delay = v;
1235 got_max_ack_delay = 1;
1236 break;
1237
1238 case QUIC_TPARAM_INITIAL_MAX_STREAMS_BIDI:
1239 if (got_initial_max_streams_bidi) {
1240 /* must not appear more than once */
1241 reason = TP_REASON_DUP("INITIAL_MAX_STREAMS_BIDI");
1242 goto malformed;
1243 }
1244
1245 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
1246 || v > (((uint64_t)1) << 60)) {
1247 reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAMS_BIDI");
1248 goto malformed;
1249 }
1250
1251 assert(ch->max_local_streams_bidi == 0);
1252 ch->max_local_streams_bidi = v;
1253 got_initial_max_streams_bidi = 1;
1254 break;
1255
1256 case QUIC_TPARAM_INITIAL_MAX_STREAMS_UNI:
1257 if (got_initial_max_streams_uni) {
1258 /* must not appear more than once */
1259 reason = TP_REASON_DUP("INITIAL_MAX_STREAMS_UNI");
1260 goto malformed;
1261 }
1262
1263 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
1264 || v > (((uint64_t)1) << 60)) {
1265 reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAMS_UNI");
1266 goto malformed;
1267 }
1268
1269 assert(ch->max_local_streams_uni == 0);
1270 ch->max_local_streams_uni = v;
1271 got_initial_max_streams_uni = 1;
1272 break;
1273
1274 case QUIC_TPARAM_MAX_IDLE_TIMEOUT:
1275 if (got_max_idle_timeout) {
1276 /* must not appear more than once */
1277 reason = TP_REASON_DUP("MAX_IDLE_TIMEOUT");
1278 goto malformed;
1279 }
1280
1281 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
1282 reason = TP_REASON_MALFORMED("MAX_IDLE_TIMEOUT");
1283 goto malformed;
1284 }
1285
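/*
 * The effective idle timeout is the minimum of the two endpoints'
 * advertised non-zero values (RFC 9000 s. 10.1).
 */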
1286 if (v > 0 && v < ch->max_idle_timeout)
1287 ch->max_idle_timeout = v;
1288
1289 ch_update_idle(ch);
1290 got_max_idle_timeout = 1;
1291 break;
1292
1293 case QUIC_TPARAM_MAX_UDP_PAYLOAD_SIZE:
1294 if (got_max_udp_payload_size) {
1295 /* must not appear more than once */
1296 reason = TP_REASON_DUP("MAX_UDP_PAYLOAD_SIZE");
1297 goto malformed;
1298 }
1299
1300 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
1301 || v < QUIC_MIN_INITIAL_DGRAM_LEN) {
1302 reason = TP_REASON_MALFORMED("MAX_UDP_PAYLOAD_SIZE");
1303 goto malformed;
1304 }
1305
1306 ch->rx_max_udp_payload_size = v;
1307 got_max_udp_payload_size = 1;
1308 break;
1309
1310 case QUIC_TPARAM_ACTIVE_CONN_ID_LIMIT:
1311 if (got_active_conn_id_limit) {
1312 /* must not appear more than once */
1313 reason = TP_REASON_DUP("ACTIVE_CONN_ID_LIMIT");
1314 goto malformed;
1315 }
1316
1317 if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
1318 || v < QUIC_MIN_ACTIVE_CONN_ID_LIMIT) {
1319 reason = TP_REASON_MALFORMED("ACTIVE_CONN_ID_LIMIT");
1320 goto malformed;
1321 }
1322
1323 ch->rx_active_conn_id_limit = v;
1324 got_active_conn_id_limit = 1;
1325 break;
1326
1327 case QUIC_TPARAM_STATELESS_RESET_TOKEN:
1328 /* TODO(QUIC): Handle stateless reset tokens. */
1329 /*
1330 * We ignore these for now, but we must ensure a client doesn't
1331 * send them.
1332 */
1333 if (ch->is_server) {
1334 reason = TP_REASON_SERVER_ONLY("STATELESS_RESET_TOKEN");
1335 goto malformed;
1336 }
1337
1338 body = ossl_quic_wire_decode_transport_param_bytes(&pkt, &id, &len);
1339 if (body == NULL || len != QUIC_STATELESS_RESET_TOKEN_LEN) {
1340 reason = TP_REASON_MALFORMED("STATELESS_RESET_TOKEN");
1341 goto malformed;
1342 }
1343
1344 break;
1345
1346 case QUIC_TPARAM_PREFERRED_ADDR:
1347 {
1348 /* TODO(QUIC): Handle preferred address. */
1349 QUIC_PREFERRED_ADDR pfa;
1350
1351 /*
1352 * RFC 9000 s. 18.2: "A server that chooses a zero-length
1353 * connection ID MUST NOT provide a preferred address.
1354 * Similarly, a server MUST NOT include a zero-length connection
1355 * ID in this transport parameter. A client MUST treat a
1356 * violation of these requirements as a connection error of type
1357 * TRANSPORT_PARAMETER_ERROR."
1358 */
1359 if (ch->is_server) {
1360 reason = TP_REASON_SERVER_ONLY("PREFERRED_ADDR");
1361 goto malformed;
1362 }
1363
1364 if (ch->cur_remote_dcid.id_len == 0) {
1365 reason = "PREFERRED_ADDR provided for zero-length CID";
1366 goto malformed;
1367 }
1368
1369 if (!ossl_quic_wire_decode_transport_param_preferred_addr(&pkt, &pfa)) {
1370 reason = TP_REASON_MALFORMED("PREFERRED_ADDR");
1371 goto malformed;
1372 }
1373
1374 if (pfa.cid.id_len == 0) {
1375 reason = "zero-length CID in PREFERRED_ADDR";
1376 goto malformed;
1377 }
1378 }
1379 break;
1380
1381 case QUIC_TPARAM_DISABLE_ACTIVE_MIGRATION:
1382 /* We do not currently handle migration, so nothing to do. */
1383 if (got_disable_active_migration) {
1384 /* must not appear more than once */
1385 reason = TP_REASON_DUP("DISABLE_ACTIVE_MIGRATION");
1386 goto malformed;
1387 }
1388
1389 body = ossl_quic_wire_decode_transport_param_bytes(&pkt, &id, &len);
1390 if (body == NULL || len > 0) {
1391 reason = TP_REASON_MALFORMED("DISABLE_ACTIVE_MIGRATION");
1392 goto malformed;
1393 }
1394
1395 got_disable_active_migration = 1;
1396 break;
1397
1398 default:
1399 /*
1400 * Skip over and ignore.
1401 *
1402 * RFC 9000 s. 7.4: we SHOULD treat duplicated transport parameters
1403 * as a connection error, but we are not required to. Currently we
1404 * handle this by checking for duplicates among the parameters we
1405 * recognise, as above, but we do not track duplicates of parameters
1406 * we do not recognise.
1407 */
1408 body = ossl_quic_wire_decode_transport_param_bytes(&pkt, &id,
1409 &len);
1410 if (body == NULL)
1411 goto malformed;
1412
1413 break;
1414 }
1415 }
1416
1417 if (!got_initial_scid) {
1418 reason = TP_REASON_REQUIRED("INITIAL_SCID");
1419 goto malformed;
1420 }
1421
1422 if (!ch->is_server) {
1423 if (!got_orig_dcid) {
1424 reason = TP_REASON_REQUIRED("ORIG_DCID");
1425 goto malformed;
1426 }
1427
1428 if (ch->doing_retry && !got_retry_scid) {
1429 reason = TP_REASON_REQUIRED("RETRY_SCID");
1430 goto malformed;
1431 }
1432 }
1433
1434 ch->got_remote_transport_params = 1;
1435
1436 if (got_initial_max_data || got_initial_max_stream_data_bidi_remote
1437 || got_initial_max_streams_bidi || got_initial_max_streams_uni)
1438 /*
1439 * If FC credit was bumped, we may now be able to send. Update all
1440 * streams.
1441 */
1442 ossl_quic_stream_map_visit(&ch->qsm, do_update, ch);
1443
1444 /* If we are a server, we now generate our own transport parameters. */
1445 if (ch->is_server && !ch_generate_transport_params(ch)) {
1446 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR, 0,
1447 "internal error");
1448 return 0;
1449 }
1450
1451 return 1;
1452
1453 malformed:
1454 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_TRANSPORT_PARAMETER_ERROR,
1455 0, reason);
1456 return 0;
1457 }
1458
1459 /*
1460 * Called when we want to generate transport parameters. This is called
1461 * immediately at instantiation time for a client and, for a server, after
1462 * we receive the client's transport parameters.
1463 */
1464 static int ch_generate_transport_params(QUIC_CHANNEL *ch)
1465 {
1466 int ok = 0;
1467 BUF_MEM *buf_mem = NULL;
1468 WPACKET wpkt;
1469 int wpkt_valid = 0;
1470 size_t buf_len = 0;
1471
1472 if (ch->local_transport_params != NULL)
1473 goto err;
1474
1475 if ((buf_mem = BUF_MEM_new()) == NULL)
1476 goto err;
1477
1478 if (!WPACKET_init(&wpkt, buf_mem))
1479 goto err;
1480
1481 wpkt_valid = 1;
1482
1483 if (ossl_quic_wire_encode_transport_param_bytes(&wpkt, QUIC_TPARAM_DISABLE_ACTIVE_MIGRATION,
1484 NULL, 0) == NULL)
1485 goto err;
1486
1487 if (ch->is_server) {
1488 if (!ossl_quic_wire_encode_transport_param_cid(&wpkt, QUIC_TPARAM_ORIG_DCID,
1489 &ch->init_dcid))
1490 goto err;
1491
1492 if (!ossl_quic_wire_encode_transport_param_cid(&wpkt, QUIC_TPARAM_INITIAL_SCID,
1493 &ch->cur_local_cid))
1494 goto err;
1495 } else {
1496 /* Client always uses an empty SCID. */
1497 if (ossl_quic_wire_encode_transport_param_bytes(&wpkt, QUIC_TPARAM_INITIAL_SCID,
1498 NULL, 0) == NULL)
1499 goto err;
1500 }
1501
1502 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_MAX_IDLE_TIMEOUT,
1503 ch->max_idle_timeout))
1504 goto err;
1505
1506 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_MAX_UDP_PAYLOAD_SIZE,
1507 QUIC_MIN_INITIAL_DGRAM_LEN))
1508 goto err;
1509
1510 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_ACTIVE_CONN_ID_LIMIT,
1511 QUIC_MIN_ACTIVE_CONN_ID_LIMIT))
1512 goto err;
1513
1514 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_DATA,
1515 ossl_quic_rxfc_get_cwm(&ch->conn_rxfc)))
1516 goto err;
1517
1518 /* Send the default CWM for a new RXFC. */
1519 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
1520 ch->tx_init_max_stream_data_bidi_local))
1521 goto err;
1522
1523 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE,
1524 ch->tx_init_max_stream_data_bidi_remote))
1525 goto err;
1526
1527 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_UNI,
1528 ch->tx_init_max_stream_data_uni))
1529 goto err;
1530
1531 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAMS_BIDI,
1532 ossl_quic_rxfc_get_cwm(&ch->max_streams_bidi_rxfc)))
1533 goto err;
1534
1535 if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAMS_UNI,
1536 ossl_quic_rxfc_get_cwm(&ch->max_streams_uni_rxfc)))
1537 goto err;
1538
1539 if (!WPACKET_finish(&wpkt))
1540 goto err;
1541
1542 wpkt_valid = 0;
1543
1544 if (!WPACKET_get_total_written(&wpkt, &buf_len))
1545 goto err;
1546
1547 ch->local_transport_params = (unsigned char *)buf_mem->data;
1548 buf_mem->data = NULL;
1549
1550
1551 if (!ossl_quic_tls_set_transport_params(ch->qtls, ch->local_transport_params,
1552 buf_len))
1553 goto err;
1554
1555 ok = 1;
1556 err:
1557 if (wpkt_valid)
1558 WPACKET_cleanup(&wpkt);
1559 BUF_MEM_free(buf_mem);
1560 return ok;
1561 }
1562
1563 /*
1564 * QUIC Channel: Ticker-Mutator
1565 * ============================
1566 */
1567
1568 /*
1569 * The central ticker function called by the reactor. This does everything, or
1570 * at least everything network I/O related. Best effort - not allowed to fail
1571 * "loudly".
1572 */
1573 static void ch_tick(QUIC_TICK_RESULT *res, void *arg, uint32_t flags)
1574 {
1575 OSSL_TIME now, deadline;
1576 QUIC_CHANNEL *ch = arg;
1577 int channel_only = (flags & QUIC_REACTOR_TICK_FLAG_CHANNEL_ONLY) != 0;
1578
1579 /*
1580 * When we tick the QUIC connection, we do everything we need to do
1581 * periodically. In order, we:
1582 *
1583 * - handle any incoming data from the network;
1584 * - handle any timer events which are due to fire (ACKM, etc.)
1585 * - write any data to the network due to be sent, to the extent
1586 * possible;
1587 * - determine the time at which we should next be ticked.
1588 */
1589
1590 /* If we are in the TERMINATED state, there is nothing to do. */
1591 if (ossl_quic_channel_is_terminated(ch)) {
1592 res->net_read_desired = 0;
1593 res->net_write_desired = 0;
1594 res->tick_deadline = ossl_time_infinite();
1595 return;
1596 }
1597
1598 /*
1599 * If we are in the TERMINATING state, check if the terminating timer has
1600 * expired.
1601 */
1602 if (ossl_quic_channel_is_terminating(ch)) {
1603 now = get_time(ch);
1604
1605 if (ossl_time_compare(now, ch->terminate_deadline) >= 0) {
1606 ch_on_terminating_timeout(ch);
1607 res->net_read_desired = 0;
1608 res->net_write_desired = 0;
1609 res->tick_deadline = ossl_time_infinite();
1610 return; /* abort normal processing, nothing to do */
1611 }
1612 }
1613
1614 /* Handle RXKU timeouts. */
1615 ch_rxku_tick(ch);
1616
1617 /* Handle any incoming data from network. */
1618 ch_rx_pre(ch);
1619
1620 do {
1621 /* Process queued incoming packets. */
1622 ch_rx(ch);
1623
1624 /*
1625 * Allow the handshake layer to check for any new incoming data and generate
1626 * new outgoing data.
1627 */
1628 ch->have_new_rx_secret = 0;
1629 if (!channel_only)
1630 ossl_quic_tls_tick(ch->qtls);
1631
1632 /*
1633 * If the handshake layer gave us a new secret, we need to do RX again
1634 * because packets that were not previously processable and were
1635 * deferred might now be processable.
1636 *
1637 * TODO(QUIC): Consider handling this in the yield_secret callback.
1638 */
1639 } while (ch->have_new_rx_secret);
1640
1641 /*
1642 * Handle any timer events which are due to fire; namely, the loss detection
1643 * deadline and the idle timeout.
1644 *
1645 * ACKM ACK generation deadline is polled by TXP, so we don't need to handle
1646 * it here.
1647 */
1648 now = get_time(ch);
1649 if (ossl_time_compare(now, ch->idle_deadline) >= 0) {
1650 /*
1651 * Idle timeout differs from normal protocol violation because we do not
1652 * send a CONN_CLOSE frame; go straight to TERMINATED.
1653 */
1654 ch_on_idle_timeout(ch);
1655 res->net_read_desired = 0;
1656 res->net_write_desired = 0;
1657 res->tick_deadline = ossl_time_infinite();
1658 return;
1659 }
1660
1661 deadline = ossl_ackm_get_loss_detection_deadline(ch->ackm);
1662 if (!ossl_time_is_zero(deadline) && ossl_time_compare(now, deadline) >= 0)
1663 ossl_ackm_on_timeout(ch->ackm);
1664
1665 /* If a ping is due, inform TXP. */
1666 if (ossl_time_compare(now, ch->ping_deadline) >= 0) {
1667 int pn_space = ossl_quic_enc_level_to_pn_space(ch->tx_enc_level);
1668
1669 ossl_quic_tx_packetiser_schedule_ack_eliciting(ch->txp, pn_space);
1670 }
1671
1672 /* Write any data to the network due to be sent. */
1673 ch_tx(ch);
1674
1675 /* Do stream GC. */
1676 ossl_quic_stream_map_gc(&ch->qsm);
1677
1678 /* Determine the time at which we should next be ticked. */
1679 res->tick_deadline = ch_determine_next_tick_deadline(ch);
1680
1681 /*
1682 * Always process network input unless we are now terminated.
1683 * Although we had not terminated at the beginning of this tick, network
1684 * errors in ch_rx_pre() or ch_tx() may have caused us to transition to the
1685 * Terminated state.
1686 */
1687 res->net_read_desired = !ossl_quic_channel_is_terminated(ch);
1688
1689 /* We want to write to the network if we have any datagrams queued. */
1690 res->net_write_desired
1691 = (!ossl_quic_channel_is_terminated(ch)
1692 && ossl_qtx_get_queue_len_datagrams(ch->qtx) > 0);
1693 }
1694
1695 /* Process incoming datagrams, if any. */
1696 static void ch_rx_pre(QUIC_CHANNEL *ch)
1697 {
1698 int ret;
1699
1700 if (!ch->is_server && !ch->have_sent_any_pkt)
1701 return;
1702
1703 /*
1704 * Get DEMUX to BIO_recvmmsg from the network and queue incoming datagrams
1705 * to the appropriate QRX instance.
1706 */
1707 ret = ossl_quic_demux_pump(ch->demux);
1708 if (ret == QUIC_DEMUX_PUMP_RES_PERMANENT_FAIL)
1709 /*
1710 * We don't care about transient failure, but permanent failure means we
1711 * should tear down the connection as though a protocol violation
1712 * occurred. Skip straight to the Terminating state as there is no point
1713 * trying to send CONNECTION_CLOSE frames if the network BIO is not
1714 * operating correctly.
1715 */
1716 ch_raise_net_error(ch);
1717 }
1718
1719 /* Check incoming forged packet limit and terminate connection if needed. */
1720 static void ch_rx_check_forged_pkt_limit(QUIC_CHANNEL *ch)
1721 {
1722 uint32_t enc_level;
1723 uint64_t limit = UINT64_MAX, l;
1724
1725 for (enc_level = QUIC_ENC_LEVEL_INITIAL;
1726 enc_level < QUIC_ENC_LEVEL_NUM;
1727 ++enc_level)
1728 {
1729 /*
1730 * Different ELs can have different AEADs which can in turn impose
1731 * different limits, so use the lowest value of any currently valid EL.
1732 */
1733 if ((ch->el_discarded & (1U << enc_level)) != 0)
1734 continue;
1735
1736 if (enc_level > ch->rx_enc_level)
1737 break;
1738
1739 l = ossl_qrx_get_max_forged_pkt_count(ch->qrx, enc_level);
1740 if (l < limit)
1741 limit = l;
1742 }
1743
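/*
 * For reference, RFC 9001 s. 6.6 sets the integrity limit at 2^52 forged
 * packets for AES-GCM but only about 2^21.5 for AEAD_AES_128_CCM, hence
 * taking the minimum across the ELs still in use above.
 */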
1744 if (ossl_qrx_get_cur_forged_pkt_count(ch->qrx) < limit)
1745 return;
1746
1747 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_AEAD_LIMIT_REACHED, 0,
1748 "forgery limit");
1749 }
1750
1751 /* Process queued incoming packets and handle frames, if any. */
1752 static int ch_rx(QUIC_CHANNEL *ch)
1753 {
1754 int handled_any = 0;
1755
1756 if (!ch->is_server && !ch->have_sent_any_pkt)
1757 /*
1758 * We have not sent anything yet, therefore there is no need to check
1759 * for incoming data.
1760 */
1761 return 1;
1762
1763 for (;;) {
1764 assert(ch->qrx_pkt == NULL);
1765
1766 if (!ossl_qrx_read_pkt(ch->qrx, &ch->qrx_pkt))
1767 break;
1768
1769 if (!handled_any)
1770 ch_update_idle(ch);
1771
1772 ch_rx_handle_packet(ch); /* best effort */
1773
1774 /*
1775 * Regardless of the outcome of frame handling, unref the packet.
1776 * This will free the packet unless something added another
1777 * reference to it during frame processing.
1778 */
1779 ossl_qrx_pkt_release(ch->qrx_pkt);
1780 ch->qrx_pkt = NULL;
1781
1782 ch->have_sent_ack_eliciting_since_rx = 0;
1783 handled_any = 1;
1784 }
1785
1786 ch_rx_check_forged_pkt_limit(ch);
1787
1788 /*
1789 * When in TERMINATING - CLOSING, generate a CONN_CLOSE frame whenever we
1790 * process one or more incoming packets.
1791 */
1792 if (handled_any && ch->state == QUIC_CHANNEL_STATE_TERMINATING_CLOSING)
1793 ch->conn_close_queued = 1;
1794
1795 return 1;
1796 }
1797
1798 static int bio_addr_eq(const BIO_ADDR *a, const BIO_ADDR *b)
1799 {
1800 if (BIO_ADDR_family(a) != BIO_ADDR_family(b))
1801 return 0;
1802
1803 switch (BIO_ADDR_family(a)) {
1804 case AF_INET:
1805 return !memcmp(&a->s_in.sin_addr,
1806 &b->s_in.sin_addr,
1807 sizeof(a->s_in.sin_addr))
1808 && a->s_in.sin_port == b->s_in.sin_port;
1809 case AF_INET6:
1810 return !memcmp(&a->s_in6.sin6_addr,
1811 &b->s_in6.sin6_addr,
1812 sizeof(a->s_in6.sin6_addr))
1813 && a->s_in6.sin6_port == b->s_in6.sin6_port;
1814 default:
1815 return 0; /* not supported */
1816 }
1817
1818 return 1;
1819 }
1820
1821 /* Handles the packet currently in ch->qrx_pkt. */
1822 static void ch_rx_handle_packet(QUIC_CHANNEL *ch)
1823 {
1824 uint32_t enc_level;
1825
1826 assert(ch->qrx_pkt != NULL);
1827
1828 if (!ossl_quic_channel_is_active(ch))
1829 /* Do not process packets once we are terminating. */
1830 return;
1831
1832 if (ossl_quic_pkt_type_is_encrypted(ch->qrx_pkt->hdr->type)) {
1833 if (!ch->have_received_enc_pkt) {
1834 ch->cur_remote_dcid = ch->init_scid = ch->qrx_pkt->hdr->src_conn_id;
1835 ch->have_received_enc_pkt = 1;
1836
1837 /*
1838 * We change to using the SCID in the first Initial packet as the
1839 * DCID.
1840 */
1841 ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, &ch->init_scid);
1842 }
1843
1844 enc_level = ossl_quic_pkt_type_to_enc_level(ch->qrx_pkt->hdr->type);
1845 if ((ch->el_discarded & (1U << enc_level)) != 0)
1846 /* Do not process packets from ELs we have already discarded. */
1847 return;
1848 }
1849
1850 /*
1851 * RFC 9000 s. 9.6: "If a client receives packets from a new server address
1852 * when the client has not initiated a migration to that address, the client
1853 * SHOULD discard these packets."
1854 *
1855 * We need to be a bit careful here: due to the BIO abstraction layer, an
1856 * application may behave oddly and lie to us about peer addresses.
1857 * Only apply this check if we actually are using a real AF_INET or AF_INET6
1858 * address.
1859 */
1860 if (!ch->is_server
1861 && ch->qrx_pkt->peer != NULL
1862 && (BIO_ADDR_family(&ch->cur_peer_addr) == AF_INET
1863 || BIO_ADDR_family(&ch->cur_peer_addr) == AF_INET6)
1864 && !bio_addr_eq(ch->qrx_pkt->peer, &ch->cur_peer_addr))
1865 return;
1866
1867 if (!ch->is_server
1868 && ch->have_received_enc_pkt
1869 && ossl_quic_pkt_type_has_scid(ch->qrx_pkt->hdr->type)) {
1870 /*
1871 * RFC 9000 s. 7.2: "Once a client has received a valid Initial packet
1872 * from the server, it MUST discard any subsequent packet it receives on
1873 * that connection with a different SCID."
1874 */
1875 if (!ossl_quic_conn_id_eq(&ch->qrx_pkt->hdr->src_conn_id,
1876 &ch->init_scid))
1877 return;
1878 }
1879
1880 if (ossl_quic_pkt_type_has_version(ch->qrx_pkt->hdr->type)
1881 && ch->qrx_pkt->hdr->version != QUIC_VERSION_1)
1882 /*
1883 * RFC 9000 s. 5.2.1: If a client receives a packet that uses a
1884 * different version than it initially selected, it MUST discard the
1885 * packet. We only ever use v1, so require it.
1886 */
1887 return;
1888
1889 /*
1890 * RFC 9000 s. 17.2: "An endpoint MUST treat receipt of a packet that has a
1891 * non-zero value for [the reserved bits] after removing both packet and
1892 * header protection as a connection error of type PROTOCOL_VIOLATION."
1893 */
1894 if (ossl_quic_pkt_type_is_encrypted(ch->qrx_pkt->hdr->type)
1895 && ch->qrx_pkt->hdr->reserved != 0) {
1896 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
1897 0, "packet header reserved bits");
1898 return;
1899 }
1900
1901 /* Handle incoming packet. */
1902 switch (ch->qrx_pkt->hdr->type) {
1903 case QUIC_PKT_TYPE_RETRY:
1904 if (ch->doing_retry || ch->is_server)
1905 /*
1906 * A client may only be asked to do a retry once. Moreover, clients
1907 * never send Retry packets, so we ignore them when acting as a server.
1908 */
1909 return;
1910
1911 if (ch->qrx_pkt->hdr->len <= QUIC_RETRY_INTEGRITY_TAG_LEN)
1912 /* Packets with zero-length Retry Tokens are invalid. */
1913 return;
1914
1915 /*
1916 * TODO(QUIC): Theoretically this should probably be in the QRX.
1917 * However because validation is dependent on context (namely the
1918 * client's initial DCID) we can't do this cleanly. In the future we
1919 * should probably add a callback to the QRX to let it call us (via
1920 * the DEMUX) and ask us about the correct original DCID, rather
1921 * than allow the QRX to emit a potentially malformed packet to the
1922 * upper layers. However, special casing this will do for now.
1923 */
1924 if (!ossl_quic_validate_retry_integrity_tag(ch->libctx,
1925 ch->propq,
1926 ch->qrx_pkt->hdr,
1927 &ch->init_dcid))
1928 /* Malformed retry packet, ignore. */
1929 return;
1930
1931 ch_retry(ch, ch->qrx_pkt->hdr->data,
1932 ch->qrx_pkt->hdr->len - QUIC_RETRY_INTEGRITY_TAG_LEN,
1933 &ch->qrx_pkt->hdr->src_conn_id);
1934 break;
1935
1936 case QUIC_PKT_TYPE_0RTT:
1937 if (!ch->is_server)
1938 /* Clients should never receive 0-RTT packets. */
1939 return;
1940
1941 /*
1942 * TODO(QUIC): Implement 0-RTT on the server side. We currently do
1943 * not need to implement this as a client can only do 0-RTT if we
1944 * have given it permission to in a previous session.
1945 */
1946 break;
1947
1948 case QUIC_PKT_TYPE_INITIAL:
1949 case QUIC_PKT_TYPE_HANDSHAKE:
1950 case QUIC_PKT_TYPE_1RTT:
1951 if (ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_HANDSHAKE)
1952 /*
1953 * We automatically drop INITIAL EL keys when first successfully
1954 * decrypting a HANDSHAKE packet, as per the RFC.
1955 */
1956 ch_discard_el(ch, QUIC_ENC_LEVEL_INITIAL);
1957
1958 if (ch->rxku_in_progress
1959 && ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_1RTT
1960 && ch->qrx_pkt->pn >= ch->rxku_trigger_pn
1961 && ch->qrx_pkt->key_epoch < ossl_qrx_get_key_epoch(ch->qrx)) {
1962 /*
1963 * RFC 9001 s. 6.4: Packets with higher packet numbers MUST be
1964 * protected with either the same or newer packet protection keys
1965 * than packets with lower packet numbers. An endpoint that
1966 * successfully removes protection with old keys when newer keys
1967 * were used for packets with lower packet numbers MUST treat this
1968 * as a connection error of type KEY_UPDATE_ERROR.
1969 */
1970 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_KEY_UPDATE_ERROR,
1971 0, "new packet with old keys");
1972 break;
1973 }
1974
1975 if (!ch->is_server
1976 && ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_INITIAL
1977 && ch->qrx_pkt->hdr->token_len > 0) {
1978 /*
1979 * RFC 9000 s. 17.2.2: Clients that receive an Initial packet with a
1980 * non-zero Token Length field MUST either discard the packet or
1981 * generate a connection error of type PROTOCOL_VIOLATION.
1982 */
1983 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
1984 0, "client received initial token");
1985 break;
1986 }
1987
1988 /* This packet contains frames, pass to the RXDP. */
1989 ossl_quic_handle_frames(ch, ch->qrx_pkt); /* best effort */
1990 break;
1991
1992 default:
1993 assert(0);
1994 break;
1995 }
1996 }
1997
1998 /*
1999 * This is called by the demux when we get a packet not destined for any known
2000 * DCID.
2001 */
2002 static void ch_default_packet_handler(QUIC_URXE *e, void *arg)
2003 {
2004 QUIC_CHANNEL *ch = arg;
2005 PACKET pkt;
2006 QUIC_PKT_HDR hdr;
2007
2008 if (!ossl_assert(ch->is_server))
2009 goto undesirable;
2010
2011 /*
2012 * We only support one connection to our server currently, so if we already
2013 * started one, ignore any new connection attempts.
2014 */
2015 if (ch->state != QUIC_CHANNEL_STATE_IDLE)
2016 goto undesirable;
2017
2018 /*
2019 * We have got a packet for an unknown DCID. This might be an attempt to
2020 * open a new connection.
2021 */
2022 if (e->data_len < QUIC_MIN_INITIAL_DGRAM_LEN)
2023 goto undesirable;
2024
2025 if (!PACKET_buf_init(&pkt, ossl_quic_urxe_data(e), e->data_len))
2026 goto err;
2027
2028 /*
2029 * We set short_conn_id_len to SIZE_MAX here which will cause the decode
2030 * operation to fail if we get a 1-RTT packet. This is fine since we only
2031 * care about Initial packets.
2032 */
2033 if (!ossl_quic_wire_decode_pkt_hdr(&pkt, SIZE_MAX, 1, 0, &hdr, NULL))
2034 goto undesirable;
2035
2036 switch (hdr.version) {
2037 case QUIC_VERSION_1:
2038 break;
2039
2040 case QUIC_VERSION_NONE:
2041 default:
2042 /* Unknown version or proactive version negotiation request, bail. */
2043 /* TODO(QUIC): Handle version negotiation on server side */
2044 goto undesirable;
2045 }
2046
2047 /*
2048 * We only care about Initial packets which might be trying to establish a
2049 * connection.
2050 */
2051 if (hdr.type != QUIC_PKT_TYPE_INITIAL)
2052 goto undesirable;
2053
2054 /*
2055 * Assume this is a valid attempt to initiate a connection.
2056 *
2057 * We do not register the DCID in the initial packet we received and that
2058 * DCID is not actually used again; thus, after provisioning the correct
2059 * Initial keys derived from it (which is done in the call below) we pass
2060 * the received packet directly to the QRX so that it can process it as a
2061 * one-time thing, instead of going through the usual DEMUX DCID-based
2062 * routing.
2063 */
2064 if (!ch_server_on_new_conn(ch, &e->peer,
2065 &hdr.src_conn_id,
2066 &hdr.dst_conn_id))
2067 goto err;
2068
2069 ossl_qrx_inject_urxe(ch->qrx, e);
2070 return;
2071
2072 err:
2073 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR, 0,
2074 "internal error");
2075 undesirable:
2076 ossl_quic_demux_release_urxe(ch->demux, e);
2077 }
2078
2079 /* Try to generate packets and if possible, flush them to the network. */
2080 static int ch_tx(QUIC_CHANNEL *ch)
2081 {
2082 QUIC_TXP_STATUS status;
2083
2084 if (ch->state == QUIC_CHANNEL_STATE_TERMINATING_CLOSING) {
2085 /*
2086 * While closing, only send CONN_CLOSE if we've received more traffic
2087 * from the peer. Once we tell the TXP to generate CONN_CLOSE, all
2088 * future calls to it generate CONN_CLOSE frames, so otherwise we would
2089 * just constantly generate CONN_CLOSE frames.
2090 */
2091 if (!ch->conn_close_queued)
2092 return 0;
2093
2094 ch->conn_close_queued = 0;
2095 }
2096
2097 /* Do TXKU if we need to. */
2098 ch_maybe_trigger_spontaneous_txku(ch);
2099
2100 ch->rxku_pending_confirm_done = 0;
2101
2102 /*
2103 * Send a packet, if we need to. The TXP consults the CC and applies any
2104 * limitations imposed by it, so we don't need to do that here.
2105 *
2106 * This is best effort; in particular, if the TXP fails for some reason we
2107 * should still flush any queued packets we have already generated.
2108 */
2109 switch (ossl_quic_tx_packetiser_generate(ch->txp,
2110 TX_PACKETISER_ARCHETYPE_NORMAL,
2111 &status)) {
2112 case TX_PACKETISER_RES_SENT_PKT:
2113 ch->have_sent_any_pkt = 1; /* Packet was sent */
2114
2115 /*
2116 * RFC 9000 s. 10.1. 'An endpoint also restarts its idle timer when
2117 * sending an ack-eliciting packet if no other ack-eliciting packets
2118 * have been sent since last receiving and processing a packet.'
2119 */
2120 if (status.sent_ack_eliciting && !ch->have_sent_ack_eliciting_since_rx) {
2121 ch_update_idle(ch);
2122 ch->have_sent_ack_eliciting_since_rx = 1;
2123 }
2124
2125 if (ch->rxku_pending_confirm_done)
2126 ch->rxku_pending_confirm = 0;
2127
2128 ch_update_ping_deadline(ch);
2129 break;
2130
2131 case TX_PACKETISER_RES_NO_PKT:
2132 break; /* No packet was sent */
2133
2134 default:
2135 /*
2136 * One case where TXP can fail is if we reach a TX PN of 2**62 - 1. As
2137 * per RFC 9000 s. 12.3, if this happens we MUST close the connection
2138 * without sending a CONNECTION_CLOSE frame. This is actually handled as
2139 * an emergent consequence of our design, as the TX packetiser will
2140 * never transmit another packet when the TX PN reaches the limit.
2141 *
2142 * Calling the below function terminates the connection; its attempt to
2143 * schedule a CONNECTION_CLOSE frame will not actually cause a packet to
2144 * be transmitted for this reason.
2145 */
2146 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_INTERNAL_ERROR, 0,
2147 "internal error");
2148 break; /* Internal failure (e.g. allocation, assertion) */
2149 }
2150
2151 /* Flush packets to network. */
2152 switch (ossl_qtx_flush_net(ch->qtx)) {
2153 case QTX_FLUSH_NET_RES_OK:
2154 case QTX_FLUSH_NET_RES_TRANSIENT_FAIL:
2155 /* Best effort, done for now. */
2156 break;
2157
2158 case QTX_FLUSH_NET_RES_PERMANENT_FAIL:
2159 default:
2160 /* Permanent error on the underlying network BIO; start terminating. */
2161 ch_raise_net_error(ch);
2162 break;
2163 }
2164
2165 return 1;
2166 }
2167
2168 /* Determine next tick deadline. */
2169 static OSSL_TIME ch_determine_next_tick_deadline(QUIC_CHANNEL *ch)
2170 {
2171 OSSL_TIME deadline;
2172 int i;
2173
2174 if (ossl_quic_channel_is_terminated(ch))
2175 return ossl_time_infinite();
2176
2177 deadline = ossl_ackm_get_loss_detection_deadline(ch->ackm);
2178 if (ossl_time_is_zero(deadline))
2179 deadline = ossl_time_infinite();
2180
2181 /*
2182 * If the CC will let us send ACKs, check the ACK deadline for all
2183 * enc_levels that are actually provisioned.
2184 */
2185 if (ch->cc_method->get_tx_allowance(ch->cc_data) > 0) {
2186 for (i = 0; i < QUIC_ENC_LEVEL_NUM; i++) {
2187 if (ossl_qtx_is_enc_level_provisioned(ch->qtx, i)) {
2188 deadline = ossl_time_min(deadline,
2189 ossl_ackm_get_ack_deadline(ch->ackm,
2190 ossl_quic_enc_level_to_pn_space(i)));
2191 }
2192 }
2193 }
2194
2195 /* When will CC let us send more? */
2196 if (ossl_quic_tx_packetiser_has_pending(ch->txp, TX_PACKETISER_ARCHETYPE_NORMAL,
2197 TX_PACKETISER_BYPASS_CC))
2198 deadline = ossl_time_min(deadline,
2199 ch->cc_method->get_wakeup_deadline(ch->cc_data));
2200
2201 /* Is the terminating timer armed? */
2202 if (ossl_quic_channel_is_terminating(ch))
2203 deadline = ossl_time_min(deadline,
2204 ch->terminate_deadline);
2205 else if (!ossl_time_is_infinite(ch->idle_deadline))
2206 deadline = ossl_time_min(deadline,
2207 ch->idle_deadline);
2208
2209 /*
2210 * When do we need to send an ACK-eliciting packet to reset the idle
2211 * deadline timer for the peer?
2212 */
2213 if (!ossl_time_is_infinite(ch->ping_deadline))
2214 deadline = ossl_time_min(deadline,
2215 ch->ping_deadline);
2216
2217 /* When does the RXKU process complete? */
2218 if (ch->rxku_in_progress)
2219 deadline = ossl_time_min(deadline, ch->rxku_update_end_deadline);
2220
2221 return deadline;
2222 }
2223
2224 /*
2225 * QUIC Channel: Network BIO Configuration
2226 * =======================================
2227 */
2228
2229 /* Determines whether we can support a given poll descriptor. */
2230 static int validate_poll_descriptor(const BIO_POLL_DESCRIPTOR *d)
2231 {
2232 if (d->type == BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD && d->value.fd < 0)
2233 return 0;
2234
2235 return 1;
2236 }
2237
2238 BIO *ossl_quic_channel_get_net_rbio(QUIC_CHANNEL *ch)
2239 {
2240 return ch->net_rbio;
2241 }
2242
2243 BIO *ossl_quic_channel_get_net_wbio(QUIC_CHANNEL *ch)
2244 {
2245 return ch->net_wbio;
2246 }
2247
2248 /*
2249 * QUIC_CHANNEL does not ref any BIO it is provided with, nor is any ref
2250 * transferred to it. The caller (i.e., QUIC_CONNECTION) is responsible for
2251 * ensuring the BIO lasts until the channel is freed or the BIO is switched out
2252 * for another BIO by a subsequent successful call to this function.
2253 */
2254 int ossl_quic_channel_set_net_rbio(QUIC_CHANNEL *ch, BIO *net_rbio)
2255 {
2256 BIO_POLL_DESCRIPTOR d = {0};
2257
2258 if (ch->net_rbio == net_rbio)
2259 return 1;
2260
2261 if (net_rbio != NULL) {
2262 if (!BIO_get_rpoll_descriptor(net_rbio, &d))
2263 /* Non-pollable BIO */
2264 d.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
2265
2266 if (!validate_poll_descriptor(&d))
2267 return 0;
2268 }
2269
2270 ossl_quic_reactor_set_poll_r(&ch->rtor, &d);
2271 ossl_quic_demux_set_bio(ch->demux, net_rbio);
2272 ch->net_rbio = net_rbio;
2273 return 1;
2274 }
2275
2276 int ossl_quic_channel_set_net_wbio(QUIC_CHANNEL *ch, BIO *net_wbio)
2277 {
2278 BIO_POLL_DESCRIPTOR d = {0};
2279
2280 if (ch->net_wbio == net_wbio)
2281 return 1;
2282
2283 if (net_wbio != NULL) {
2284 if (!BIO_get_wpoll_descriptor(net_wbio, &d))
2285 /* Non-pollable BIO */
2286 d.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
2287
2288 if (!validate_poll_descriptor(&d))
2289 return 0;
2290 }
2291
2292 ossl_quic_reactor_set_poll_w(&ch->rtor, &d);
2293 ossl_qtx_set_bio(ch->qtx, net_wbio);
2294 ch->net_wbio = net_wbio;
2295 return 1;
2296 }
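
/*
 * Illustrative sketch only (not a usage requirement imposed by this file):
 * a caller owning a datagram BIO, here hypothetically named dgram_bio,
 * would typically plug it in for both directions and keep it alive until
 * the channel is freed or the BIO is switched out:
 *
 *     if (!ossl_quic_channel_set_net_rbio(ch, dgram_bio)
 *         || !ossl_quic_channel_set_net_wbio(ch, dgram_bio))
 *         goto err;
 */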
2297
2298 /*
2299 * QUIC Channel: Lifecycle Events
2300 * ==============================
2301 */
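
/*
 * Starts the channel as a client: informs the TXP of the peer address,
 * provisions Initial secrets derived from the initial DCID, moves to the
 * ACTIVE state and kicks off the TLS handshake. Not used on the server side;
 * idempotent once the channel has left the IDLE state.
 */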
2302 int ossl_quic_channel_start(QUIC_CHANNEL *ch)
2303 {
2304 if (ch->is_server)
2305 /*
2306 * This is not used by the server. The server moves to active
2307 * automatically on receiving an incoming connection.
2308 */
2309 return 0;
2310
2311 if (ch->state != QUIC_CHANNEL_STATE_IDLE)
2312 /* Calls to connect are idempotent */
2313 return 1;
2314
2315 /* Inform QTX of peer address. */
2316 if (!ossl_quic_tx_packetiser_set_peer(ch->txp, &ch->cur_peer_addr))
2317 return 0;
2318
2319 /* Plug in secrets for the Initial EL. */
2320 if (!ossl_quic_provide_initial_secret(ch->libctx,
2321 ch->propq,
2322 &ch->init_dcid,
2323 ch->is_server,
2324 ch->qrx, ch->qtx))
2325 return 0;
2326
2327 /* Change state. */
2328 ch->state = QUIC_CHANNEL_STATE_ACTIVE;
2329 ch->doing_proactive_ver_neg = 0; /* not currently supported */
2330
2331 /* Handshake layer: start (e.g. send CH). */
2332 if (!ossl_quic_tls_tick(ch->qtls))
2333 return 0;
2334
2335 ossl_quic_reactor_tick(&ch->rtor, 0); /* best effort */
2336 return 1;
2337 }
2338
2339 /* Start a locally initiated connection shutdown. */
2340 void ossl_quic_channel_local_close(QUIC_CHANNEL *ch, uint64_t app_error_code)
2341 {
2342 QUIC_TERMINATE_CAUSE tcause = {0};
2343
2344 if (ossl_quic_channel_is_term_any(ch))
2345 return;
2346
2347 tcause.app = 1;
2348 tcause.error_code = app_error_code;
2349 ch_start_terminating(ch, &tcause, 0);
2350 }
2351
2352 static void free_token(const unsigned char *buf, size_t buf_len, void *arg)
2353 {
2354 OPENSSL_free((unsigned char *)buf);
2355 }
2356
2357 /* Called when a server asks us to do a retry. */
2358 static int ch_retry(QUIC_CHANNEL *ch,
2359 const unsigned char *retry_token,
2360 size_t retry_token_len,
2361 const QUIC_CONN_ID *retry_scid)
2362 {
2363 void *buf;
2364
2365 /*
2366 * RFC 9000 s. 17.2.5.1: "A client MUST discard a Retry packet that contains
2367 * a SCID field that is identical to the DCID field of its initial packet."
2368 */
2369 if (ossl_quic_conn_id_eq(&ch->init_dcid, retry_scid))
2370 return 0;
2371
2372 /* We change to using the SCID in the Retry packet as the DCID. */
2373 if (!ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, retry_scid))
2374 return 0;
2375
2376 /*
2377 * Now we retry. We will release the Retry packet immediately, so copy
2378 * the token.
2379 */
2380 if ((buf = OPENSSL_memdup(retry_token, retry_token_len)) == NULL)
2381 return 0;
2382
2383 ossl_quic_tx_packetiser_set_initial_token(ch->txp, buf, retry_token_len,
2384 free_token, NULL);
2385
2386 ch->retry_scid = *retry_scid;
2387 ch->doing_retry = 1;
2388
2389 /*
2390 * We need to stimulate the Initial EL to generate the first CRYPTO frame
2391 * again. We can do this most cleanly by simply forcing the ACKM to consider
2392 * the first Initial packet as lost, which it effectively was as the server
2393 * hasn't processed it. This also maintains the desired behaviour with e.g.
2394 * PNs not resetting and so on.
2395 *
2396 * The PN we used initially is always zero, because QUIC does not allow
2397 * repeated retries.
2398 */
2399 if (!ossl_ackm_mark_packet_pseudo_lost(ch->ackm, QUIC_PN_SPACE_INITIAL,
2400 /*PN=*/0))
2401 return 0;
2402
2403 /*
2404 * Plug in new secrets for the Initial EL. This is the only time we change
2405 * the secrets for an EL after we already provisioned it.
2406 */
2407 if (!ossl_quic_provide_initial_secret(ch->libctx,
2408 ch->propq,
2409 &ch->retry_scid,
2410 /*is_server=*/0,
2411 ch->qrx, ch->qtx))
2412 return 0;
2413
2414 return 1;
2415 }
2416
2417 /* Called when an EL is to be discarded. */
2418 static int ch_discard_el(QUIC_CHANNEL *ch,
2419 uint32_t enc_level)
2420 {
2421 if (!ossl_assert(enc_level < QUIC_ENC_LEVEL_1RTT))
2422 return 0;
2423
2424 if ((ch->el_discarded & (1U << enc_level)) != 0)
2425 /* Already done. */
2426 return 1;
2427
2428 /* Best effort for all of these. */
2429 ossl_quic_tx_packetiser_discard_enc_level(ch->txp, enc_level);
2430 ossl_qrx_discard_enc_level(ch->qrx, enc_level);
2431 ossl_qtx_discard_enc_level(ch->qtx, enc_level);
2432
2433 if (enc_level != QUIC_ENC_LEVEL_0RTT) {
2434 uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
2435
2436 ossl_ackm_on_pkt_space_discarded(ch->ackm, pn_space);
2437
2438 /* We should still have crypto streams at this point. */
2439 if (!ossl_assert(ch->crypto_send[pn_space] != NULL)
2440 || !ossl_assert(ch->crypto_recv[pn_space] != NULL))
2441 return 0;
2442
2443 /* Get rid of the crypto stream state for the EL. */
2444 ossl_quic_sstream_free(ch->crypto_send[pn_space]);
2445 ch->crypto_send[pn_space] = NULL;
2446
2447 ossl_quic_rstream_free(ch->crypto_recv[pn_space]);
2448 ch->crypto_recv[pn_space] = NULL;
2449 }
2450
2451 ch->el_discarded |= (1U << enc_level);
2452 return 1;
2453 }
2454
2455 /* Intended to be called by the RXDP. */
2456 int ossl_quic_channel_on_handshake_confirmed(QUIC_CHANNEL *ch)
2457 {
2458 if (ch->handshake_confirmed)
2459 return 1;
2460
2461 if (!ch->handshake_complete) {
2462 /*
2463 * Does not make sense for handshake to be confirmed before it is
2464 * completed.
2465 */
2466 ossl_quic_channel_raise_protocol_error(ch, QUIC_ERR_PROTOCOL_VIOLATION,
2467 OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE,
2468 "handshake cannot be confirmed "
2469 "before it is completed");
2470 return 0;
2471 }
2472
2473 ch_discard_el(ch, QUIC_ENC_LEVEL_HANDSHAKE);
2474 ch->handshake_confirmed = 1;
2475 ossl_ackm_on_handshake_confirmed(ch->ackm);
2476 return 1;
2477 }
2478
2479 /*
2480 * Master function used when we want to start tearing down a connection:
2481 *
2482 * - If the connection is still IDLE we can go straight to TERMINATED;
2483 *
2484 * - If we are already TERMINATED this is a no-op.
2485 *
2486 * - If we are TERMINATING - CLOSING and we have now got a CONNECTION_CLOSE
2487 * from the peer (tcause->remote == 1), we move to TERMINATING - DRAINING.
2488 *
2489 * - If we are TERMINATING - DRAINING, we remain here until the terminating
2490 * timer expires.
2491 *
2492 * - Otherwise, we are in ACTIVE and move to TERMINATING - CLOSING
2493 * if we caused the termination (e.g. we have sent a CONNECTION_CLOSE). Note
2494 * that we are considered to have caused a termination if we sent the first
2495 * CONNECTION_CLOSE frame, even if it is caused by a peer protocol
2496 * violation. If the peer sent the first CONNECTION_CLOSE frame, we move to
2497 * TERMINATING - DRAINING.
2498 *
2499 * We record the termination cause structure passed on the first call only.
2500 * Any successive calls have their termination cause data discarded;
2501 * once we start sending a CONNECTION_CLOSE frame, we don't change the details
2502 * in it.
2503 */
2504 static void ch_start_terminating(QUIC_CHANNEL *ch,
2505 const QUIC_TERMINATE_CAUSE *tcause,
2506 int force_immediate)
2507 {
2508 switch (ch->state) {
2509 default:
2510 case QUIC_CHANNEL_STATE_IDLE:
2511 ch->terminate_cause = *tcause;
2512 ch_on_terminating_timeout(ch);
2513 break;
2514
2515 case QUIC_CHANNEL_STATE_ACTIVE:
2516 ch->terminate_cause = *tcause;
2517
2518 if (!force_immediate) {
2519 ch->state = tcause->remote ? QUIC_CHANNEL_STATE_TERMINATING_DRAINING
2520 : QUIC_CHANNEL_STATE_TERMINATING_CLOSING;
2521 ch->terminate_deadline
2522 = ossl_time_add(get_time(ch),
2523 ossl_time_multiply(ossl_ackm_get_pto_duration(ch->ackm),
2524 3));
2525
2526 if (!tcause->remote) {
2527 OSSL_QUIC_FRAME_CONN_CLOSE f = {0};
2528
2529 /* best effort */
2530 f.error_code = ch->terminate_cause.error_code;
2531 f.frame_type = ch->terminate_cause.frame_type;
2532 f.is_app = ch->terminate_cause.app;
2533 ossl_quic_tx_packetiser_schedule_conn_close(ch->txp, &f);
2534 ch->conn_close_queued = 1;
2535 }
2536 } else {
2537 ch_on_terminating_timeout(ch);
2538 }
2539 break;
2540
2541 case QUIC_CHANNEL_STATE_TERMINATING_CLOSING:
2542 if (force_immediate)
2543 ch_on_terminating_timeout(ch);
2544 else if (tcause->remote)
2545 ch->state = QUIC_CHANNEL_STATE_TERMINATING_DRAINING;
2546
2547 break;
2548
2549 case QUIC_CHANNEL_STATE_TERMINATING_DRAINING:
2550 /*
2551 * Other than in the force-immediate case, we remain here until the
2552 * timeout expires.
2553 */
2554 if (force_immediate)
2555 ch_on_terminating_timeout(ch);
2556
2557 break;
2558
2559 case QUIC_CHANNEL_STATE_TERMINATED:
2560 /* No-op. */
2561 break;
2562 }
2563 }
2564
2565 /* For RXDP use. */
2566 void ossl_quic_channel_on_remote_conn_close(QUIC_CHANNEL *ch,
2567 OSSL_QUIC_FRAME_CONN_CLOSE *f)
2568 {
2569 QUIC_TERMINATE_CAUSE tcause = {0};
2570
2571 if (!ossl_quic_channel_is_active(ch))
2572 return;
2573
2574 tcause.remote = 1;
2575 tcause.app = f->is_app;
2576 tcause.error_code = f->error_code;
2577 tcause.frame_type = f->frame_type;
2578
2579 ch_start_terminating(ch, &tcause, 0);
2580 }
2581
2582 static void free_frame_data(unsigned char *buf, size_t buf_len, void *arg)
2583 {
2584 OPENSSL_free(buf);
2585 }
2586
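/*
 * Encodes a RETIRE_CONNECTION_ID frame for the given sequence number and
 * queues it on the CFQ for transmission in the application PN space.
 */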
2587 static int ch_enqueue_retire_conn_id(QUIC_CHANNEL *ch, uint64_t seq_num)
2588 {
2589 BUF_MEM *buf_mem;
2590 WPACKET wpkt;
2591 size_t l;
2592
2593 if ((buf_mem = BUF_MEM_new()) == NULL)
2594 return 0;
2595
2596 if (!WPACKET_init(&wpkt, buf_mem))
2597 goto err;
2598
2599 if (!ossl_quic_wire_encode_frame_retire_conn_id(&wpkt, seq_num)) {
2600 WPACKET_cleanup(&wpkt);
2601 goto err;
2602 }
2603
2604 WPACKET_finish(&wpkt);
2605 if (!WPACKET_get_total_written(&wpkt, &l))
2606 goto err;
2607
2608 if (ossl_quic_cfq_add_frame(ch->cfq, 1, QUIC_PN_SPACE_APP,
2609 OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID,
2610 (unsigned char *)buf_mem->data, l,
2611 free_frame_data, NULL) == NULL)
2612 goto err;
2613
2614 buf_mem->data = NULL;
2615 BUF_MEM_free(buf_mem);
2616 return 1;
2617
2618 err:
2619 ossl_quic_channel_raise_protocol_error(ch,
2620 QUIC_ERR_INTERNAL_ERROR,
2621 OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
2622 "internal error enqueueing retire conn id");
2623 BUF_MEM_free(buf_mem);
2624 return 0;
2625 }
2626
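/*
 * Handles an incoming NEW_CONNECTION_ID frame (RFC 9000 s. 19.15): updates
 * the DCID we use to address the peer and queues RETIRE_CONNECTION_ID frames
 * as required by the Retire Prior To field, raising a protocol error if the
 * peer violates the connection ID limits.
 */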
2627 void ossl_quic_channel_on_new_conn_id(QUIC_CHANNEL *ch,
2628 OSSL_QUIC_FRAME_NEW_CONN_ID *f)
2629 {
2630 uint64_t new_remote_seq_num = ch->cur_remote_seq_num;
2631 uint64_t new_retire_prior_to = ch->cur_retire_prior_to;
2632
2633 if (!ossl_quic_channel_is_active(ch))
2634 return;
2635
2636 /* We allow only two active connection ids; first check some constraints */
2637 if (ch->cur_remote_dcid.id_len == 0) {
2638 /* Changing from a zero-length connection ID is not allowed. */
2639 ossl_quic_channel_raise_protocol_error(ch,
2640 QUIC_ERR_PROTOCOL_VIOLATION,
2641 OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
2642 "zero length connection id in use");
2643
2644 return;
2645 }
2646
2647 if (f->seq_num > new_remote_seq_num)
2648 new_remote_seq_num = f->seq_num;
2649 if (f->retire_prior_to > new_retire_prior_to)
2650 new_retire_prior_to = f->retire_prior_to;
2651
2652 /*
2653 * RFC 9000 s. 5.1.1: An endpoint MUST NOT provide more connection IDs
2654 * than the peer's limit.
2655 *
2656 * After processing a NEW_CONNECTION_ID frame and adding and retiring
2657 * active connection IDs, if the number of active connection IDs exceeds
2658 * the value advertised in its active_connection_id_limit transport
2659 * parameter, an endpoint MUST close the connection with an error of
2660 * type CONNECTION_ID_LIMIT_ERROR.
2661 */
2662 if (new_remote_seq_num - new_retire_prior_to > 1) {
2663 ossl_quic_channel_raise_protocol_error(ch,
2664 QUIC_ERR_CONNECTION_ID_LIMIT_ERROR,
2665 OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
2666 "active_connection_id limit violated");
2667 return;
2668 }
2669
2670 /*
2671 * RFC 9000 s. 5.1.1: An endpoint MAY send connection IDs that temporarily
2672 * exceed a peer's limit if the NEW_CONNECTION_ID frame also requires
2673 * the retirement of any excess, by including a sufficiently large
2674 * value in the Retire Prior To field.
2675 *
2676 * RFC 9000 s. 5.1.2: An endpoint SHOULD allow for sending and tracking
2677 * a number of RETIRE_CONNECTION_ID frames of at least twice the value
2678 * of the active_connection_id_limit transport parameter. An endpoint
2679 * MUST NOT forget a connection ID without retiring it, though it MAY
2680 * choose to treat having connection IDs in need of retirement that
2681 * exceed this limit as a connection error of type CONNECTION_ID_LIMIT_ERROR.
2682 *
2683 * We are a little bit more liberal than the minimum mandated.
2684 */
2685 if (new_retire_prior_to - ch->cur_retire_prior_to > 10) {
2686 ossl_quic_channel_raise_protocol_error(ch,
2687 QUIC_ERR_CONNECTION_ID_LIMIT_ERROR,
2688 OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
2689 "retiring connection id limit violated");
2690
2691 return;
2692 }
2693
2694 if (new_remote_seq_num > ch->cur_remote_seq_num) {
2695 ch->cur_remote_seq_num = new_remote_seq_num;
2696 ch->cur_remote_dcid = f->conn_id;
2697 ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, &ch->cur_remote_dcid);
2698 }
2699
2700 /*
2701 * RFC 9000 s. 5.1.2: Upon receipt of an increased Retire Prior To
2702 * field, the peer MUST stop using the corresponding connection IDs
2703 * and retire them with RETIRE_CONNECTION_ID frames before adding the
2704 * newly provided connection ID to the set of active connection IDs.
2705 */
2706
2707 /*
2708 * Note: RFC 9000 s. 19.15 says:
2709 * "An endpoint that receives a NEW_CONNECTION_ID frame with a sequence
2710 * number smaller than the Retire Prior To field of a previously received
2711 * NEW_CONNECTION_ID frame MUST send a corresponding
2712 * RETIRE_CONNECTION_ID frame that retires the newly received connection
2713 * ID, unless it has already done so for that sequence number."
2714 *
2715 * Since we currently always queue RETIRE_CONN_ID frames based on the Retire
2716 * Prior To field of a NEW_CONNECTION_ID frame immediately upon receiving
2717 * that NEW_CONNECTION_ID frame, by definition this will always be met.
2718 * This may change in future when we change our CID handling.
2719 */
2720 while (new_retire_prior_to > ch->cur_retire_prior_to) {
2721 if (!ch_enqueue_retire_conn_id(ch, ch->cur_retire_prior_to))
2722 break;
2723 ++ch->cur_retire_prior_to;
2724 }
2725 }
2726
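/*
 * Saves the current thread's error state into the channel so that it can be
 * reported later via ossl_quic_channel_restore_err_state().
 */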
2727 static void ch_save_err_state(QUIC_CHANNEL *ch)
2728 {
2729 if (ch->err_state == NULL)
2730 ch->err_state = OSSL_ERR_STATE_new();
2731
2732 if (ch->err_state == NULL)
2733 return;
2734
2735 OSSL_ERR_STATE_save(ch->err_state);
2736 }
2737
2738 static void ch_raise_net_error(QUIC_CHANNEL *ch)
2739 {
2740 QUIC_TERMINATE_CAUSE tcause = {0};
2741
2742 ch->net_error = 1;
2743 ch_save_err_state(ch);
2744
2745 tcause.error_code = QUIC_ERR_INTERNAL_ERROR;
2746
2747 /*
2748 * Skip the Terminating state and go directly to Terminated; there is no
2749 * point trying to send a CONNECTION_CLOSE if we cannot communicate.
2750 */
2751 ch_start_terminating(ch, &tcause, 1);
2752 }
2753
2754 int ossl_quic_channel_net_error(QUIC_CHANNEL *ch)
2755 {
2756 return ch->net_error;
2757 }
2758
2759 void ossl_quic_channel_restore_err_state(QUIC_CHANNEL *ch)
2760 {
2761 if (ch == NULL)
2762 return;
2763
2764 OSSL_ERR_STATE_restore(ch->err_state);
2765 }
2766
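/*
 * Raises a QUIC protocol error: records the error code and the frame type
 * which triggered it (if any) and begins terminating the connection.
 */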
2767 void ossl_quic_channel_raise_protocol_error(QUIC_CHANNEL *ch,
2768 uint64_t error_code,
2769 uint64_t frame_type,
2770 const char *reason)
2771 {
2772 QUIC_TERMINATE_CAUSE tcause = {0};
2773
2774 if (error_code == QUIC_ERR_INTERNAL_ERROR)
2775 /* Internal errors might leave some errors on the stack. */
2776 ch_save_err_state(ch);
2777
2778 tcause.error_code = error_code;
2779 tcause.frame_type = frame_type;
2780
2781 ch_start_terminating(ch, &tcause, 0);
2782 }
2783
2784 /*
2785 * Called once the terminating timer expires, meaning we move from TERMINATING
2786 * to TERMINATED.
2787 */
2788 static void ch_on_terminating_timeout(QUIC_CHANNEL *ch)
2789 {
2790 ch->state = QUIC_CHANNEL_STATE_TERMINATED;
2791 }
2792
2793 /*
2794 * Updates our idle deadline. Called when an event happens which should bump the
2795 * idle timeout.
2796 */
2797 static void ch_update_idle(QUIC_CHANNEL *ch)
2798 {
2799 if (ch->max_idle_timeout == 0)
2800 ch->idle_deadline = ossl_time_infinite();
2801 else
2802 ch->idle_deadline = ossl_time_add(get_time(ch),
2803 ossl_ms2time(ch->max_idle_timeout));
2804 }
2805
2806 /*
2807 * Updates our ping deadline, which determines when we next generate a ping if
2808 * we don't have any other ACK-eliciting frames to send.
2809 */
2810 static void ch_update_ping_deadline(QUIC_CHANNEL *ch)
2811 {
2812 if (ch->max_idle_timeout > 0) {
2813 /*
2814 * Maximum amount of time without traffic before we send a PING to keep
2815 * the connection open. Usually we use max_idle_timeout/2, but we cap the
2816 * period at the assumed NAT interval so that NAT devices don't have
2817 * their state time out (RFC 9000 s. 10.1.2).
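 *
 * For example (purely illustrative): with a negotiated max_idle_timeout of
 * 30000 ms, half of that is 15 s, which is below the 25 s MAX_NAT_INTERVAL,
 * so the PING deadline is 15 s after now; with a max_idle_timeout of
 * 120000 ms, the 60 s half-interval is capped to 25 s.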
2818 */
2819 OSSL_TIME max_span
2820 = ossl_time_divide(ossl_ms2time(ch->max_idle_timeout), 2);
2821
2822 max_span = ossl_time_min(max_span, MAX_NAT_INTERVAL);
2823
2824 ch->ping_deadline = ossl_time_add(get_time(ch), max_span);
2825 } else {
2826 ch->ping_deadline = ossl_time_infinite();
2827 }
2828 }
2829
2830 /* Called when the idle timeout expires. */
2831 static void ch_on_idle_timeout(QUIC_CHANNEL *ch)
2832 {
2833 /*
2834 * Idle timeout does not have an error code associated with it because a
2835 * CONN_CLOSE is never sent for it. We shouldn't use this data once we reach
2836 * TERMINATED anyway.
2837 */
2838 ch->terminate_cause.app = 0;
2839 ch->terminate_cause.error_code = UINT64_MAX;
2840 ch->terminate_cause.frame_type = 0;
2841
2842 ch->state = QUIC_CHANNEL_STATE_TERMINATED;
2843 }
2844
2845 /* Called when we, as a server, get a new incoming connection. */
2846 static int ch_server_on_new_conn(QUIC_CHANNEL *ch, const BIO_ADDR *peer,
2847 const QUIC_CONN_ID *peer_scid,
2848 const QUIC_CONN_ID *peer_dcid)
2849 {
2850 if (!ossl_assert(ch->state == QUIC_CHANNEL_STATE_IDLE && ch->is_server))
2851 return 0;
2852
2853 /* Generate a SCID we will use for the connection. */
2854 if (!gen_rand_conn_id(ch->libctx, INIT_DCID_LEN,
2855 &ch->cur_local_cid))
2856 return 0;
2857
2858 /* Note our newly learnt peer address and CIDs. */
2859 ch->cur_peer_addr = *peer;
2860 ch->init_dcid = *peer_dcid;
2861 ch->cur_remote_dcid = *peer_scid;
2862
2863 /* Inform QTX of peer address. */
2864 if (!ossl_quic_tx_packetiser_set_peer(ch->txp, &ch->cur_peer_addr))
2865 return 0;
2866
2867 /* Inform TXP of desired CIDs. */
2868 if (!ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, &ch->cur_remote_dcid))
2869 return 0;
2870
2871 if (!ossl_quic_tx_packetiser_set_cur_scid(ch->txp, &ch->cur_local_cid))
2872 return 0;
2873
2874 /* Plug in secrets for the Initial EL. */
2875 if (!ossl_quic_provide_initial_secret(ch->libctx,
2876 ch->propq,
2877 &ch->init_dcid,
2878 /*is_server=*/1,
2879 ch->qrx, ch->qtx))
2880 return 0;
2881
2882 /* Register our local CID in the DEMUX. */
2883 if (!ossl_qrx_add_dst_conn_id(ch->qrx, &ch->cur_local_cid))
2884 return 0;
2885
2886 /* Change state. */
2887 ch->state = QUIC_CHANNEL_STATE_ACTIVE;
2888 ch->doing_proactive_ver_neg = 0; /* not currently supported */
2889 return 1;
2890 }
2891
2892 SSL *ossl_quic_channel_get0_ssl(QUIC_CHANNEL *ch)
2893 {
2894 return ch->tls;
2895 }
2896
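/*
 * Initialises send/receive buffers and per-stream flow control state for a
 * newly allocated QUIC_STREAM, applying the peer's initial stream data
 * limits if the remote transport parameters have already been received.
 */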
2897 static int ch_init_new_stream(QUIC_CHANNEL *ch, QUIC_STREAM *qs,
2898 int can_send, int can_recv)
2899 {
2900 uint64_t rxfc_wnd;
2901 int server_init = ossl_quic_stream_is_server_init(qs);
2902 int local_init = (ch->is_server == server_init);
2903 int is_uni = !ossl_quic_stream_is_bidi(qs);
2904
2905 if (can_send)
2906 if ((qs->sstream = ossl_quic_sstream_new(INIT_APP_BUF_LEN)) == NULL)
2907 goto err;
2908
2909 if (can_recv)
2910 if ((qs->rstream = ossl_quic_rstream_new(NULL, NULL, 0)) == NULL)
2911 goto err;
2912
2913 /* TXFC */
2914 if (!ossl_quic_txfc_init(&qs->txfc, &ch->conn_txfc))
2915 goto err;
2916
2917 if (ch->got_remote_transport_params) {
2918 /*
2919 * If we already got peer TPs we need to apply the initial CWM credit
2920 * now. If we didn't already get peer TPs this will be done
2921 * automatically for all extant streams when we do.
2922 */
2923 if (can_send) {
2924 uint64_t cwm;
2925
2926 if (is_uni)
2927 cwm = ch->rx_init_max_stream_data_uni;
2928 else if (local_init)
2929 cwm = ch->rx_init_max_stream_data_bidi_local;
2930 else
2931 cwm = ch->rx_init_max_stream_data_bidi_remote;
2932
2933 ossl_quic_txfc_bump_cwm(&qs->txfc, cwm);
2934 }
2935 }
2936
2937 /* RXFC */
2938 if (!can_recv)
2939 rxfc_wnd = 0;
2940 else if (is_uni)
2941 rxfc_wnd = ch->tx_init_max_stream_data_uni;
2942 else if (local_init)
2943 rxfc_wnd = ch->tx_init_max_stream_data_bidi_local;
2944 else
2945 rxfc_wnd = ch->tx_init_max_stream_data_bidi_remote;
2946
2947 if (!ossl_quic_rxfc_init(&qs->rxfc, &ch->conn_rxfc,
2948 rxfc_wnd,
2949 DEFAULT_STREAM_RXFC_MAX_WND_MUL * rxfc_wnd,
2950 get_time, ch))
2951 goto err;
2952
2953 return 1;
2954
2955 err:
2956 ossl_quic_sstream_free(qs->sstream);
2957 qs->sstream = NULL;
2958 ossl_quic_rstream_free(qs->rstream);
2959 qs->rstream = NULL;
2960 return 0;
2961 }
2962
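/*
 * Creates a new locally-initiated stream, unidirectional or bidirectional as
 * requested, assigning it the next available local stream ordinal.
 */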
2963 QUIC_STREAM *ossl_quic_channel_new_stream_local(QUIC_CHANNEL *ch, int is_uni)
2964 {
2965 QUIC_STREAM *qs;
2966 int type;
2967 uint64_t stream_id, *p_next_ordinal;
2968
2969 type = ch->is_server ? QUIC_STREAM_INITIATOR_SERVER
2970 : QUIC_STREAM_INITIATOR_CLIENT;
2971
2972 if (is_uni) {
2973 p_next_ordinal = &ch->next_local_stream_ordinal_uni;
2974 type |= QUIC_STREAM_DIR_UNI;
2975 } else {
2976 p_next_ordinal = &ch->next_local_stream_ordinal_bidi;
2977 type |= QUIC_STREAM_DIR_BIDI;
2978 }
2979
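/*
 * A stream ID is the 62-bit stream ordinal shifted left by two bits, with
 * the two low bits encoding the initiator and directionality (RFC 9000
 * s. 2.1); hence the ordinal must fit in 62 bits.
 */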
2980 if (*p_next_ordinal >= ((uint64_t)1) << 62)
2981 return NULL;
2982
2983 stream_id = ((*p_next_ordinal) << 2) | type;
2984
2985 if ((qs = ossl_quic_stream_map_alloc(&ch->qsm, stream_id, type)) == NULL)
2986 return NULL;
2987
2988 /* Locally-initiated stream, so we always want a send buffer. */
2989 if (!ch_init_new_stream(ch, qs, /*can_send=*/1, /*can_recv=*/!is_uni))
2990 goto err;
2991
2992 ++*p_next_ordinal;
2993 return qs;
2994
2995 err:
2996 ossl_quic_stream_map_release(&ch->qsm, qs);
2997 return NULL;
2998 }
2999
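/*
 * Creates stream state for a remotely-initiated stream with the given stream
 * ID, rejecting IDs whose initiator bits do not match the peer's role. The
 * new stream is either auto-rejected or pushed onto the accept queue
 * depending on the channel's configuration.
 */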
3000 QUIC_STREAM *ossl_quic_channel_new_stream_remote(QUIC_CHANNEL *ch,
3001 uint64_t stream_id)
3002 {
3003 uint64_t peer_role;
3004 int is_uni;
3005 QUIC_STREAM *qs;
3006
3007 peer_role = ch->is_server
3008 ? QUIC_STREAM_INITIATOR_CLIENT
3009 : QUIC_STREAM_INITIATOR_SERVER;
3010
3011 if ((stream_id & QUIC_STREAM_INITIATOR_MASK) != peer_role)
3012 return NULL;
3013
3014 is_uni = ((stream_id & QUIC_STREAM_DIR_MASK) == QUIC_STREAM_DIR_UNI);
3015
3016 qs = ossl_quic_stream_map_alloc(&ch->qsm, stream_id,
3017 stream_id & (QUIC_STREAM_INITIATOR_MASK
3018 | QUIC_STREAM_DIR_MASK));
3019 if (qs == NULL)
3020 return NULL;
3021
3022 if (!ch_init_new_stream(ch, qs, /*can_send=*/!is_uni, /*can_recv=*/1))
3023 goto err;
3024
3025 if (ch->incoming_stream_auto_reject)
3026 ossl_quic_channel_reject_stream(ch, qs);
3027 else
3028 ossl_quic_stream_map_push_accept_queue(&ch->qsm, qs);
3029
3030 return qs;
3031
3032 err:
3033 ossl_quic_stream_map_release(&ch->qsm, qs);
3034 return NULL;
3035 }
3036
3037 void ossl_quic_channel_set_incoming_stream_auto_reject(QUIC_CHANNEL *ch,
3038 int enable,
3039 uint64_t aec)
3040 {
3041 ch->incoming_stream_auto_reject = (enable != 0);
3042 ch->incoming_stream_auto_reject_aec = aec;
3043 }
3044
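/*
 * Rejects an incoming stream: schedules STOP_SENDING for its receive part
 * and RESET_STREAM for its send part using the configured application error
 * code, then marks the stream as deleted.
 */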
3045 void ossl_quic_channel_reject_stream(QUIC_CHANNEL *ch, QUIC_STREAM *qs)
3046 {
3047 ossl_quic_stream_map_stop_sending_recv_part(&ch->qsm, qs,
3048 ch->incoming_stream_auto_reject_aec);
3049
3050 ossl_quic_stream_map_reset_stream_send_part(&ch->qsm, qs,
3051 ch->incoming_stream_auto_reject_aec);
3052 qs->deleted = 1;
3053
3054 ossl_quic_stream_map_update_state(&ch->qsm, qs);
3055 }
3056
3057 /* Replace local connection ID in TXP and DEMUX for testing purposes. */
3058 int ossl_quic_channel_replace_local_cid(QUIC_CHANNEL *ch,
3059 const QUIC_CONN_ID *conn_id)
3060 {
3061 /* Remove the current local CID from the DEMUX. */
3062 if (!ossl_qrx_remove_dst_conn_id(ch->qrx, &ch->cur_local_cid))
3063 return 0;
3064 ch->cur_local_cid = *conn_id;
3065 /* Set in the TXP, used only for long header packets. */
3066 if (!ossl_quic_tx_packetiser_set_cur_scid(ch->txp, &ch->cur_local_cid))
3067 return 0;
3068 /* Register our new local CID in the DEMUX. */
3069 if (!ossl_qrx_add_dst_conn_id(ch->qrx, &ch->cur_local_cid))
3070 return 0;
3071 return 1;
3072 }
3073
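/*
 * Sets the message callback on the channel and propagates it to the QTX,
 * TXP and QRX.
 */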
3074 void ossl_quic_channel_set_msg_callback(QUIC_CHANNEL *ch,
3075 ossl_msg_cb msg_callback,
3076 SSL *msg_callback_ssl)
3077 {
3078 ch->msg_callback = msg_callback;
3079 ch->msg_callback_ssl = msg_callback_ssl;
3080 ossl_qtx_set_msg_callback(ch->qtx, msg_callback, msg_callback_ssl);
3081 ossl_quic_tx_packetiser_set_msg_callback(ch->txp, msg_callback,
3082 msg_callback_ssl);
3083 ossl_qrx_set_msg_callback(ch->qrx, msg_callback, msg_callback_ssl);
3084 }
3085
3086 void ossl_quic_channel_set_msg_callback_arg(QUIC_CHANNEL *ch,
3087 void *msg_callback_arg)
3088 {
3089 ch->msg_callback_arg = msg_callback_arg;
3090 ossl_qtx_set_msg_callback_arg(ch->qtx, msg_callback_arg);
3091 ossl_quic_tx_packetiser_set_msg_callback_arg(ch->txp, msg_callback_arg);
3092 ossl_qrx_set_msg_callback_arg(ch->qrx, msg_callback_arg);
3093 }
3094
3095 void ossl_quic_channel_set_txku_threshold_override(QUIC_CHANNEL *ch,
3096 uint64_t tx_pkt_threshold)
3097 {
3098 ch->txku_threshold_override = tx_pkt_threshold;
3099 }
3100
3101 uint64_t ossl_quic_channel_get_tx_key_epoch(QUIC_CHANNEL *ch)
3102 {
3103 return ossl_qtx_get_key_epoch(ch->qtx);
3104 }
3105
3106 uint64_t ossl_quic_channel_get_rx_key_epoch(QUIC_CHANNEL *ch)
3107 {
3108 return ossl_qrx_get_key_epoch(ch->qrx);
3109 }
3110
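/*
 * Initiates a locally-requested TX key update; fails if a key update is not
 * currently permitted.
 */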
3111 int ossl_quic_channel_trigger_txku(QUIC_CHANNEL *ch)
3112 {
3113 if (!txku_allowed(ch))
3114 return 0;
3115
3116 ch->ku_locally_initiated = 1;
3117 ch_trigger_txku(ch);
3118 return 1;
3119 }