/* ssl/quic/quic_record_rx.c — QUIC record layer, RX side (from OpenSSL) */
1 /*
2 * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10 #include <openssl/ssl.h>
11 #include "internal/quic_record_rx.h"
12 #include "quic_record_shared.h"
13 #include "internal/common.h"
14 #include "internal/list.h"
15 #include "../ssl_local.h"
16
17 /*
18 * Mark a packet in a bitfield.
19 *
20 * pkt_idx: index of packet within datagram.
21 */
22 static ossl_inline void pkt_mark(uint64_t *bitf, size_t pkt_idx)
23 {
24 assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
25 *bitf |= ((uint64_t)1) << pkt_idx;
26 }
27
28 /* Returns 1 if a packet is in the bitfield. */
29 static ossl_inline int pkt_is_marked(const uint64_t *bitf, size_t pkt_idx)
30 {
31 assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
32 return (*bitf & (((uint64_t)1) << pkt_idx)) != 0;
33 }
34
/*
 * RXE
 * ===
 *
 * RX Entries (RXEs) store processed (i.e., decrypted) data received from the
 * network. One RXE is used per received QUIC packet.
 */
typedef struct rxe_st RXE;

struct rxe_st {
    /* Public packet structure handed out to QRX users. */
    OSSL_QRX_PKT pkt;
    /* Linkage for the rx_free/rx_pending lists. */
    OSSL_LIST_MEMBER(rxe, RXE);
    /*
     * data_len:  valid payload bytes following this structure.
     * alloc_len: allocated payload bytes following this structure.
     * refcount:  nonzero while the RXE is handed out to the user.
     */
    size_t data_len, alloc_len, refcount;

    /* Extra fields for per-packet information. */
    QUIC_PKT_HDR hdr; /* data/len are decrypted payload */

    /* Decoded packet number. */
    QUIC_PN pn;

    /* Addresses copied from URXE. */
    BIO_ADDR peer, local;

    /* Time we received the packet (not when we processed it). */
    OSSL_TIME time;

    /* Total length of the datagram which contained this packet. */
    size_t datagram_len;

    /*
     * The key epoch the packet was received with. Always 0 for non-1-RTT
     * packets.
     */
    uint64_t key_epoch;

    /*
     * alloc_len allocated bytes (of which data_len bytes are valid) follow this
     * structure.
     */
};

DEFINE_LIST_OF(rxe, RXE);
typedef OSSL_LIST(rxe) RXE_LIST;
78
79 static ossl_inline unsigned char *rxe_data(const RXE *e)
80 {
81 return (unsigned char *)(e + 1);
82 }
83
/*
 * QRL
 * ===
 */
struct ossl_qrx_st {
    /* Library context and property query used to fetch crypto algorithms. */
    OSSL_LIB_CTX *libctx;
    const char *propq;

    /* Demux to receive datagrams from. */
    QUIC_DEMUX *demux;

    /* Length of connection IDs used in short-header packets in bytes. */
    size_t short_conn_id_len;

    /* Maximum number of deferred datagrams buffered at any one time. */
    size_t max_deferred;

    /* Current count of deferred datagrams. */
    size_t num_deferred;

    /*
     * List of URXEs which are filled with received encrypted data.
     * These are returned to the DEMUX's free list as they are processed.
     */
    QUIC_URXE_LIST urx_pending;

    /*
     * List of URXEs which we could not decrypt immediately and which are being
     * kept in case they can be decrypted later.
     */
    QUIC_URXE_LIST urx_deferred;

    /*
     * List of RXEs which are not currently in use. These are moved
     * to the pending list as they are filled.
     */
    RXE_LIST rx_free;

    /*
     * List of RXEs which are filled with decrypted packets ready to be passed
     * to the user. A RXE is removed from all lists inside the QRL when passed
     * to the user, then returned to the free list when the user returns it.
     */
    RXE_LIST rx_pending;

    /* Largest PN we have received and processed in a given PN space. */
    QUIC_PN largest_pn[QUIC_PN_SPACE_NUM];

    /* Per encryption-level state. */
    OSSL_QRL_ENC_LEVEL_SET el_set;

    /* Bytes we have received since this counter was last cleared. */
    uint64_t bytes_received;

    /*
     * Number of forged packets we have received since the QRX was instantiated.
     * Note that as per RFC 9001, this is connection-level state; it is not per
     * EL and is not reset by a key update.
     */
    uint64_t forged_pkt_count;

    /*
     * The PN the current key epoch started at, inclusive.
     */
    uint64_t cur_epoch_start_pn;

    /* Validation callback. */
    ossl_qrx_late_validation_cb *validation_cb;
    void *validation_cb_arg;

    /* Key update callback. */
    ossl_qrx_key_update_cb *key_update_cb;
    void *key_update_cb_arg;

    /* Initial key phase. For debugging use only; always 0 in real use. */
    unsigned char init_key_phase_bit;

    /* Are we allowed to process 1-RTT packets yet? */
    unsigned char allow_1rtt;

    /* Message callback related arguments */
    ossl_msg_cb msg_callback;
    void *msg_callback_arg;
    SSL *msg_callback_ssl;
};
169
170 static void qrx_on_rx(QUIC_URXE *urxe, void *arg);
171
172 OSSL_QRX *ossl_qrx_new(const OSSL_QRX_ARGS *args)
173 {
174 OSSL_QRX *qrx;
175 size_t i;
176
177 if (args->demux == NULL || args->max_deferred == 0)
178 return 0;
179
180 qrx = OPENSSL_zalloc(sizeof(OSSL_QRX));
181 if (qrx == NULL)
182 return 0;
183
184 for (i = 0; i < OSSL_NELEM(qrx->largest_pn); ++i)
185 qrx->largest_pn[i] = args->init_largest_pn[i];
186
187 qrx->libctx = args->libctx;
188 qrx->propq = args->propq;
189 qrx->demux = args->demux;
190 qrx->short_conn_id_len = args->short_conn_id_len;
191 qrx->init_key_phase_bit = args->init_key_phase_bit;
192 qrx->max_deferred = args->max_deferred;
193 return qrx;
194 }
195
196 static void qrx_cleanup_rxl(RXE_LIST *l)
197 {
198 RXE *e, *enext;
199
200 for (e = ossl_list_rxe_head(l); e != NULL; e = enext) {
201 enext = ossl_list_rxe_next(e);
202 ossl_list_rxe_remove(l, e);
203 OPENSSL_free(e);
204 }
205 }
206
207 static void qrx_cleanup_urxl(OSSL_QRX *qrx, QUIC_URXE_LIST *l)
208 {
209 QUIC_URXE *e, *enext;
210
211 for (e = ossl_list_urxe_head(l); e != NULL; e = enext) {
212 enext = ossl_list_urxe_next(e);
213 ossl_list_urxe_remove(l, e);
214 ossl_quic_demux_release_urxe(qrx->demux, e);
215 }
216 }
217
/*
 * Free the QRX and all resources it owns. A NULL argument is a no-op.
 *
 * Must unregister from the demuxer before releasing URXEs so no further
 * datagrams are routed to this QRX while it is being torn down.
 */
void ossl_qrx_free(OSSL_QRX *qrx)
{
    uint32_t i;

    if (qrx == NULL)
        return;

    /* Unregister from the RX DEMUX. */
    ossl_quic_demux_unregister_by_cb(qrx->demux, qrx_on_rx, qrx);

    /* Free RXE queue data. */
    qrx_cleanup_rxl(&qrx->rx_free);
    qrx_cleanup_rxl(&qrx->rx_pending);
    qrx_cleanup_urxl(qrx, &qrx->urx_pending);
    qrx_cleanup_urxl(qrx, &qrx->urx_deferred);

    /* Drop keying material and crypto resources. */
    for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
        ossl_qrl_enc_level_set_discard(&qrx->el_set, i);

    OPENSSL_free(qrx);
}
240
/*
 * Inject a received datagram (URXE) into the QRX for later processing.
 *
 * Resets the QRX's per-URXE bookkeeping fields and queues the URXE on the
 * pending list. Fires the message callback (if set) with the raw datagram
 * bytes, which follow immediately after the URXE header (urxe + 1).
 */
void ossl_qrx_inject_urxe(OSSL_QRX *qrx, QUIC_URXE *urxe)
{
    /* Initialize our own fields inside the URXE and add to the pending list. */
    urxe->processed = 0;
    urxe->hpr_removed = 0;
    urxe->deferred = 0;
    ossl_list_urxe_insert_tail(&qrx->urx_pending, urxe);

    if (qrx->msg_callback != NULL)
        qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM, urxe + 1,
                          urxe->data_len, qrx->msg_callback_ssl,
                          qrx->msg_callback_arg);
}
254
255 static void qrx_on_rx(QUIC_URXE *urxe, void *arg)
256 {
257 OSSL_QRX *qrx = arg;
258
259 ossl_qrx_inject_urxe(qrx, urxe);
260 }
261
262 int ossl_qrx_add_dst_conn_id(OSSL_QRX *qrx,
263 const QUIC_CONN_ID *dst_conn_id)
264 {
265 return ossl_quic_demux_register(qrx->demux,
266 dst_conn_id,
267 qrx_on_rx,
268 qrx);
269 }
270
/* Stop routing datagrams bearing the given DCID to this QRX. */
int ossl_qrx_remove_dst_conn_id(OSSL_QRX *qrx,
                                const QUIC_CONN_ID *dst_conn_id)
{
    return ossl_quic_demux_unregister(qrx->demux, dst_conn_id);
}
276
277 static void qrx_requeue_deferred(OSSL_QRX *qrx)
278 {
279 QUIC_URXE *e;
280
281 while ((e = ossl_list_urxe_head(&qrx->urx_deferred)) != NULL) {
282 ossl_list_urxe_remove(&qrx->urx_deferred, e);
283 ossl_list_urxe_insert_head(&qrx->urx_pending, e);
284 }
285 }
286
287 int ossl_qrx_provide_secret(OSSL_QRX *qrx, uint32_t enc_level,
288 uint32_t suite_id, EVP_MD *md,
289 const unsigned char *secret, size_t secret_len)
290 {
291 if (enc_level >= QUIC_ENC_LEVEL_NUM)
292 return 0;
293
294 if (!ossl_qrl_enc_level_set_provide_secret(&qrx->el_set,
295 qrx->libctx,
296 qrx->propq,
297 enc_level,
298 suite_id,
299 md,
300 secret,
301 secret_len,
302 qrx->init_key_phase_bit,
303 /*is_tx=*/0))
304 return 0;
305
306 /*
307 * Any packets we previously could not decrypt, we may now be able to
308 * decrypt, so move any datagrams containing deferred packets from the
309 * deferred to the pending queue.
310 */
311 qrx_requeue_deferred(qrx);
312 return 1;
313 }
314
315 int ossl_qrx_discard_enc_level(OSSL_QRX *qrx, uint32_t enc_level)
316 {
317 if (enc_level >= QUIC_ENC_LEVEL_NUM)
318 return 0;
319
320 ossl_qrl_enc_level_set_discard(&qrx->el_set, enc_level);
321 return 1;
322 }
323
324 /* Returns 1 if there are one or more pending RXEs. */
325 int ossl_qrx_processed_read_pending(OSSL_QRX *qrx)
326 {
327 return !ossl_list_rxe_is_empty(&qrx->rx_pending);
328 }
329
330 /* Returns 1 if there are yet-unprocessed packets. */
331 int ossl_qrx_unprocessed_read_pending(OSSL_QRX *qrx)
332 {
333 return !ossl_list_urxe_is_empty(&qrx->urx_pending)
334 || !ossl_list_urxe_is_empty(&qrx->urx_deferred);
335 }
336
337 /* Pop the next pending RXE. Returns NULL if no RXE is pending. */
338 static RXE *qrx_pop_pending_rxe(OSSL_QRX *qrx)
339 {
340 RXE *rxe = ossl_list_rxe_head(&qrx->rx_pending);
341
342 if (rxe == NULL)
343 return NULL;
344
345 ossl_list_rxe_remove(&qrx->rx_pending, rxe);
346 return rxe;
347 }
348
349 /* Allocate a new RXE. */
350 static RXE *qrx_alloc_rxe(size_t alloc_len)
351 {
352 RXE *rxe;
353
354 if (alloc_len >= SIZE_MAX - sizeof(RXE))
355 return NULL;
356
357 rxe = OPENSSL_malloc(sizeof(RXE) + alloc_len);
358 if (rxe == NULL)
359 return NULL;
360
361 ossl_list_rxe_init_elem(rxe);
362 rxe->alloc_len = alloc_len;
363 rxe->data_len = 0;
364 rxe->refcount = 0;
365 return rxe;
366 }
367
368 /*
369 * Ensures there is at least one RXE in the RX free list, allocating a new entry
370 * if necessary. The returned RXE is in the RX free list; it is not popped.
371 *
372 * alloc_len is a hint which may be used to determine the RXE size if allocation
373 * is necessary. Returns NULL on allocation failure.
374 */
375 static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len)
376 {
377 RXE *rxe;
378
379 if (ossl_list_rxe_head(&qrx->rx_free) != NULL)
380 return ossl_list_rxe_head(&qrx->rx_free);
381
382 rxe = qrx_alloc_rxe(alloc_len);
383 if (rxe == NULL)
384 return NULL;
385
386 ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
387 return rxe;
388 }
389
390 /*
391 * Resize the data buffer attached to an RXE to be n bytes in size. The address
392 * of the RXE might change; the new address is returned, or NULL on failure, in
393 * which case the original RXE remains valid.
394 */
395 static RXE *qrx_resize_rxe(RXE_LIST *rxl, RXE *rxe, size_t n)
396 {
397 RXE *rxe2, *p;
398
399 /* Should never happen. */
400 if (rxe == NULL)
401 return NULL;
402
403 if (n >= SIZE_MAX - sizeof(RXE))
404 return NULL;
405
406 /* Remove the item from the list to avoid accessing freed memory */
407 p = ossl_list_rxe_prev(rxe);
408 ossl_list_rxe_remove(rxl, rxe);
409
410 /* Should never resize an RXE which has been handed out. */
411 if (!ossl_assert(rxe->refcount == 0))
412 return NULL;
413
414 /*
415 * NOTE: We do not clear old memory, although it does contain decrypted
416 * data.
417 */
418 rxe2 = OPENSSL_realloc(rxe, sizeof(RXE) + n);
419 if (rxe2 == NULL) {
420 /* Resize failed, restore old allocation. */
421 if (p == NULL)
422 ossl_list_rxe_insert_head(rxl, rxe);
423 else
424 ossl_list_rxe_insert_after(rxl, p, rxe);
425 return NULL;
426 }
427
428 if (p == NULL)
429 ossl_list_rxe_insert_head(rxl, rxe2);
430 else
431 ossl_list_rxe_insert_after(rxl, p, rxe2);
432
433 rxe2->alloc_len = n;
434 return rxe2;
435 }
436
437 /*
438 * Ensure the data buffer attached to an RXE is at least n bytes in size.
439 * Returns NULL on failure.
440 */
441 static RXE *qrx_reserve_rxe(RXE_LIST *rxl,
442 RXE *rxe, size_t n)
443 {
444 if (rxe->alloc_len >= n)
445 return rxe;
446
447 return qrx_resize_rxe(rxl, rxe, n);
448 }
449
/* Return a RXE handed out to the user back to our freelist. */
static void qrx_recycle_rxe(OSSL_QRX *qrx, RXE *rxe)
{
    /* RXE should not be in any list */
    assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
    /* Clear stale pointers previously handed out via the pkt structure. */
    rxe->pkt.hdr = NULL;
    rxe->pkt.peer = NULL;
    rxe->pkt.local = NULL;
    ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
}
460
461 /*
462 * Given a pointer to a pointer pointing to a buffer and the size of that
463 * buffer, copy the buffer into *prxe, expanding the RXE if necessary (its
464 * pointer may change due to realloc). *pi is the offset in bytes to copy the
465 * buffer to, and on success is updated to be the offset pointing after the
466 * copied buffer. *pptr is updated to point to the new location of the buffer.
467 */
468 static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
469 const unsigned char **pptr, size_t buf_len)
470 {
471 RXE *rxe;
472 unsigned char *dst;
473
474 if (!buf_len)
475 return 1;
476
477 if ((rxe = qrx_reserve_rxe(&qrx->rx_free, *prxe, *pi + buf_len)) == NULL)
478 return 0;
479
480 *prxe = rxe;
481 dst = (unsigned char *)rxe_data(rxe) + *pi;
482
483 memcpy(dst, *pptr, buf_len);
484 *pi += buf_len;
485 *pptr = dst;
486 return 1;
487 }
488
/* Map a decoded packet type to the encryption level protecting that type. */
static uint32_t qrx_determine_enc_level(const QUIC_PKT_HDR *hdr)
{
    switch (hdr->type) {
    case QUIC_PKT_TYPE_INITIAL:
        return QUIC_ENC_LEVEL_INITIAL;
    case QUIC_PKT_TYPE_HANDSHAKE:
        return QUIC_ENC_LEVEL_HANDSHAKE;
    case QUIC_PKT_TYPE_0RTT:
        return QUIC_ENC_LEVEL_0RTT;
    case QUIC_PKT_TYPE_1RTT:
        return QUIC_ENC_LEVEL_1RTT;

    default:
        /* Unknown packet type: should not happen; aborts in debug builds. */
        assert(0);
        /* fallthrough */
    case QUIC_PKT_TYPE_RETRY:
    case QUIC_PKT_TYPE_VERSION_NEG:
        /* These types carry no encrypted payload; value is ignored. */
        return QUIC_ENC_LEVEL_INITIAL; /* not used */
    }
}
508
509 static uint32_t rxe_determine_pn_space(RXE *rxe)
510 {
511 uint32_t enc_level;
512
513 enc_level = qrx_determine_enc_level(&rxe->hdr);
514 return ossl_quic_enc_level_to_pn_space(enc_level);
515 }
516
517 static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
518 const QUIC_CONN_ID *first_dcid)
519 {
520 /* Ensure version is what we want. */
521 if (rxe->hdr.version != QUIC_VERSION_1
522 && rxe->hdr.version != QUIC_VERSION_NONE)
523 return 0;
524
525 /* Clients should never receive 0-RTT packets. */
526 if (rxe->hdr.type == QUIC_PKT_TYPE_0RTT)
527 return 0;
528
529 /* Version negotiation and retry packets must be the first packet. */
530 if (first_dcid != NULL && !ossl_quic_pkt_type_can_share_dgram(rxe->hdr.type))
531 return 0;
532
533 /*
534 * If this is not the first packet in a datagram, the destination connection
535 * ID must match the one in that packet.
536 */
537 if (first_dcid != NULL) {
538 if (!ossl_assert(first_dcid->id_len < QUIC_MAX_CONN_ID_LEN)
539 || !ossl_quic_conn_id_eq(first_dcid,
540 &rxe->hdr.dst_conn_id))
541 return 0;
542 }
543
544 return 1;
545 }
546
547 /* Validate header and decode PN. */
548 static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe)
549 {
550 int pn_space = rxe_determine_pn_space(rxe);
551
552 if (!ossl_quic_wire_decode_pkt_hdr_pn(rxe->hdr.pn, rxe->hdr.pn_len,
553 qrx->largest_pn[pn_space],
554 &rxe->pn))
555 return 0;
556
557 return 1;
558 }
559
560 /* Late packet header validation. */
561 static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe)
562 {
563 int pn_space = rxe_determine_pn_space(rxe);
564
565 /*
566 * Allow our user to decide whether to discard the packet before we try and
567 * decrypt it.
568 */
569 if (qrx->validation_cb != NULL
570 && !qrx->validation_cb(rxe->pn, pn_space, qrx->validation_cb_arg))
571 return 0;
572
573 return 1;
574 }
575
/*
 * Retrieves the correct cipher context for an EL and key phase. Writes the key
 * epoch number actually used for packet decryption to *rx_key_epoch.
 *
 * Returns an index into el->cctx[], or SIZE_MAX on error. *is_old_key is set
 * to 1 iff the packet (purportedly) belongs to the key epoch preceding the
 * current one. The value written to *rx_key_epoch is NOT yet authenticated
 * when this returns.
 */
static size_t qrx_get_cipher_ctx_idx(OSSL_QRX *qrx, OSSL_QRL_ENC_LEVEL *el,
                                     uint32_t enc_level,
                                     unsigned char key_phase_bit,
                                     uint64_t *rx_key_epoch,
                                     int *is_old_key)
{
    size_t idx;

    *is_old_key = 0;

    /* Only the 1-RTT EL has key phases; other ELs always use slot 0. */
    if (enc_level != QUIC_ENC_LEVEL_1RTT) {
        *rx_key_epoch = 0;
        return 0;
    }

    if (!ossl_assert(key_phase_bit <= 1))
        return SIZE_MAX;

    /*
     * RFC 9001 requires that we not create timing channels which could reveal
     * the decrypted value of the Key Phase bit. We usually handle this by
     * keeping the cipher contexts for both the current and next key epochs
     * around, so that we just select a cipher context blindly using the key
     * phase bit, which is time-invariant.
     *
     * In the COOLDOWN state, we only have one keyslot/cipher context. RFC 9001
     * suggests an implementation strategy to avoid creating a timing channel in
     * this case:
     *
     *   Endpoints can use randomized packet protection keys in place of
     *   discarded keys when key updates are not yet permitted.
     *
     * Rather than use a randomised key, we simply use our existing key as it
     * will fail AEAD verification anyway. This avoids the need to keep around a
     * dedicated garbage key.
     *
     * Note: Accessing different cipher contexts is technically not
     * timing-channel safe due to microarchitectural side channels, but this is
     * the best we can reasonably do and appears to be directly suggested by the
     * RFC.
     */
    idx = (el->state == QRL_EL_STATE_PROV_COOLDOWN ? el->key_epoch & 1
                                                   : key_phase_bit);

    /*
     * We also need to determine the key epoch number which this index
     * corresponds to. This is so we can report the key epoch number in the
     * OSSL_QRX_PKT structure, which callers need to validate whether it was OK
     * for a packet to be sent using a given key epoch's keys.
     */
    switch (el->state) {
    case QRL_EL_STATE_PROV_NORMAL:
        /*
         * If we are in the NORMAL state, usually the KP bit will match the LSB
         * of our key epoch, meaning no new key update is being signalled. If it
         * does not match, this means the packet (purports to) belong to
         * the next key epoch.
         *
         * IMPORTANT: The AEAD tag has not been verified yet when this function
         * is called, so this code must be timing-channel safe, hence use of
         * XOR. Moreover, the value output below is not yet authenticated.
         */
        *rx_key_epoch
            = el->key_epoch + ((el->key_epoch & 1) ^ (uint64_t)key_phase_bit);
        break;

    case QRL_EL_STATE_PROV_UPDATING:
        /*
         * If we are in the UPDATING state, usually the KP bit will match the
         * LSB of our key epoch. If it does not match, this means that the
         * packet (purports to) belong to the previous key epoch.
         *
         * As above, must be timing-channel safe.
         */
        *is_old_key = (el->key_epoch & 1) ^ (uint64_t)key_phase_bit;
        *rx_key_epoch = el->key_epoch - (uint64_t)*is_old_key;
        break;

    case QRL_EL_STATE_PROV_COOLDOWN:
        /*
         * If we are in COOLDOWN, there is only one key epoch we can possibly
         * decrypt with, so just try that. If AEAD decryption fails, the
         * value we output here isn't used anyway.
         */
        *rx_key_epoch = el->key_epoch;
        break;
    }

    return idx;
}
670
/*
 * Tries to decrypt a packet payload.
 *
 * Returns 1 on success or 0 on failure (which is permanent). The payload is
 * decrypted from src and written to dst. The buffer dst must be of at least
 * src_len bytes in length. The actual length of the output in bytes is written
 * to *dec_len on success, which will always be equal to or less than (usually
 * less than) src_len.
 *
 * aad/aad_len give the (unprotected) packet header used as AEAD AAD. On
 * success *rx_key_epoch receives the key epoch used for decryption.
 */
static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
                                const unsigned char *src,
                                size_t src_len, size_t *dec_len,
                                const unsigned char *aad, size_t aad_len,
                                QUIC_PN pn, uint32_t enc_level,
                                unsigned char key_phase_bit,
                                uint64_t *rx_key_epoch)
{
    int l = 0, l2 = 0, is_old_key, nonce_len;
    unsigned char nonce[EVP_MAX_IV_LENGTH];
    size_t i, cctx_idx;
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        enc_level, 1);
    EVP_CIPHER_CTX *cctx;

    /* Lengths are later passed through int-typed EVP APIs; reject overflow. */
    if (src_len > INT_MAX || aad_len > INT_MAX)
        return 0;

    /* We should not have been called if we do not have key material. */
    if (!ossl_assert(el != NULL))
        return 0;

    /* The ciphertext must be longer than the AEAD tag it carries. */
    if (el->tag_len >= src_len)
        return 0;

    /*
     * If we have failed to authenticate a certain number of ciphertexts, refuse
     * to decrypt any more ciphertexts.
     */
    if (qrx->forged_pkt_count >= ossl_qrl_get_suite_max_forged_pkt(el->suite_id))
        return 0;

    cctx_idx = qrx_get_cipher_ctx_idx(qrx, el, enc_level, key_phase_bit,
                                      rx_key_epoch, &is_old_key);
    if (!ossl_assert(cctx_idx < OSSL_NELEM(el->cctx)))
        return 0;

    if (is_old_key && pn >= qrx->cur_epoch_start_pn)
        /*
         * RFC 9001 s. 5.5: Once an endpoint successfully receives a packet with
         * a given PN, it MUST discard all packets in the same PN space with
         * higher PNs if they cannot be successfully unprotected with the same
         * key, or -- if there is a key update -- a subsequent packet protection
         * key.
         *
         * In other words, once a PN x triggers a KU, it is invalid for us to
         * receive a packet with a newer PN y (y > x) using the old keys.
         */
        return 0;

    cctx = el->cctx[cctx_idx];

    /* Construct nonce (nonce=IV ^ PN). */
    nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
    if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN)))
        return 0;

    /* XOR the big-endian PN into the trailing bytes of the IV. */
    memcpy(nonce, el->iv[cctx_idx], nonce_len);
    for (i = 0; i < sizeof(QUIC_PN); ++i)
        nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));

    /* type and key will already have been setup; feed the IV. */
    if (EVP_CipherInit_ex(cctx, NULL,
                          NULL, NULL, nonce, /*enc=*/0) != 1)
        return 0;

    /* Feed the AEAD tag we got so the cipher can validate it. */
    if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_SET_TAG,
                            el->tag_len,
                            (unsigned char *)src + src_len - el->tag_len) != 1)
        return 0;

    /* Feed AAD data. */
    if (EVP_CipherUpdate(cctx, NULL, &l, aad, aad_len) != 1)
        return 0;

    /* Feed encrypted packet body. */
    if (EVP_CipherUpdate(cctx, dst, &l, src, src_len - el->tag_len) != 1)
        return 0;

    /* Ensure authentication succeeded. */
    if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
        /* Authentication failed, increment failed auth counter. */
        ++qrx->forged_pkt_count;
        return 0;
    }

    *dec_len = l;
    return 1;
}
770
771 static ossl_inline void ignore_res(int x)
772 {
773 /* No-op. */
774 }
775
776 static void qrx_key_update_initiated(OSSL_QRX *qrx, QUIC_PN pn)
777 {
778 if (!ossl_qrl_enc_level_set_key_update(&qrx->el_set, QUIC_ENC_LEVEL_1RTT))
779 /* We are already in RXKU, so we don't call the callback again. */
780 return;
781
782 qrx->cur_epoch_start_pn = pn;
783
784 if (qrx->key_update_cb != NULL)
785 qrx->key_update_cb(pn, qrx->key_update_cb_arg);
786 }
787
788 /* Process a single packet in a datagram. */
789 static int qrx_process_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
790 PACKET *pkt, size_t pkt_idx,
791 QUIC_CONN_ID *first_dcid,
792 size_t datagram_len)
793 {
794 RXE *rxe;
795 const unsigned char *eop = NULL;
796 size_t i, aad_len = 0, dec_len = 0;
797 PACKET orig_pkt = *pkt;
798 const unsigned char *sop = PACKET_data(pkt);
799 unsigned char *dst;
800 char need_second_decode = 0, already_processed = 0;
801 QUIC_PKT_HDR_PTRS ptrs;
802 uint32_t pn_space, enc_level;
803 OSSL_QRL_ENC_LEVEL *el = NULL;
804 uint64_t rx_key_epoch = UINT64_MAX;
805
806 /*
807 * Get a free RXE. If we need to allocate a new one, use the packet length
808 * as a good ballpark figure.
809 */
810 rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(pkt));
811 if (rxe == NULL)
812 return 0;
813
814 /* Have we already processed this packet? */
815 if (pkt_is_marked(&urxe->processed, pkt_idx))
816 already_processed = 1;
817
818 /*
819 * Decode the header into the RXE structure. We first decrypt and read the
820 * unprotected part of the packet header (unless we already removed header
821 * protection, in which case we decode all of it).
822 */
823 need_second_decode = !pkt_is_marked(&urxe->hpr_removed, pkt_idx);
824 if (!ossl_quic_wire_decode_pkt_hdr(pkt,
825 qrx->short_conn_id_len,
826 need_second_decode, 0, &rxe->hdr, &ptrs))
827 goto malformed;
828
829 /*
830 * Our successful decode above included an intelligible length and the
831 * PACKET is now pointing to the end of the QUIC packet.
832 */
833 eop = PACKET_data(pkt);
834
835 /*
836 * Make a note of the first packet's DCID so we can later ensure the
837 * destination connection IDs of all packets in a datagram match.
838 */
839 if (pkt_idx == 0)
840 *first_dcid = rxe->hdr.dst_conn_id;
841
842 /*
843 * Early header validation. Since we now know the packet length, we can also
844 * now skip over it if we already processed it.
845 */
846 if (already_processed
847 || !qrx_validate_hdr_early(qrx, rxe, pkt_idx == 0 ? NULL : first_dcid))
848 /*
849 * Already processed packets are handled identically to malformed
850 * packets; i.e., they are ignored.
851 */
852 goto malformed;
853
854 if (!ossl_quic_pkt_type_is_encrypted(rxe->hdr.type)) {
855 /*
856 * Version negotiation and retry packets are a special case. They do not
857 * contain a payload which needs decrypting and have no header
858 * protection.
859 */
860
861 /* Just copy the payload from the URXE to the RXE. */
862 if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len)) == NULL)
863 /*
864 * Allocation failure. EOP will be pointing to the end of the
865 * datagram so processing of this datagram will end here.
866 */
867 goto malformed;
868
869 /* We are now committed to returning the packet. */
870 memcpy(rxe_data(rxe), rxe->hdr.data, rxe->hdr.len);
871 pkt_mark(&urxe->processed, pkt_idx);
872
873 rxe->hdr.data = rxe_data(rxe);
874 rxe->pn = QUIC_PN_INVALID;
875
876 rxe->data_len = rxe->hdr.len;
877 rxe->datagram_len = datagram_len;
878 rxe->key_epoch = 0;
879 rxe->peer = urxe->peer;
880 rxe->local = urxe->local;
881 rxe->time = urxe->time;
882
883 /* Move RXE to pending. */
884 ossl_list_rxe_remove(&qrx->rx_free, rxe);
885 ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
886 return 0; /* success, did not defer */
887 }
888
889 /* Determine encryption level of packet. */
890 enc_level = qrx_determine_enc_level(&rxe->hdr);
891
892 /* If we do not have keying material for this encryption level yet, defer. */
893 switch (ossl_qrl_enc_level_set_have_el(&qrx->el_set, enc_level)) {
894 case 1:
895 /* We have keys. */
896 if (enc_level == QUIC_ENC_LEVEL_1RTT && !qrx->allow_1rtt)
897 /*
898 * But we cannot process 1-RTT packets until the handshake is
899 * completed (RFC 9000 s. 5.7).
900 */
901 goto cannot_decrypt;
902
903 break;
904 case 0:
905 /* No keys yet. */
906 goto cannot_decrypt;
907 default:
908 /* We already discarded keys for this EL, we will never process this.*/
909 goto malformed;
910 }
911
912 /*
913 * We will copy any token included in the packet to the start of our RXE
914 * data buffer (so that we don't reference the URXE buffer any more and can
915 * recycle it). Track our position in the RXE buffer by index instead of
916 * pointer as the pointer may change as reallocs occur.
917 */
918 i = 0;
919
920 /*
921 * rxe->hdr.data is now pointing at the (encrypted) packet payload. rxe->hdr
922 * also has fields pointing into the PACKET buffer which will be going away
923 * soon (the URXE will be reused for another incoming packet).
924 *
925 * Firstly, relocate some of these fields into the RXE as needed.
926 *
927 * Relocate token buffer and fix pointer.
928 */
929 if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL
930 && !qrx_relocate_buffer(qrx, &rxe, &i, &rxe->hdr.token,
931 rxe->hdr.token_len))
932 goto malformed;
933
934 /* Now remove header protection. */
935 *pkt = orig_pkt;
936
937 el = ossl_qrl_enc_level_set_get(&qrx->el_set, enc_level, 1);
938 assert(el != NULL); /* Already checked above */
939
940 if (need_second_decode) {
941 if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
942 goto malformed;
943
944 /*
945 * We have removed header protection, so don't attempt to do it again if
946 * the packet gets deferred and processed again.
947 */
948 pkt_mark(&urxe->hpr_removed, pkt_idx);
949
950 /* Decode the now unprotected header. */
951 if (ossl_quic_wire_decode_pkt_hdr(pkt, qrx->short_conn_id_len,
952 0, 0, &rxe->hdr, NULL) != 1)
953 goto malformed;
954 }
955
956 /* Validate header and decode PN. */
957 if (!qrx_validate_hdr(qrx, rxe))
958 goto malformed;
959
960 if (qrx->msg_callback != NULL)
961 qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, sop,
962 eop - sop - rxe->hdr.len, qrx->msg_callback_ssl,
963 qrx->msg_callback_arg);
964
965 /*
966 * The AAD data is the entire (unprotected) packet header including the PN.
967 * The packet header has been unprotected in place, so we can just reuse the
968 * PACKET buffer. The header ends where the payload begins.
969 */
970 aad_len = rxe->hdr.data - sop;
971
972 /* Ensure the RXE buffer size is adequate for our payload. */
973 if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL) {
974 /*
975 * Allocation failure, treat as malformed and do not bother processing
976 * any further packets in the datagram as they are likely to also
977 * encounter allocation failures.
978 */
979 eop = NULL;
980 goto malformed;
981 }
982
983 /*
984 * We decrypt the packet body to immediately after the token at the start of
985 * the RXE buffer (where present).
986 *
987 * Do the decryption from the PACKET (which points into URXE memory) to our
988 * RXE payload (single-copy decryption), then fixup the pointers in the
989 * header to point to our new buffer.
990 *
991 * If decryption fails this is considered a permanent error; we defer
992 * packets we don't yet have decryption keys for above, so if this fails,
993 * something has gone wrong with the handshake process or a packet has been
994 * corrupted.
995 */
996 dst = (unsigned char *)rxe_data(rxe) + i;
997 if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
998 &dec_len, sop, aad_len, rxe->pn, enc_level,
999 rxe->hdr.key_phase, &rx_key_epoch))
1000 goto malformed;
1001
1002 /*
1003 * -----------------------------------------------------
1004 * IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
1005 * AND MUST BE TIMING-CHANNEL SAFE.
1006 * -----------------------------------------------------
1007 *
1008 * At this point, we have successfully authenticated the AEAD tag and no
1009 * longer need to worry about exposing the PN, PN length or Key Phase bit in
1010 * timing channels. Invoke any configured validation callback to allow for
1011 * rejection of duplicate PNs.
1012 */
1013 if (!qrx_validate_hdr_late(qrx, rxe))
1014 goto malformed;
1015
1016 /* Check for a Key Phase bit differing from our expectation. */
1017 if (rxe->hdr.type == QUIC_PKT_TYPE_1RTT
1018 && rxe->hdr.key_phase != (el->key_epoch & 1))
1019 qrx_key_update_initiated(qrx, rxe->pn);
1020
1021 /*
1022 * We have now successfully decrypted the packet payload. If there are
1023 * additional packets in the datagram, it is possible we will fail to
1024 * decrypt them and need to defer them until we have some key material we
1025 * don't currently possess. If this happens, the URXE will be moved to the
1026 * deferred queue. Since a URXE corresponds to one datagram, which may
1027 * contain multiple packets, we must ensure any packets we have already
1028 * processed in the URXE are not processed again (this is an RFC
1029 * requirement). We do this by marking the nth packet in the datagram as
1030 * processed.
1031 *
1032 * We are now committed to returning this decrypted packet to the user,
1033 * meaning we now consider the packet processed and must mark it
1034 * accordingly.
1035 */
1036 pkt_mark(&urxe->processed, pkt_idx);
1037
1038 /*
1039 * Update header to point to the decrypted buffer, which may be shorter
1040 * due to AEAD tags, block padding, etc.
1041 */
1042 rxe->hdr.data = dst;
1043 rxe->hdr.len = dec_len;
1044 rxe->data_len = dec_len;
1045 rxe->datagram_len = datagram_len;
1046 rxe->key_epoch = rx_key_epoch;
1047
1048 /* We processed the PN successfully, so update largest processed PN. */
1049 pn_space = rxe_determine_pn_space(rxe);
1050 if (rxe->pn > qrx->largest_pn[pn_space])
1051 qrx->largest_pn[pn_space] = rxe->pn;
1052
1053 /* Copy across network addresses and RX time from URXE to RXE. */
1054 rxe->peer = urxe->peer;
1055 rxe->local = urxe->local;
1056 rxe->time = urxe->time;
1057
1058 /* Move RXE to pending. */
1059 ossl_list_rxe_remove(&qrx->rx_free, rxe);
1060 ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
1061 return 0; /* success, did not defer; not distinguished from failure */
1062
1063 cannot_decrypt:
1064 /*
1065 * We cannot process this packet right now (but might be able to later). We
1066 * MUST attempt to process any other packets in the datagram, so defer it
1067 * and skip over it.
1068 */
1069 assert(eop != NULL && eop >= PACKET_data(pkt));
1070 /*
1071 * We don't care if this fails as it will just result in the packet being at
1072 * the end of the datagram buffer.
1073 */
1074 ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
1075 return 1; /* deferred */
1076
1077 malformed:
1078 if (eop != NULL) {
1079 /*
1080 * This packet cannot be processed and will never be processable. We
1081 * were at least able to decode its header and determine its length, so
1082 * we can skip over it and try to process any subsequent packets in the
1083 * datagram.
1084 *
1085 * Mark as processed as an optimization.
1086 */
1087 assert(eop >= PACKET_data(pkt));
1088 pkt_mark(&urxe->processed, pkt_idx);
1089 /* We don't care if this fails (see above) */
1090 ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
1091 } else {
1092 /*
1093 * This packet cannot be processed and will never be processable.
1094 * Because even its header is not intelligible, we cannot examine any
1095 * further packets in the datagram because its length cannot be
1096 * discerned.
1097 *
1098 * Advance over the entire remainder of the datagram, and mark it as
1099 * processed as an optimization.
1100 */
1101 pkt_mark(&urxe->processed, pkt_idx);
1102 /* We don't care if this fails (see above) */
1103 ignore_res(PACKET_forward(pkt, PACKET_remaining(pkt)));
1104 }
1105 return 0; /* failure, did not defer; not distinguished from success */
1106 }
1107
1108 /* Process a datagram which was received. */
1109 static int qrx_process_datagram(OSSL_QRX *qrx, QUIC_URXE *e,
1110 const unsigned char *data,
1111 size_t data_len)
1112 {
1113 int have_deferred = 0;
1114 PACKET pkt;
1115 size_t pkt_idx = 0;
1116 QUIC_CONN_ID first_dcid = { 255 };
1117
1118 qrx->bytes_received += data_len;
1119
1120 if (!PACKET_buf_init(&pkt, data, data_len))
1121 return 0;
1122
1123 for (; PACKET_remaining(&pkt) > 0; ++pkt_idx) {
1124 /*
1125 * A packet smaller than the minimum possible QUIC packet size is not
1126 * considered valid. We also ignore more than a certain number of
1127 * packets within the same datagram.
1128 */
1129 if (PACKET_remaining(&pkt) < QUIC_MIN_VALID_PKT_LEN
1130 || pkt_idx >= QUIC_MAX_PKT_PER_URXE)
1131 break;
1132
1133 /*
1134 * We note whether packet processing resulted in a deferral since
1135 * this means we need to move the URXE to the deferred list rather
1136 * than the free list after we're finished dealing with it for now.
1137 *
1138 * However, we don't otherwise care here whether processing succeeded or
1139 * failed, as the RFC says even if a packet in a datagram is malformed,
1140 * we should still try to process any packets following it.
1141 *
1142 * In the case where the packet is so malformed we can't determine its
1143 * length, qrx_process_pkt will take care of advancing to the end of
1144 * the packet, so we will exit the loop automatically in this case.
1145 */
1146 if (qrx_process_pkt(qrx, e, &pkt, pkt_idx, &first_dcid, data_len))
1147 have_deferred = 1;
1148 }
1149
1150 /* Only report whether there were any deferrals. */
1151 return have_deferred;
1152 }
1153
1154 /* Process a single pending URXE. */
1155 static int qrx_process_one_urxe(OSSL_QRX *qrx, QUIC_URXE *e)
1156 {
1157 int was_deferred;
1158
1159 /* The next URXE we process should be at the head of the pending list. */
1160 if (!ossl_assert(e == ossl_list_urxe_head(&qrx->urx_pending)))
1161 return 0;
1162
1163 /*
1164 * Attempt to process the datagram. The return value indicates only if
1165 * processing of the datagram was deferred. If we failed to process the
1166 * datagram, we do not attempt to process it again and silently eat the
1167 * error.
1168 */
1169 was_deferred = qrx_process_datagram(qrx, e, ossl_quic_urxe_data(e),
1170 e->data_len);
1171
1172 /*
1173 * Remove the URXE from the pending list and return it to
1174 * either the free or deferred list.
1175 */
1176 ossl_list_urxe_remove(&qrx->urx_pending, e);
1177 if (was_deferred > 0 &&
1178 (e->deferred || qrx->num_deferred < qrx->max_deferred)) {
1179 ossl_list_urxe_insert_tail(&qrx->urx_deferred, e);
1180 if (!e->deferred) {
1181 e->deferred = 1;
1182 ++qrx->num_deferred;
1183 }
1184 } else {
1185 if (e->deferred) {
1186 e->deferred = 0;
1187 --qrx->num_deferred;
1188 }
1189 ossl_quic_demux_release_urxe(qrx->demux, e);
1190 }
1191
1192 return 1;
1193 }
1194
1195 /* Process any pending URXEs to generate pending RXEs. */
1196 static int qrx_process_pending_urxl(OSSL_QRX *qrx)
1197 {
1198 QUIC_URXE *e;
1199
1200 while ((e = ossl_list_urxe_head(&qrx->urx_pending)) != NULL)
1201 if (!qrx_process_one_urxe(qrx, e))
1202 return 0;
1203
1204 return 1;
1205 }
1206
1207 int ossl_qrx_read_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT **ppkt)
1208 {
1209 RXE *rxe;
1210
1211 if (!ossl_qrx_processed_read_pending(qrx)) {
1212 if (!qrx_process_pending_urxl(qrx))
1213 return 0;
1214
1215 if (!ossl_qrx_processed_read_pending(qrx))
1216 return 0;
1217 }
1218
1219 rxe = qrx_pop_pending_rxe(qrx);
1220 if (!ossl_assert(rxe != NULL))
1221 return 0;
1222
1223 assert(rxe->refcount == 0);
1224 rxe->refcount = 1;
1225
1226 rxe->pkt.hdr = &rxe->hdr;
1227 rxe->pkt.pn = rxe->pn;
1228 rxe->pkt.time = rxe->time;
1229 rxe->pkt.datagram_len = rxe->datagram_len;
1230 rxe->pkt.peer
1231 = BIO_ADDR_family(&rxe->peer) != AF_UNSPEC ? &rxe->peer : NULL;
1232 rxe->pkt.local
1233 = BIO_ADDR_family(&rxe->local) != AF_UNSPEC ? &rxe->local : NULL;
1234 rxe->pkt.key_epoch = rxe->key_epoch;
1235 rxe->pkt.qrx = qrx;
1236 *ppkt = &rxe->pkt;
1237
1238 return 1;
1239 }
1240
1241 void ossl_qrx_pkt_release(OSSL_QRX_PKT *pkt)
1242 {
1243 RXE *rxe;
1244
1245 if (pkt == NULL)
1246 return;
1247
1248 rxe = (RXE *)pkt;
1249 assert(rxe->refcount > 0);
1250 if (--rxe->refcount == 0)
1251 qrx_recycle_rxe(pkt->qrx, rxe);
1252 }
1253
1254 void ossl_qrx_pkt_up_ref(OSSL_QRX_PKT *pkt)
1255 {
1256 RXE *rxe = (RXE *)pkt;
1257
1258 assert(rxe->refcount > 0);
1259 ++rxe->refcount;
1260 }
1261
1262 uint64_t ossl_qrx_get_bytes_received(OSSL_QRX *qrx, int clear)
1263 {
1264 uint64_t v = qrx->bytes_received;
1265
1266 if (clear)
1267 qrx->bytes_received = 0;
1268
1269 return v;
1270 }
1271
1272 int ossl_qrx_set_late_validation_cb(OSSL_QRX *qrx,
1273 ossl_qrx_late_validation_cb *cb,
1274 void *cb_arg)
1275 {
1276 qrx->validation_cb = cb;
1277 qrx->validation_cb_arg = cb_arg;
1278 return 1;
1279 }
1280
1281 int ossl_qrx_set_key_update_cb(OSSL_QRX *qrx,
1282 ossl_qrx_key_update_cb *cb,
1283 void *cb_arg)
1284 {
1285 qrx->key_update_cb = cb;
1286 qrx->key_update_cb_arg = cb_arg;
1287 return 1;
1288 }
1289
1290 uint64_t ossl_qrx_get_key_epoch(OSSL_QRX *qrx)
1291 {
1292 OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
1293 QUIC_ENC_LEVEL_1RTT, 1);
1294
1295 return el == NULL ? UINT64_MAX : el->key_epoch;
1296 }
1297
1298 int ossl_qrx_key_update_timeout(OSSL_QRX *qrx, int normal)
1299 {
1300 OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
1301 QUIC_ENC_LEVEL_1RTT, 1);
1302
1303 if (el == NULL)
1304 return 0;
1305
1306 if (el->state == QRL_EL_STATE_PROV_UPDATING
1307 && !ossl_qrl_enc_level_set_key_update_done(&qrx->el_set,
1308 QUIC_ENC_LEVEL_1RTT))
1309 return 0;
1310
1311 if (normal && el->state == QRL_EL_STATE_PROV_COOLDOWN
1312 && !ossl_qrl_enc_level_set_key_cooldown_done(&qrx->el_set,
1313 QUIC_ENC_LEVEL_1RTT))
1314 return 0;
1315
1316 return 1;
1317 }
1318
1319 uint64_t ossl_qrx_get_cur_forged_pkt_count(OSSL_QRX *qrx)
1320 {
1321 return qrx->forged_pkt_count;
1322 }
1323
1324 uint64_t ossl_qrx_get_max_forged_pkt_count(OSSL_QRX *qrx,
1325 uint32_t enc_level)
1326 {
1327 OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
1328 enc_level, 1);
1329
1330 return el == NULL ? UINT64_MAX
1331 : ossl_qrl_get_suite_max_forged_pkt(el->suite_id);
1332 }
1333
1334 void ossl_qrx_allow_1rtt_processing(OSSL_QRX *qrx)
1335 {
1336 if (qrx->allow_1rtt)
1337 return;
1338
1339 qrx->allow_1rtt = 1;
1340 qrx_requeue_deferred(qrx);
1341 }
1342
1343 void ossl_qrx_set_msg_callback(OSSL_QRX *qrx, ossl_msg_cb msg_callback,
1344 SSL *msg_callback_ssl)
1345 {
1346 qrx->msg_callback = msg_callback;
1347 qrx->msg_callback_ssl = msg_callback_ssl;
1348 }
1349
1350 void ossl_qrx_set_msg_callback_arg(OSSL_QRX *qrx, void *msg_callback_arg)
1351 {
1352 qrx->msg_callback_arg = msg_callback_arg;
1353 }