/*
 * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
10 #include <openssl/ssl.h>
11 #include "internal/quic_record_rx.h"
12 #include "quic_record_shared.h"
13 #include "internal/common.h"
14 #include "internal/list.h"
15 #include "../ssl_local.h"
18 * Mark a packet in a bitfield.
20 * pkt_idx: index of packet within datagram.
22 static ossl_inline
void pkt_mark(uint64_t *bitf
, size_t pkt_idx
)
24 assert(pkt_idx
< QUIC_MAX_PKT_PER_URXE
);
25 *bitf
|= ((uint64_t)1) << pkt_idx
;
28 /* Returns 1 if a packet is in the bitfield. */
29 static ossl_inline
int pkt_is_marked(const uint64_t *bitf
, size_t pkt_idx
)
31 assert(pkt_idx
< QUIC_MAX_PKT_PER_URXE
);
32 return (*bitf
& (((uint64_t)1) << pkt_idx
)) != 0;
39 * RX Entries (RXEs) store processed (i.e., decrypted) data received from the
40 * network. One RXE is used per received QUIC packet.
42 typedef struct rxe_st RXE
;
46 OSSL_LIST_MEMBER(rxe
, RXE
);
47 size_t data_len
, alloc_len
, refcount
;
49 /* Extra fields for per-packet information. */
50 QUIC_PKT_HDR hdr
; /* data/len are decrypted payload */
52 /* Decoded packet number. */
55 /* Addresses copied from URXE. */
58 /* Time we received the packet (not when we processed it). */
61 /* Total length of the datagram which contained this packet. */
65 * The key epoch the packet was received with. Always 0 for non-1-RTT
71 * alloc_len allocated bytes (of which data_len bytes are valid) follow this
76 DEFINE_LIST_OF(rxe
, RXE
);
77 typedef OSSL_LIST(rxe
) RXE_LIST
;
79 static ossl_inline
unsigned char *rxe_data(const RXE
*e
)
81 return (unsigned char *)(e
+ 1);
92 /* Demux to receive datagrams from. */
95 /* Length of connection IDs used in short-header packets in bytes. */
96 size_t short_conn_id_len
;
98 /* Maximum number of deferred datagrams buffered at any one time. */
101 /* Current count of deferred datagrams. */
105 * List of URXEs which are filled with received encrypted data.
106 * These are returned to the DEMUX's free list as they are processed.
108 QUIC_URXE_LIST urx_pending
;
111 * List of URXEs which we could not decrypt immediately and which are being
112 * kept in case they can be decrypted later.
114 QUIC_URXE_LIST urx_deferred
;
117 * List of RXEs which are not currently in use. These are moved
118 * to the pending list as they are filled.
123 * List of RXEs which are filled with decrypted packets ready to be passed
124 * to the user. A RXE is removed from all lists inside the QRL when passed
125 * to the user, then returned to the free list when the user returns it.
129 /* Largest PN we have received and processed in a given PN space. */
130 QUIC_PN largest_pn
[QUIC_PN_SPACE_NUM
];
132 /* Per encryption-level state. */
133 OSSL_QRL_ENC_LEVEL_SET el_set
;
135 /* Bytes we have received since this counter was last cleared. */
136 uint64_t bytes_received
;
139 * Number of forged packets we have received since the QRX was instantiated.
140 * Note that as per RFC 9001, this is connection-level state; it is not per
141 * EL and is not reset by a key update.
143 uint64_t forged_pkt_count
;
146 * The PN the current key epoch started at, inclusive.
148 uint64_t cur_epoch_start_pn
;
150 /* Validation callback. */
151 ossl_qrx_late_validation_cb
*validation_cb
;
152 void *validation_cb_arg
;
154 /* Key update callback. */
155 ossl_qrx_key_update_cb
*key_update_cb
;
156 void *key_update_cb_arg
;
158 /* Initial key phase. For debugging use only; always 0 in real use. */
159 unsigned char init_key_phase_bit
;
161 /* Are we allowed to process 1-RTT packets yet? */
162 unsigned char allow_1rtt
;
164 /* Message callback related arguments */
165 ossl_msg_cb msg_callback
;
166 void *msg_callback_arg
;
167 SSL
*msg_callback_ssl
;
170 static void qrx_on_rx(QUIC_URXE
*urxe
, void *arg
);
172 OSSL_QRX
*ossl_qrx_new(const OSSL_QRX_ARGS
*args
)
177 if (args
->demux
== NULL
|| args
->max_deferred
== 0)
180 qrx
= OPENSSL_zalloc(sizeof(OSSL_QRX
));
184 for (i
= 0; i
< OSSL_NELEM(qrx
->largest_pn
); ++i
)
185 qrx
->largest_pn
[i
] = args
->init_largest_pn
[i
];
187 qrx
->libctx
= args
->libctx
;
188 qrx
->propq
= args
->propq
;
189 qrx
->demux
= args
->demux
;
190 qrx
->short_conn_id_len
= args
->short_conn_id_len
;
191 qrx
->init_key_phase_bit
= args
->init_key_phase_bit
;
192 qrx
->max_deferred
= args
->max_deferred
;
196 static void qrx_cleanup_rxl(RXE_LIST
*l
)
200 for (e
= ossl_list_rxe_head(l
); e
!= NULL
; e
= enext
) {
201 enext
= ossl_list_rxe_next(e
);
202 ossl_list_rxe_remove(l
, e
);
207 static void qrx_cleanup_urxl(OSSL_QRX
*qrx
, QUIC_URXE_LIST
*l
)
209 QUIC_URXE
*e
, *enext
;
211 for (e
= ossl_list_urxe_head(l
); e
!= NULL
; e
= enext
) {
212 enext
= ossl_list_urxe_next(e
);
213 ossl_list_urxe_remove(l
, e
);
214 ossl_quic_demux_release_urxe(qrx
->demux
, e
);
218 void ossl_qrx_free(OSSL_QRX
*qrx
)
225 /* Unregister from the RX DEMUX. */
226 ossl_quic_demux_unregister_by_cb(qrx
->demux
, qrx_on_rx
, qrx
);
228 /* Free RXE queue data. */
229 qrx_cleanup_rxl(&qrx
->rx_free
);
230 qrx_cleanup_rxl(&qrx
->rx_pending
);
231 qrx_cleanup_urxl(qrx
, &qrx
->urx_pending
);
232 qrx_cleanup_urxl(qrx
, &qrx
->urx_deferred
);
234 /* Drop keying material and crypto resources. */
235 for (i
= 0; i
< QUIC_ENC_LEVEL_NUM
; ++i
)
236 ossl_qrl_enc_level_set_discard(&qrx
->el_set
, i
);
241 void ossl_qrx_inject_urxe(OSSL_QRX
*qrx
, QUIC_URXE
*urxe
)
243 /* Initialize our own fields inside the URXE and add to the pending list. */
245 urxe
->hpr_removed
= 0;
247 ossl_list_urxe_insert_tail(&qrx
->urx_pending
, urxe
);
249 if (qrx
->msg_callback
!= NULL
)
250 qrx
->msg_callback(0, OSSL_QUIC1_VERSION
, SSL3_RT_QUIC_DATAGRAM
, urxe
+ 1,
251 urxe
->data_len
, qrx
->msg_callback_ssl
,
252 qrx
->msg_callback_arg
);
255 static void qrx_on_rx(QUIC_URXE
*urxe
, void *arg
)
259 ossl_qrx_inject_urxe(qrx
, urxe
);
262 int ossl_qrx_add_dst_conn_id(OSSL_QRX
*qrx
,
263 const QUIC_CONN_ID
*dst_conn_id
)
265 return ossl_quic_demux_register(qrx
->demux
,
271 int ossl_qrx_remove_dst_conn_id(OSSL_QRX
*qrx
,
272 const QUIC_CONN_ID
*dst_conn_id
)
274 return ossl_quic_demux_unregister(qrx
->demux
, dst_conn_id
);
277 static void qrx_requeue_deferred(OSSL_QRX
*qrx
)
281 while ((e
= ossl_list_urxe_head(&qrx
->urx_deferred
)) != NULL
) {
282 ossl_list_urxe_remove(&qrx
->urx_deferred
, e
);
283 ossl_list_urxe_insert_head(&qrx
->urx_pending
, e
);
287 int ossl_qrx_provide_secret(OSSL_QRX
*qrx
, uint32_t enc_level
,
288 uint32_t suite_id
, EVP_MD
*md
,
289 const unsigned char *secret
, size_t secret_len
)
291 if (enc_level
>= QUIC_ENC_LEVEL_NUM
)
294 if (!ossl_qrl_enc_level_set_provide_secret(&qrx
->el_set
,
302 qrx
->init_key_phase_bit
,
307 * Any packets we previously could not decrypt, we may now be able to
308 * decrypt, so move any datagrams containing deferred packets from the
309 * deferred to the pending queue.
311 qrx_requeue_deferred(qrx
);
315 int ossl_qrx_discard_enc_level(OSSL_QRX
*qrx
, uint32_t enc_level
)
317 if (enc_level
>= QUIC_ENC_LEVEL_NUM
)
320 ossl_qrl_enc_level_set_discard(&qrx
->el_set
, enc_level
);
324 /* Returns 1 if there are one or more pending RXEs. */
325 int ossl_qrx_processed_read_pending(OSSL_QRX
*qrx
)
327 return !ossl_list_rxe_is_empty(&qrx
->rx_pending
);
330 /* Returns 1 if there are yet-unprocessed packets. */
331 int ossl_qrx_unprocessed_read_pending(OSSL_QRX
*qrx
)
333 return !ossl_list_urxe_is_empty(&qrx
->urx_pending
)
334 || !ossl_list_urxe_is_empty(&qrx
->urx_deferred
);
337 /* Pop the next pending RXE. Returns NULL if no RXE is pending. */
338 static RXE
*qrx_pop_pending_rxe(OSSL_QRX
*qrx
)
340 RXE
*rxe
= ossl_list_rxe_head(&qrx
->rx_pending
);
345 ossl_list_rxe_remove(&qrx
->rx_pending
, rxe
);
349 /* Allocate a new RXE. */
350 static RXE
*qrx_alloc_rxe(size_t alloc_len
)
354 if (alloc_len
>= SIZE_MAX
- sizeof(RXE
))
357 rxe
= OPENSSL_malloc(sizeof(RXE
) + alloc_len
);
361 ossl_list_rxe_init_elem(rxe
);
362 rxe
->alloc_len
= alloc_len
;
369 * Ensures there is at least one RXE in the RX free list, allocating a new entry
370 * if necessary. The returned RXE is in the RX free list; it is not popped.
372 * alloc_len is a hint which may be used to determine the RXE size if allocation
373 * is necessary. Returns NULL on allocation failure.
375 static RXE
*qrx_ensure_free_rxe(OSSL_QRX
*qrx
, size_t alloc_len
)
379 if (ossl_list_rxe_head(&qrx
->rx_free
) != NULL
)
380 return ossl_list_rxe_head(&qrx
->rx_free
);
382 rxe
= qrx_alloc_rxe(alloc_len
);
386 ossl_list_rxe_insert_tail(&qrx
->rx_free
, rxe
);
391 * Resize the data buffer attached to an RXE to be n bytes in size. The address
392 * of the RXE might change; the new address is returned, or NULL on failure, in
393 * which case the original RXE remains valid.
395 static RXE
*qrx_resize_rxe(RXE_LIST
*rxl
, RXE
*rxe
, size_t n
)
399 /* Should never happen. */
403 if (n
>= SIZE_MAX
- sizeof(RXE
))
406 /* Remove the item from the list to avoid accessing freed memory */
407 p
= ossl_list_rxe_prev(rxe
);
408 ossl_list_rxe_remove(rxl
, rxe
);
410 /* Should never resize an RXE which has been handed out. */
411 if (!ossl_assert(rxe
->refcount
== 0))
415 * NOTE: We do not clear old memory, although it does contain decrypted
418 rxe2
= OPENSSL_realloc(rxe
, sizeof(RXE
) + n
);
420 /* Resize failed, restore old allocation. */
422 ossl_list_rxe_insert_head(rxl
, rxe
);
424 ossl_list_rxe_insert_after(rxl
, p
, rxe
);
429 ossl_list_rxe_insert_head(rxl
, rxe2
);
431 ossl_list_rxe_insert_after(rxl
, p
, rxe2
);
438 * Ensure the data buffer attached to an RXE is at least n bytes in size.
439 * Returns NULL on failure.
441 static RXE
*qrx_reserve_rxe(RXE_LIST
*rxl
,
444 if (rxe
->alloc_len
>= n
)
447 return qrx_resize_rxe(rxl
, rxe
, n
);
450 /* Return a RXE handed out to the user back to our freelist. */
451 static void qrx_recycle_rxe(OSSL_QRX
*qrx
, RXE
*rxe
)
453 /* RXE should not be in any list */
454 assert(ossl_list_rxe_prev(rxe
) == NULL
&& ossl_list_rxe_next(rxe
) == NULL
);
456 rxe
->pkt
.peer
= NULL
;
457 rxe
->pkt
.local
= NULL
;
458 ossl_list_rxe_insert_tail(&qrx
->rx_free
, rxe
);
462 * Given a pointer to a pointer pointing to a buffer and the size of that
463 * buffer, copy the buffer into *prxe, expanding the RXE if necessary (its
464 * pointer may change due to realloc). *pi is the offset in bytes to copy the
465 * buffer to, and on success is updated to be the offset pointing after the
466 * copied buffer. *pptr is updated to point to the new location of the buffer.
468 static int qrx_relocate_buffer(OSSL_QRX
*qrx
, RXE
**prxe
, size_t *pi
,
469 const unsigned char **pptr
, size_t buf_len
)
477 if ((rxe
= qrx_reserve_rxe(&qrx
->rx_free
, *prxe
, *pi
+ buf_len
)) == NULL
)
481 dst
= (unsigned char *)rxe_data(rxe
) + *pi
;
483 memcpy(dst
, *pptr
, buf_len
);
489 static uint32_t qrx_determine_enc_level(const QUIC_PKT_HDR
*hdr
)
492 case QUIC_PKT_TYPE_INITIAL
:
493 return QUIC_ENC_LEVEL_INITIAL
;
494 case QUIC_PKT_TYPE_HANDSHAKE
:
495 return QUIC_ENC_LEVEL_HANDSHAKE
;
496 case QUIC_PKT_TYPE_0RTT
:
497 return QUIC_ENC_LEVEL_0RTT
;
498 case QUIC_PKT_TYPE_1RTT
:
499 return QUIC_ENC_LEVEL_1RTT
;
503 case QUIC_PKT_TYPE_RETRY
:
504 case QUIC_PKT_TYPE_VERSION_NEG
:
505 return QUIC_ENC_LEVEL_INITIAL
; /* not used */
509 static uint32_t rxe_determine_pn_space(RXE
*rxe
)
513 enc_level
= qrx_determine_enc_level(&rxe
->hdr
);
514 return ossl_quic_enc_level_to_pn_space(enc_level
);
517 static int qrx_validate_hdr_early(OSSL_QRX
*qrx
, RXE
*rxe
,
518 const QUIC_CONN_ID
*first_dcid
)
520 /* Ensure version is what we want. */
521 if (rxe
->hdr
.version
!= QUIC_VERSION_1
522 && rxe
->hdr
.version
!= QUIC_VERSION_NONE
)
525 /* Clients should never receive 0-RTT packets. */
526 if (rxe
->hdr
.type
== QUIC_PKT_TYPE_0RTT
)
529 /* Version negotiation and retry packets must be the first packet. */
530 if (first_dcid
!= NULL
&& !ossl_quic_pkt_type_can_share_dgram(rxe
->hdr
.type
))
534 * If this is not the first packet in a datagram, the destination connection
535 * ID must match the one in that packet.
537 if (first_dcid
!= NULL
) {
538 if (!ossl_assert(first_dcid
->id_len
< QUIC_MAX_CONN_ID_LEN
)
539 || !ossl_quic_conn_id_eq(first_dcid
,
540 &rxe
->hdr
.dst_conn_id
))
547 /* Validate header and decode PN. */
548 static int qrx_validate_hdr(OSSL_QRX
*qrx
, RXE
*rxe
)
550 int pn_space
= rxe_determine_pn_space(rxe
);
552 if (!ossl_quic_wire_decode_pkt_hdr_pn(rxe
->hdr
.pn
, rxe
->hdr
.pn_len
,
553 qrx
->largest_pn
[pn_space
],
560 /* Late packet header validation. */
561 static int qrx_validate_hdr_late(OSSL_QRX
*qrx
, RXE
*rxe
)
563 int pn_space
= rxe_determine_pn_space(rxe
);
566 * Allow our user to decide whether to discard the packet before we try and
569 if (qrx
->validation_cb
!= NULL
570 && !qrx
->validation_cb(rxe
->pn
, pn_space
, qrx
->validation_cb_arg
))
577 * Retrieves the correct cipher context for an EL and key phase. Writes the key
578 * epoch number actually used for packet decryption to *rx_key_epoch.
580 static size_t qrx_get_cipher_ctx_idx(OSSL_QRX
*qrx
, OSSL_QRL_ENC_LEVEL
*el
,
582 unsigned char key_phase_bit
,
583 uint64_t *rx_key_epoch
,
590 if (enc_level
!= QUIC_ENC_LEVEL_1RTT
) {
595 if (!ossl_assert(key_phase_bit
<= 1))
599 * RFC 9001 requires that we not create timing channels which could reveal
600 * the decrypted value of the Key Phase bit. We usually handle this by
601 * keeping the cipher contexts for both the current and next key epochs
602 * around, so that we just select a cipher context blindly using the key
603 * phase bit, which is time-invariant.
605 * In the COOLDOWN state, we only have one keyslot/cipher context. RFC 9001
606 * suggests an implementation strategy to avoid creating a timing channel in
609 * Endpoints can use randomized packet protection keys in place of
610 * discarded keys when key updates are not yet permitted.
612 * Rather than use a randomised key, we simply use our existing key as it
613 * will fail AEAD verification anyway. This avoids the need to keep around a
614 * dedicated garbage key.
616 * Note: Accessing different cipher contexts is technically not
617 * timing-channel safe due to microarchitectural side channels, but this is
618 * the best we can reasonably do and appears to be directly suggested by the
621 idx
= (el
->state
== QRL_EL_STATE_PROV_COOLDOWN
? el
->key_epoch
& 1
625 * We also need to determine the key epoch number which this index
626 * corresponds to. This is so we can report the key epoch number in the
627 * OSSL_QRX_PKT structure, which callers need to validate whether it was OK
628 * for a packet to be sent using a given key epoch's keys.
631 case QRL_EL_STATE_PROV_NORMAL
:
633 * If we are in the NORMAL state, usually the KP bit will match the LSB
634 * of our key epoch, meaning no new key update is being signalled. If it
635 * does not match, this means the packet (purports to) belong to
636 * the next key epoch.
638 * IMPORTANT: The AEAD tag has not been verified yet when this function
639 * is called, so this code must be timing-channel safe, hence use of
640 * XOR. Moreover, the value output below is not yet authenticated.
643 = el
->key_epoch
+ ((el
->key_epoch
& 1) ^ (uint64_t)key_phase_bit
);
646 case QRL_EL_STATE_PROV_UPDATING
:
648 * If we are in the UPDATING state, usually the KP bit will match the
649 * LSB of our key epoch. If it does not match, this means that the
650 * packet (purports to) belong to the previous key epoch.
652 * As above, must be timing-channel safe.
654 *is_old_key
= (el
->key_epoch
& 1) ^ (uint64_t)key_phase_bit
;
655 *rx_key_epoch
= el
->key_epoch
- (uint64_t)*is_old_key
;
658 case QRL_EL_STATE_PROV_COOLDOWN
:
660 * If we are in COOLDOWN, there is only one key epoch we can possibly
661 * decrypt with, so just try that. If AEAD decryption fails, the
662 * value we output here isn't used anyway.
664 *rx_key_epoch
= el
->key_epoch
;
672 * Tries to decrypt a packet payload.
674 * Returns 1 on success or 0 on failure (which is permanent). The payload is
675 * decrypted from src and written to dst. The buffer dst must be of at least
676 * src_len bytes in length. The actual length of the output in bytes is written
677 * to *dec_len on success, which will always be equal to or less than (usually
678 * less than) src_len.
680 static int qrx_decrypt_pkt_body(OSSL_QRX
*qrx
, unsigned char *dst
,
681 const unsigned char *src
,
682 size_t src_len
, size_t *dec_len
,
683 const unsigned char *aad
, size_t aad_len
,
684 QUIC_PN pn
, uint32_t enc_level
,
685 unsigned char key_phase_bit
,
686 uint64_t *rx_key_epoch
)
688 int l
= 0, l2
= 0, is_old_key
, nonce_len
;
689 unsigned char nonce
[EVP_MAX_IV_LENGTH
];
691 OSSL_QRL_ENC_LEVEL
*el
= ossl_qrl_enc_level_set_get(&qrx
->el_set
,
693 EVP_CIPHER_CTX
*cctx
;
695 if (src_len
> INT_MAX
|| aad_len
> INT_MAX
)
698 /* We should not have been called if we do not have key material. */
699 if (!ossl_assert(el
!= NULL
))
702 if (el
->tag_len
>= src_len
)
706 * If we have failed to authenticate a certain number of ciphertexts, refuse
707 * to decrypt any more ciphertexts.
709 if (qrx
->forged_pkt_count
>= ossl_qrl_get_suite_max_forged_pkt(el
->suite_id
))
712 cctx_idx
= qrx_get_cipher_ctx_idx(qrx
, el
, enc_level
, key_phase_bit
,
713 rx_key_epoch
, &is_old_key
);
714 if (!ossl_assert(cctx_idx
< OSSL_NELEM(el
->cctx
)))
717 if (is_old_key
&& pn
>= qrx
->cur_epoch_start_pn
)
719 * RFC 9001 s. 5.5: Once an endpoint successfully receives a packet with
720 * a given PN, it MUST discard all packets in the same PN space with
721 * higher PNs if they cannot be successfully unprotected with the same
722 * key, or -- if there is a key update -- a subsequent packet protection
725 * In other words, once a PN x triggers a KU, it is invalid for us to
726 * receive a packet with a newer PN y (y > x) using the old keys.
730 cctx
= el
->cctx
[cctx_idx
];
732 /* Construct nonce (nonce=IV ^ PN). */
733 nonce_len
= EVP_CIPHER_CTX_get_iv_length(cctx
);
734 if (!ossl_assert(nonce_len
>= (int)sizeof(QUIC_PN
)))
737 memcpy(nonce
, el
->iv
[cctx_idx
], nonce_len
);
738 for (i
= 0; i
< sizeof(QUIC_PN
); ++i
)
739 nonce
[nonce_len
- i
- 1] ^= (unsigned char)(pn
>> (i
* 8));
741 /* type and key will already have been setup; feed the IV. */
742 if (EVP_CipherInit_ex(cctx
, NULL
,
743 NULL
, NULL
, nonce
, /*enc=*/0) != 1)
746 /* Feed the AEAD tag we got so the cipher can validate it. */
747 if (EVP_CIPHER_CTX_ctrl(cctx
, EVP_CTRL_AEAD_SET_TAG
,
749 (unsigned char *)src
+ src_len
- el
->tag_len
) != 1)
753 if (EVP_CipherUpdate(cctx
, NULL
, &l
, aad
, aad_len
) != 1)
756 /* Feed encrypted packet body. */
757 if (EVP_CipherUpdate(cctx
, dst
, &l
, src
, src_len
- el
->tag_len
) != 1)
760 /* Ensure authentication succeeded. */
761 if (EVP_CipherFinal_ex(cctx
, NULL
, &l2
) != 1) {
762 /* Authentication failed, increment failed auth counter. */
763 ++qrx
->forged_pkt_count
;
771 static ossl_inline
void ignore_res(int x
)
776 static void qrx_key_update_initiated(OSSL_QRX
*qrx
, QUIC_PN pn
)
778 if (!ossl_qrl_enc_level_set_key_update(&qrx
->el_set
, QUIC_ENC_LEVEL_1RTT
))
779 /* We are already in RXKU, so we don't call the callback again. */
782 qrx
->cur_epoch_start_pn
= pn
;
784 if (qrx
->key_update_cb
!= NULL
)
785 qrx
->key_update_cb(pn
, qrx
->key_update_cb_arg
);
788 /* Process a single packet in a datagram. */
789 static int qrx_process_pkt(OSSL_QRX
*qrx
, QUIC_URXE
*urxe
,
790 PACKET
*pkt
, size_t pkt_idx
,
791 QUIC_CONN_ID
*first_dcid
,
795 const unsigned char *eop
= NULL
;
796 size_t i
, aad_len
= 0, dec_len
= 0;
797 PACKET orig_pkt
= *pkt
;
798 const unsigned char *sop
= PACKET_data(pkt
);
800 char need_second_decode
= 0, already_processed
= 0;
801 QUIC_PKT_HDR_PTRS ptrs
;
802 uint32_t pn_space
, enc_level
;
803 OSSL_QRL_ENC_LEVEL
*el
= NULL
;
804 uint64_t rx_key_epoch
= UINT64_MAX
;
807 * Get a free RXE. If we need to allocate a new one, use the packet length
808 * as a good ballpark figure.
810 rxe
= qrx_ensure_free_rxe(qrx
, PACKET_remaining(pkt
));
814 /* Have we already processed this packet? */
815 if (pkt_is_marked(&urxe
->processed
, pkt_idx
))
816 already_processed
= 1;
819 * Decode the header into the RXE structure. We first decrypt and read the
820 * unprotected part of the packet header (unless we already removed header
821 * protection, in which case we decode all of it).
823 need_second_decode
= !pkt_is_marked(&urxe
->hpr_removed
, pkt_idx
);
824 if (!ossl_quic_wire_decode_pkt_hdr(pkt
,
825 qrx
->short_conn_id_len
,
826 need_second_decode
, 0, &rxe
->hdr
, &ptrs
))
830 * Our successful decode above included an intelligible length and the
831 * PACKET is now pointing to the end of the QUIC packet.
833 eop
= PACKET_data(pkt
);
836 * Make a note of the first packet's DCID so we can later ensure the
837 * destination connection IDs of all packets in a datagram match.
840 *first_dcid
= rxe
->hdr
.dst_conn_id
;
843 * Early header validation. Since we now know the packet length, we can also
844 * now skip over it if we already processed it.
846 if (already_processed
847 || !qrx_validate_hdr_early(qrx
, rxe
, pkt_idx
== 0 ? NULL
: first_dcid
))
849 * Already processed packets are handled identically to malformed
850 * packets; i.e., they are ignored.
854 if (!ossl_quic_pkt_type_is_encrypted(rxe
->hdr
.type
)) {
856 * Version negotiation and retry packets are a special case. They do not
857 * contain a payload which needs decrypting and have no header
861 /* Just copy the payload from the URXE to the RXE. */
862 if ((rxe
= qrx_reserve_rxe(&qrx
->rx_free
, rxe
, rxe
->hdr
.len
)) == NULL
)
864 * Allocation failure. EOP will be pointing to the end of the
865 * datagram so processing of this datagram will end here.
869 /* We are now committed to returning the packet. */
870 memcpy(rxe_data(rxe
), rxe
->hdr
.data
, rxe
->hdr
.len
);
871 pkt_mark(&urxe
->processed
, pkt_idx
);
873 rxe
->hdr
.data
= rxe_data(rxe
);
874 rxe
->pn
= QUIC_PN_INVALID
;
876 rxe
->data_len
= rxe
->hdr
.len
;
877 rxe
->datagram_len
= datagram_len
;
879 rxe
->peer
= urxe
->peer
;
880 rxe
->local
= urxe
->local
;
881 rxe
->time
= urxe
->time
;
883 /* Move RXE to pending. */
884 ossl_list_rxe_remove(&qrx
->rx_free
, rxe
);
885 ossl_list_rxe_insert_tail(&qrx
->rx_pending
, rxe
);
886 return 0; /* success, did not defer */
889 /* Determine encryption level of packet. */
890 enc_level
= qrx_determine_enc_level(&rxe
->hdr
);
892 /* If we do not have keying material for this encryption level yet, defer. */
893 switch (ossl_qrl_enc_level_set_have_el(&qrx
->el_set
, enc_level
)) {
896 if (enc_level
== QUIC_ENC_LEVEL_1RTT
&& !qrx
->allow_1rtt
)
898 * But we cannot process 1-RTT packets until the handshake is
899 * completed (RFC 9000 s. 5.7).
908 /* We already discarded keys for this EL, we will never process this.*/
913 * We will copy any token included in the packet to the start of our RXE
914 * data buffer (so that we don't reference the URXE buffer any more and can
915 * recycle it). Track our position in the RXE buffer by index instead of
916 * pointer as the pointer may change as reallocs occur.
921 * rxe->hdr.data is now pointing at the (encrypted) packet payload. rxe->hdr
922 * also has fields pointing into the PACKET buffer which will be going away
923 * soon (the URXE will be reused for another incoming packet).
925 * Firstly, relocate some of these fields into the RXE as needed.
927 * Relocate token buffer and fix pointer.
929 if (rxe
->hdr
.type
== QUIC_PKT_TYPE_INITIAL
930 && !qrx_relocate_buffer(qrx
, &rxe
, &i
, &rxe
->hdr
.token
,
934 /* Now remove header protection. */
937 el
= ossl_qrl_enc_level_set_get(&qrx
->el_set
, enc_level
, 1);
938 assert(el
!= NULL
); /* Already checked above */
940 if (need_second_decode
) {
941 if (!ossl_quic_hdr_protector_decrypt(&el
->hpr
, &ptrs
))
945 * We have removed header protection, so don't attempt to do it again if
946 * the packet gets deferred and processed again.
948 pkt_mark(&urxe
->hpr_removed
, pkt_idx
);
950 /* Decode the now unprotected header. */
951 if (ossl_quic_wire_decode_pkt_hdr(pkt
, qrx
->short_conn_id_len
,
952 0, 0, &rxe
->hdr
, NULL
) != 1)
956 /* Validate header and decode PN. */
957 if (!qrx_validate_hdr(qrx
, rxe
))
960 if (qrx
->msg_callback
!= NULL
)
961 qrx
->msg_callback(0, OSSL_QUIC1_VERSION
, SSL3_RT_QUIC_PACKET
, sop
,
962 eop
- sop
- rxe
->hdr
.len
, qrx
->msg_callback_ssl
,
963 qrx
->msg_callback_arg
);
966 * The AAD data is the entire (unprotected) packet header including the PN.
967 * The packet header has been unprotected in place, so we can just reuse the
968 * PACKET buffer. The header ends where the payload begins.
970 aad_len
= rxe
->hdr
.data
- sop
;
972 /* Ensure the RXE buffer size is adequate for our payload. */
973 if ((rxe
= qrx_reserve_rxe(&qrx
->rx_free
, rxe
, rxe
->hdr
.len
+ i
)) == NULL
) {
975 * Allocation failure, treat as malformed and do not bother processing
976 * any further packets in the datagram as they are likely to also
977 * encounter allocation failures.
984 * We decrypt the packet body to immediately after the token at the start of
985 * the RXE buffer (where present).
987 * Do the decryption from the PACKET (which points into URXE memory) to our
988 * RXE payload (single-copy decryption), then fixup the pointers in the
989 * header to point to our new buffer.
991 * If decryption fails this is considered a permanent error; we defer
992 * packets we don't yet have decryption keys for above, so if this fails,
993 * something has gone wrong with the handshake process or a packet has been
996 dst
= (unsigned char *)rxe_data(rxe
) + i
;
997 if (!qrx_decrypt_pkt_body(qrx
, dst
, rxe
->hdr
.data
, rxe
->hdr
.len
,
998 &dec_len
, sop
, aad_len
, rxe
->pn
, enc_level
,
999 rxe
->hdr
.key_phase
, &rx_key_epoch
))
1003 * -----------------------------------------------------
1004 * IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
1005 * AND MUST BE TIMING-CHANNEL SAFE.
1006 * -----------------------------------------------------
1008 * At this point, we have successfully authenticated the AEAD tag and no
1009 * longer need to worry about exposing the PN, PN length or Key Phase bit in
1010 * timing channels. Invoke any configured validation callback to allow for
1011 * rejection of duplicate PNs.
1013 if (!qrx_validate_hdr_late(qrx
, rxe
))
1016 /* Check for a Key Phase bit differing from our expectation. */
1017 if (rxe
->hdr
.type
== QUIC_PKT_TYPE_1RTT
1018 && rxe
->hdr
.key_phase
!= (el
->key_epoch
& 1))
1019 qrx_key_update_initiated(qrx
, rxe
->pn
);
1022 * We have now successfully decrypted the packet payload. If there are
1023 * additional packets in the datagram, it is possible we will fail to
1024 * decrypt them and need to defer them until we have some key material we
1025 * don't currently possess. If this happens, the URXE will be moved to the
1026 * deferred queue. Since a URXE corresponds to one datagram, which may
1027 * contain multiple packets, we must ensure any packets we have already
1028 * processed in the URXE are not processed again (this is an RFC
1029 * requirement). We do this by marking the nth packet in the datagram as
1032 * We are now committed to returning this decrypted packet to the user,
1033 * meaning we now consider the packet processed and must mark it
1036 pkt_mark(&urxe
->processed
, pkt_idx
);
1039 * Update header to point to the decrypted buffer, which may be shorter
1040 * due to AEAD tags, block padding, etc.
1042 rxe
->hdr
.data
= dst
;
1043 rxe
->hdr
.len
= dec_len
;
1044 rxe
->data_len
= dec_len
;
1045 rxe
->datagram_len
= datagram_len
;
1046 rxe
->key_epoch
= rx_key_epoch
;
1048 /* We processed the PN successfully, so update largest processed PN. */
1049 pn_space
= rxe_determine_pn_space(rxe
);
1050 if (rxe
->pn
> qrx
->largest_pn
[pn_space
])
1051 qrx
->largest_pn
[pn_space
] = rxe
->pn
;
1053 /* Copy across network addresses and RX time from URXE to RXE. */
1054 rxe
->peer
= urxe
->peer
;
1055 rxe
->local
= urxe
->local
;
1056 rxe
->time
= urxe
->time
;
1058 /* Move RXE to pending. */
1059 ossl_list_rxe_remove(&qrx
->rx_free
, rxe
);
1060 ossl_list_rxe_insert_tail(&qrx
->rx_pending
, rxe
);
1061 return 0; /* success, did not defer; not distinguished from failure */
1065 * We cannot process this packet right now (but might be able to later). We
1066 * MUST attempt to process any other packets in the datagram, so defer it
1069 assert(eop
!= NULL
&& eop
>= PACKET_data(pkt
));
1071 * We don't care if this fails as it will just result in the packet being at
1072 * the end of the datagram buffer.
1074 ignore_res(PACKET_forward(pkt
, eop
- PACKET_data(pkt
)));
1075 return 1; /* deferred */
1080 * This packet cannot be processed and will never be processable. We
1081 * were at least able to decode its header and determine its length, so
1082 * we can skip over it and try to process any subsequent packets in the
1085 * Mark as processed as an optimization.
1087 assert(eop
>= PACKET_data(pkt
));
1088 pkt_mark(&urxe
->processed
, pkt_idx
);
1089 /* We don't care if this fails (see above) */
1090 ignore_res(PACKET_forward(pkt
, eop
- PACKET_data(pkt
)));
1093 * This packet cannot be processed and will never be processable.
1094 * Because even its header is not intelligible, we cannot examine any
1095 * further packets in the datagram because its length cannot be
1098 * Advance over the entire remainder of the datagram, and mark it as
1099 * processed as an optimization.
1101 pkt_mark(&urxe
->processed
, pkt_idx
);
1102 /* We don't care if this fails (see above) */
1103 ignore_res(PACKET_forward(pkt
, PACKET_remaining(pkt
)));
1105 return 0; /* failure, did not defer; not distinguished from success */
1108 /* Process a datagram which was received. */
1109 static int qrx_process_datagram(OSSL_QRX
*qrx
, QUIC_URXE
*e
,
1110 const unsigned char *data
,
1113 int have_deferred
= 0;
1116 QUIC_CONN_ID first_dcid
= { 255 };
1118 qrx
->bytes_received
+= data_len
;
1120 if (!PACKET_buf_init(&pkt
, data
, data_len
))
1123 for (; PACKET_remaining(&pkt
) > 0; ++pkt_idx
) {
1125 * A packet smaller than the minimum possible QUIC packet size is not
1126 * considered valid. We also ignore more than a certain number of
1127 * packets within the same datagram.
1129 if (PACKET_remaining(&pkt
) < QUIC_MIN_VALID_PKT_LEN
1130 || pkt_idx
>= QUIC_MAX_PKT_PER_URXE
)
1134 * We note whether packet processing resulted in a deferral since
1135 * this means we need to move the URXE to the deferred list rather
1136 * than the free list after we're finished dealing with it for now.
1138 * However, we don't otherwise care here whether processing succeeded or
1139 * failed, as the RFC says even if a packet in a datagram is malformed,
1140 * we should still try to process any packets following it.
1142 * In the case where the packet is so malformed we can't determine its
1143 * length, qrx_process_pkt will take care of advancing to the end of
1144 * the packet, so we will exit the loop automatically in this case.
1146 if (qrx_process_pkt(qrx
, e
, &pkt
, pkt_idx
, &first_dcid
, data_len
))
1150 /* Only report whether there were any deferrals. */
1151 return have_deferred
;
1154 /* Process a single pending URXE. */
1155 static int qrx_process_one_urxe(OSSL_QRX
*qrx
, QUIC_URXE
*e
)
1159 /* The next URXE we process should be at the head of the pending list. */
1160 if (!ossl_assert(e
== ossl_list_urxe_head(&qrx
->urx_pending
)))
1164 * Attempt to process the datagram. The return value indicates only if
1165 * processing of the datagram was deferred. If we failed to process the
1166 * datagram, we do not attempt to process it again and silently eat the
1169 was_deferred
= qrx_process_datagram(qrx
, e
, ossl_quic_urxe_data(e
),
1173 * Remove the URXE from the pending list and return it to
1174 * either the free or deferred list.
1176 ossl_list_urxe_remove(&qrx
->urx_pending
, e
);
1177 if (was_deferred
> 0 &&
1178 (e
->deferred
|| qrx
->num_deferred
< qrx
->max_deferred
)) {
1179 ossl_list_urxe_insert_tail(&qrx
->urx_deferred
, e
);
1182 ++qrx
->num_deferred
;
1187 --qrx
->num_deferred
;
1189 ossl_quic_demux_release_urxe(qrx
->demux
, e
);
1195 /* Process any pending URXEs to generate pending RXEs. */
1196 static int qrx_process_pending_urxl(OSSL_QRX
*qrx
)
1200 while ((e
= ossl_list_urxe_head(&qrx
->urx_pending
)) != NULL
)
1201 if (!qrx_process_one_urxe(qrx
, e
))
1207 int ossl_qrx_read_pkt(OSSL_QRX
*qrx
, OSSL_QRX_PKT
**ppkt
)
1211 if (!ossl_qrx_processed_read_pending(qrx
)) {
1212 if (!qrx_process_pending_urxl(qrx
))
1215 if (!ossl_qrx_processed_read_pending(qrx
))
1219 rxe
= qrx_pop_pending_rxe(qrx
);
1220 if (!ossl_assert(rxe
!= NULL
))
1223 assert(rxe
->refcount
== 0);
1226 rxe
->pkt
.hdr
= &rxe
->hdr
;
1227 rxe
->pkt
.pn
= rxe
->pn
;
1228 rxe
->pkt
.time
= rxe
->time
;
1229 rxe
->pkt
.datagram_len
= rxe
->datagram_len
;
1231 = BIO_ADDR_family(&rxe
->peer
) != AF_UNSPEC
? &rxe
->peer
: NULL
;
1233 = BIO_ADDR_family(&rxe
->local
) != AF_UNSPEC
? &rxe
->local
: NULL
;
1234 rxe
->pkt
.key_epoch
= rxe
->key_epoch
;
1241 void ossl_qrx_pkt_release(OSSL_QRX_PKT
*pkt
)
1249 assert(rxe
->refcount
> 0);
1250 if (--rxe
->refcount
== 0)
1251 qrx_recycle_rxe(pkt
->qrx
, rxe
);
1254 void ossl_qrx_pkt_up_ref(OSSL_QRX_PKT
*pkt
)
1256 RXE
*rxe
= (RXE
*)pkt
;
1258 assert(rxe
->refcount
> 0);
1262 uint64_t ossl_qrx_get_bytes_received(OSSL_QRX
*qrx
, int clear
)
1264 uint64_t v
= qrx
->bytes_received
;
1267 qrx
->bytes_received
= 0;
1272 int ossl_qrx_set_late_validation_cb(OSSL_QRX
*qrx
,
1273 ossl_qrx_late_validation_cb
*cb
,
1276 qrx
->validation_cb
= cb
;
1277 qrx
->validation_cb_arg
= cb_arg
;
1281 int ossl_qrx_set_key_update_cb(OSSL_QRX
*qrx
,
1282 ossl_qrx_key_update_cb
*cb
,
1285 qrx
->key_update_cb
= cb
;
1286 qrx
->key_update_cb_arg
= cb_arg
;
1290 uint64_t ossl_qrx_get_key_epoch(OSSL_QRX
*qrx
)
1292 OSSL_QRL_ENC_LEVEL
*el
= ossl_qrl_enc_level_set_get(&qrx
->el_set
,
1293 QUIC_ENC_LEVEL_1RTT
, 1);
1295 return el
== NULL
? UINT64_MAX
: el
->key_epoch
;
1298 int ossl_qrx_key_update_timeout(OSSL_QRX
*qrx
, int normal
)
1300 OSSL_QRL_ENC_LEVEL
*el
= ossl_qrl_enc_level_set_get(&qrx
->el_set
,
1301 QUIC_ENC_LEVEL_1RTT
, 1);
1306 if (el
->state
== QRL_EL_STATE_PROV_UPDATING
1307 && !ossl_qrl_enc_level_set_key_update_done(&qrx
->el_set
,
1308 QUIC_ENC_LEVEL_1RTT
))
1311 if (normal
&& el
->state
== QRL_EL_STATE_PROV_COOLDOWN
1312 && !ossl_qrl_enc_level_set_key_cooldown_done(&qrx
->el_set
,
1313 QUIC_ENC_LEVEL_1RTT
))
1319 uint64_t ossl_qrx_get_cur_forged_pkt_count(OSSL_QRX
*qrx
)
1321 return qrx
->forged_pkt_count
;
1324 uint64_t ossl_qrx_get_max_forged_pkt_count(OSSL_QRX
*qrx
,
1327 OSSL_QRL_ENC_LEVEL
*el
= ossl_qrl_enc_level_set_get(&qrx
->el_set
,
1330 return el
== NULL
? UINT64_MAX
1331 : ossl_qrl_get_suite_max_forged_pkt(el
->suite_id
);
1334 void ossl_qrx_allow_1rtt_processing(OSSL_QRX
*qrx
)
1336 if (qrx
->allow_1rtt
)
1339 qrx
->allow_1rtt
= 1;
1340 qrx_requeue_deferred(qrx
);
1343 void ossl_qrx_set_msg_callback(OSSL_QRX
*qrx
, ossl_msg_cb msg_callback
,
1344 SSL
*msg_callback_ssl
)
1346 qrx
->msg_callback
= msg_callback
;
1347 qrx
->msg_callback_ssl
= msg_callback_ssl
;
1350 void ossl_qrx_set_msg_callback_arg(OSSL_QRX
*qrx
, void *msg_callback_arg
)
1352 qrx
->msg_callback_arg
= msg_callback_arg
;