]> git.ipfire.org Git - thirdparty/openssl.git/blob - ssl/quic/quic_record_rx.c
QUIC Record Layer (Refactor and TX Side)
[thirdparty/openssl.git] / ssl / quic / quic_record_rx.c
1 /*
2 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10 #include "internal/quic_record_rx.h"
11 #include "quic_record_shared.h"
12 #include "internal/common.h"
13 #include "../ssl_local.h"
14
15 /*
16 * Mark a packet in a bitfield.
17 *
18 * pkt_idx: index of packet within datagram.
19 */
20 static ossl_inline void pkt_mark(uint64_t *bitf, size_t pkt_idx)
21 {
22 assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
23 *bitf |= ((uint64_t)1) << pkt_idx;
24 }
25
26 /* Returns 1 if a packet is in the bitfield. */
27 static ossl_inline int pkt_is_marked(const uint64_t *bitf, size_t pkt_idx)
28 {
29 assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
30 return (*bitf & (((uint64_t)1) << pkt_idx)) != 0;
31 }
32
/*
 * RXE
 * ===
 *
 * RX Entries (RXEs) store processed (i.e., decrypted) data received from the
 * network. One RXE is used per received QUIC packet.
 */
typedef struct rxe_st RXE;

struct rxe_st {
    /* Linkage for the RXE_LIST this entry currently belongs to. */
    RXE *prev, *next;
    /*
     * data_len: number of valid payload bytes in the trailing buffer;
     * alloc_len: total capacity of that buffer.
     */
    size_t data_len, alloc_len;

    /* Extra fields for per-packet information. */
    QUIC_PKT_HDR hdr; /* data/len are decrypted payload */

    /* Decoded packet number. */
    QUIC_PN pn;

    /* Addresses copied from URXE. */
    BIO_ADDR peer, local;

    /* Total length of the datagram which contained this packet. */
    size_t datagram_len;

    /*
     * alloc_len allocated bytes (of which data_len bytes are valid) follow this
     * structure.
     */
};
63
/* Doubly-linked list of RXEs. head and tail are both NULL when empty. */
typedef struct ossl_qrx_rxe_list_st {
    RXE *head, *tail;
} RXE_LIST;
67
68 static ossl_inline unsigned char *rxe_data(const RXE *e)
69 {
70 return (unsigned char *)(e + 1);
71 }
72
73 static void rxe_remove(RXE_LIST *l, RXE *e)
74 {
75 if (e->prev != NULL)
76 e->prev->next = e->next;
77 if (e->next != NULL)
78 e->next->prev = e->prev;
79
80 if (e == l->head)
81 l->head = e->next;
82 if (e == l->tail)
83 l->tail = e->prev;
84
85 e->next = e->prev = NULL;
86 }
87
88 static void rxe_insert_tail(RXE_LIST *l, RXE *e)
89 {
90 if (l->tail == NULL) {
91 l->head = l->tail = e;
92 e->next = e->prev = NULL;
93 return;
94 }
95
96 l->tail->next = e;
97 e->prev = l->tail;
98 e->next = NULL;
99 l->tail = e;
100 }
101
/*
 * QRL
 * ===
 *
 * Main state for the RX side of the QUIC record layer. Created by
 * ossl_qrx_new() and freed by ossl_qrx_free().
 */
struct ossl_qrx_st {
    OSSL_LIB_CTX *libctx;
    const char *propq;

    /* Demux to receive datagrams from. */
    QUIC_DEMUX *demux;

    /* Length of connection IDs used in short-header packets in bytes. */
    size_t short_conn_id_len;

    /*
     * List of URXEs which are filled with received encrypted data.
     * These are returned to the DEMUX's free list as they are processed.
     */
    QUIC_URXE_LIST urx_pending;

    /*
     * List of URXEs which we could not decrypt immediately and which are being
     * kept in case they can be decrypted later.
     */
    QUIC_URXE_LIST urx_deferred;

    /*
     * List of RXEs which are not currently in use. These are moved
     * to the pending list as they are filled.
     */
    RXE_LIST rx_free;

    /*
     * List of RXEs which are filled with decrypted packets ready to be passed
     * to the user. A RXE is removed from all lists inside the QRL when passed
     * to the user, then returned to the free list when the user returns it.
     */
    RXE_LIST rx_pending;

    /*
     * Largest PN we have received and processed in a given PN space.
     * Seeded from OSSL_QRX_ARGS at creation; updated in qrx_process_pkt.
     */
    QUIC_PN largest_pn[QUIC_PN_SPACE_NUM];

    /* Per encryption-level state. */
    OSSL_QRL_ENC_LEVEL_SET el_set;

    /* Bytes we have received since this counter was last cleared. */
    uint64_t bytes_received;

    /* Validation callback. NULL means no early validation is performed. */
    ossl_qrx_early_validation_cb *validation_cb;
    void *validation_cb_arg;
};
154
155 static void qrx_on_rx(QUIC_URXE *urxe, void *arg);
156
157 OSSL_QRX *ossl_qrx_new(const OSSL_QRX_ARGS *args)
158 {
159 OSSL_QRX *qrx;
160 size_t i;
161
162 if (args->demux == NULL)
163 return 0;
164
165 qrx = OPENSSL_zalloc(sizeof(OSSL_QRX));
166 if (qrx == NULL)
167 return 0;
168
169 for (i = 0; i < OSSL_NELEM(qrx->largest_pn); ++i)
170 qrx->largest_pn[i] = args->init_largest_pn[i];
171
172 qrx->libctx = args->libctx;
173 qrx->propq = args->propq;
174 qrx->demux = args->demux;
175 qrx->short_conn_id_len = args->short_conn_id_len;
176 return qrx;
177 }
178
179 static void qrx_cleanup_rxl(RXE_LIST *l)
180 {
181 RXE *e, *enext;
182 for (e = l->head; e != NULL; e = enext) {
183 enext = e->next;
184 OPENSSL_free(e);
185 }
186 l->head = l->tail = NULL;
187 }
188
189 static void qrx_cleanup_urxl(OSSL_QRX *qrx, QUIC_URXE_LIST *l)
190 {
191 QUIC_URXE *e, *enext;
192 for (e = l->head; e != NULL; e = enext) {
193 enext = e->next;
194 ossl_quic_demux_release_urxe(qrx->demux, e);
195 }
196 l->head = l->tail = NULL;
197 }
198
199 void ossl_qrx_free(OSSL_QRX *qrx)
200 {
201 uint32_t i;
202
203 /* Unregister from the RX DEMUX. */
204 ossl_quic_demux_unregister_by_cb(qrx->demux, qrx_on_rx, qrx);
205
206 /* Free RXE queue data. */
207 qrx_cleanup_rxl(&qrx->rx_free);
208 qrx_cleanup_rxl(&qrx->rx_pending);
209 qrx_cleanup_urxl(qrx, &qrx->urx_pending);
210 qrx_cleanup_urxl(qrx, &qrx->urx_deferred);
211
212 /* Drop keying material and crypto resources. */
213 for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
214 ossl_qrl_enc_level_set_discard(&qrx->el_set, i, 1);
215
216 OPENSSL_free(qrx);
217 }
218
219 static void qrx_on_rx(QUIC_URXE *urxe, void *arg)
220 {
221 OSSL_QRX *qrx = arg;
222
223 /* Initialize our own fields inside the URXE and add to the pending list. */
224 urxe->processed = 0;
225 urxe->hpr_removed = 0;
226 ossl_quic_urxe_insert_tail(&qrx->urx_pending, urxe);
227 }
228
229 int ossl_qrx_add_dst_conn_id(OSSL_QRX *qrx,
230 const QUIC_CONN_ID *dst_conn_id)
231 {
232 return ossl_quic_demux_register(qrx->demux,
233 dst_conn_id,
234 qrx_on_rx,
235 qrx);
236 }
237
238 int ossl_qrx_remove_dst_conn_id(OSSL_QRX *qrx,
239 const QUIC_CONN_ID *dst_conn_id)
240 {
241 return ossl_quic_demux_unregister(qrx->demux, dst_conn_id);
242 }
243
244 static void qrx_requeue_deferred(OSSL_QRX *qrx)
245 {
246 QUIC_URXE *e;
247
248 while ((e = qrx->urx_deferred.head) != NULL) {
249 ossl_quic_urxe_remove(&qrx->urx_deferred, e);
250 ossl_quic_urxe_insert_head(&qrx->urx_pending, e);
251 }
252 }
253
254 int ossl_qrx_provide_secret(OSSL_QRX *qrx, uint32_t enc_level,
255 uint32_t suite_id, EVP_MD *md,
256 const unsigned char *secret, size_t secret_len)
257 {
258 if (enc_level >= QUIC_ENC_LEVEL_NUM)
259 return 0;
260
261 if (!ossl_qrl_enc_level_set_provide_secret(&qrx->el_set,
262 qrx->libctx,
263 qrx->propq,
264 enc_level,
265 suite_id,
266 md,
267 secret,
268 secret_len))
269 return 0;
270
271 /*
272 * Any packets we previously could not decrypt, we may now be able to
273 * decrypt, so move any datagrams containing deferred packets from the
274 * deferred to the pending queue.
275 */
276 qrx_requeue_deferred(qrx);
277 return 1;
278 }
279
280 int ossl_qrx_discard_enc_level(OSSL_QRX *qrx, uint32_t enc_level)
281 {
282 if (enc_level >= QUIC_ENC_LEVEL_NUM)
283 return 0;
284
285 ossl_qrl_enc_level_set_discard(&qrx->el_set, enc_level, 1);
286 return 1;
287 }
288
289 /* Returns 1 if there are one or more pending RXEs. */
290 int ossl_qrx_processed_read_pending(OSSL_QRX *qrx)
291 {
292 return qrx->rx_pending.head != NULL;
293 }
294
295 /* Returns 1 if there are yet-unprocessed packets. */
296 int ossl_qrx_unprocessed_read_pending(OSSL_QRX *qrx)
297 {
298 return qrx->urx_pending.head != NULL || qrx->urx_deferred.head != NULL;
299 }
300
301 /* Pop the next pending RXE. Returns NULL if no RXE is pending. */
302 static RXE *qrx_pop_pending_rxe(OSSL_QRX *qrx)
303 {
304 RXE *rxe = qrx->rx_pending.head;
305
306 if (rxe == NULL)
307 return NULL;
308
309 rxe_remove(&qrx->rx_pending, rxe);
310 return rxe;
311 }
312
313 /* Allocate a new RXE. */
314 static RXE *qrx_alloc_rxe(size_t alloc_len)
315 {
316 RXE *rxe;
317
318 if (alloc_len >= SIZE_MAX - sizeof(RXE))
319 return NULL;
320
321 rxe = OPENSSL_malloc(sizeof(RXE) + alloc_len);
322 if (rxe == NULL)
323 return NULL;
324
325 rxe->prev = rxe->next = NULL;
326 rxe->alloc_len = alloc_len;
327 rxe->data_len = 0;
328 return rxe;
329 }
330
331 /*
332 * Ensures there is at least one RXE in the RX free list, allocating a new entry
333 * if necessary. The returned RXE is in the RX free list; it is not popped.
334 *
335 * alloc_len is a hint which may be used to determine the RXE size if allocation
336 * is necessary. Returns NULL on allocation failure.
337 */
338 static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len)
339 {
340 RXE *rxe;
341
342 if (qrx->rx_free.head != NULL)
343 return qrx->rx_free.head;
344
345 rxe = qrx_alloc_rxe(alloc_len);
346 if (rxe == NULL)
347 return NULL;
348
349 rxe_insert_tail(&qrx->rx_free, rxe);
350 return rxe;
351 }
352
353 /*
354 * Resize the data buffer attached to an RXE to be n bytes in size. The address
355 * of the RXE might change; the new address is returned, or NULL on failure, in
356 * which case the original RXE remains valid.
357 */
358 static RXE *qrx_resize_rxe(RXE_LIST *rxl, RXE *rxe, size_t n)
359 {
360 RXE *rxe2;
361
362 /* Should never happen. */
363 if (rxe == NULL)
364 return NULL;
365
366 if (n >= SIZE_MAX - sizeof(RXE))
367 return NULL;
368
369 /*
370 * NOTE: We do not clear old memory, although it does contain decrypted
371 * data.
372 */
373 rxe2 = OPENSSL_realloc(rxe, sizeof(RXE) + n);
374 if (rxe2 == NULL)
375 /* original RXE is still in tact unchanged */
376 return NULL;
377
378 if (rxe != rxe2) {
379 if (rxl->head == rxe)
380 rxl->head = rxe2;
381 if (rxl->tail == rxe)
382 rxl->tail = rxe2;
383 if (rxe->prev != NULL)
384 rxe->prev->next = rxe2;
385 if (rxe->next != NULL)
386 rxe->next->prev = rxe2;
387 }
388
389 rxe2->alloc_len = n;
390 return rxe2;
391 }
392
393 /*
394 * Ensure the data buffer attached to an RXE is at least n bytes in size.
395 * Returns NULL on failure.
396 */
397 static RXE *qrx_reserve_rxe(RXE_LIST *rxl,
398 RXE *rxe, size_t n)
399 {
400 if (rxe->alloc_len >= n)
401 return rxe;
402
403 return qrx_resize_rxe(rxl, rxe, n);
404 }
405
406 /* Return a RXE handed out to the user back to our freelist. */
407 static void qrx_recycle_rxe(OSSL_QRX *qrx, RXE *rxe)
408 {
409 /* RXE should not be in any list */
410 assert(rxe->prev == NULL && rxe->next == NULL);
411 rxe_insert_tail(&qrx->rx_free, rxe);
412 }
413
414 /*
415 * Given a pointer to a pointer pointing to a buffer and the size of that
416 * buffer, copy the buffer into *prxe, expanding the RXE if necessary (its
417 * pointer may change due to realloc). *pi is the offset in bytes to copy the
418 * buffer to, and on success is updated to be the offset pointing after the
419 * copied buffer. *pptr is updated to point to the new location of the buffer.
420 */
421 static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
422 const unsigned char **pptr, size_t buf_len)
423 {
424 RXE *rxe;
425 unsigned char *dst;
426
427 if (!buf_len)
428 return 1;
429
430 if ((rxe = qrx_reserve_rxe(&qrx->rx_free, *prxe, *pi + buf_len)) == NULL)
431 return 0;
432
433 *prxe = rxe;
434 dst = (unsigned char *)rxe_data(rxe) + *pi;
435
436 memcpy(dst, *pptr, buf_len);
437 *pi += buf_len;
438 *pptr = dst;
439 return 1;
440 }
441
442 static uint32_t qrx_determine_enc_level(const QUIC_PKT_HDR *hdr)
443 {
444 switch (hdr->type) {
445 case QUIC_PKT_TYPE_INITIAL:
446 return QUIC_ENC_LEVEL_INITIAL;
447 case QUIC_PKT_TYPE_HANDSHAKE:
448 return QUIC_ENC_LEVEL_HANDSHAKE;
449 case QUIC_PKT_TYPE_0RTT:
450 return QUIC_ENC_LEVEL_0RTT;
451 case QUIC_PKT_TYPE_1RTT:
452 return QUIC_ENC_LEVEL_1RTT;
453
454 default:
455 assert(0);
456 case QUIC_PKT_TYPE_RETRY:
457 case QUIC_PKT_TYPE_VERSION_NEG:
458 return QUIC_ENC_LEVEL_INITIAL; /* not used */
459 }
460 }
461
462 static uint32_t rxe_determine_pn_space(RXE *rxe)
463 {
464 uint32_t enc_level;
465
466 enc_level = qrx_determine_enc_level(&rxe->hdr);
467 return ossl_quic_enc_level_to_pn_space(enc_level);
468 }
469
470 static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
471 RXE *first_rxe)
472 {
473 /* Ensure version is what we want. */
474 if (rxe->hdr.version != QUIC_VERSION_1
475 && rxe->hdr.version != QUIC_VERSION_NONE)
476 return 0;
477
478 /* Clients should never receive 0-RTT packets. */
479 if (rxe->hdr.type == QUIC_PKT_TYPE_0RTT)
480 return 0;
481
482 /* Version negotiation and retry packets must be the first packet. */
483 if (first_rxe != NULL && (rxe->hdr.type == QUIC_PKT_TYPE_VERSION_NEG
484 || rxe->hdr.type == QUIC_PKT_TYPE_RETRY))
485 return 0;
486
487 /*
488 * If this is not the first packet in a datagram, the destination connection
489 * ID must match the one in that packet.
490 */
491 if (first_rxe != NULL &&
492 !ossl_quic_conn_id_eq(&first_rxe->hdr.dst_conn_id,
493 &rxe->hdr.dst_conn_id))
494 return 0;
495
496 return 1;
497 }
498
499 /* Validate header and decode PN. */
500 static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe)
501 {
502 int pn_space = rxe_determine_pn_space(rxe);
503
504 if (!ossl_quic_wire_decode_pkt_hdr_pn(rxe->hdr.pn, rxe->hdr.pn_len,
505 qrx->largest_pn[pn_space],
506 &rxe->pn))
507 return 0;
508
509 /*
510 * Allow our user to decide whether to discard the packet before we try and
511 * decrypt it.
512 */
513 if (qrx->validation_cb != NULL
514 && !qrx->validation_cb(rxe->pn, pn_space, qrx->validation_cb_arg))
515 return 0;
516
517 return 1;
518 }
519
520 /*
521 * Tries to decrypt a packet payload.
522 *
523 * Returns 1 on success or 0 on failure (which is permanent). The payload is
524 * decrypted from src and written to dst. The buffer dst must be of at least
525 * src_len bytes in length. The actual length of the output in bytes is written
526 * to *dec_len on success, which will always be equal to or less than (usually
527 * less than) src_len.
528 */
529 static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
530 const unsigned char *src,
531 size_t src_len, size_t *dec_len,
532 const unsigned char *aad, size_t aad_len,
533 QUIC_PN pn, uint32_t enc_level)
534 {
535 int l = 0, l2 = 0;
536 unsigned char nonce[EVP_MAX_IV_LENGTH];
537 size_t nonce_len, i;
538 OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
539 enc_level, 1);
540
541 if (src_len > INT_MAX || aad_len > INT_MAX)
542 return 0;
543
544 /* We should not have been called if we do not have key material. */
545 if (!ossl_assert(el != NULL))
546 return 0;
547
548 if (el->tag_len >= src_len)
549 return 0;
550
551 /*
552 * If we have failed to authenticate a certain number of ciphertexts, refuse
553 * to decrypt any more ciphertexts.
554 */
555 if (el->op_count >= ossl_qrl_get_suite_max_forged_pkt(el->suite_id))
556 return 0;
557
558 /* Construct nonce (nonce=IV ^ PN). */
559 nonce_len = EVP_CIPHER_CTX_get_iv_length(el->cctx);
560 if (!ossl_assert(nonce_len >= sizeof(QUIC_PN)))
561 return 0;
562
563 memcpy(nonce, el->iv, nonce_len);
564 for (i = 0; i < sizeof(QUIC_PN); ++i)
565 nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
566
567 /* type and key will already have been setup; feed the IV. */
568 if (EVP_CipherInit_ex(el->cctx, NULL,
569 NULL, NULL, nonce, /*enc=*/0) != 1)
570 return 0;
571
572 /* Feed the AEAD tag we got so the cipher can validate it. */
573 if (EVP_CIPHER_CTX_ctrl(el->cctx, EVP_CTRL_AEAD_SET_TAG,
574 el->tag_len,
575 (unsigned char *)src + src_len - el->tag_len) != 1)
576 return 0;
577
578 /* Feed AAD data. */
579 if (EVP_CipherUpdate(el->cctx, NULL, &l, aad, aad_len) != 1)
580 return 0;
581
582 /* Feed encrypted packet body. */
583 if (EVP_CipherUpdate(el->cctx, dst, &l, src, src_len - el->tag_len) != 1)
584 return 0;
585
586 /* Ensure authentication succeeded. */
587 if (EVP_CipherFinal_ex(el->cctx, NULL, &l2) != 1) {
588 /* Authentication failed, increment failed auth counter. */
589 ++el->op_count;
590 return 0;
591 }
592
593 *dec_len = l;
594 return 1;
595 }
596
597 static ossl_inline void ignore_res(int x)
598 {
599 /* No-op. */
600 }
601
/*
 * Process a single packet in a datagram.
 *
 * pkt points into the URXE buffer and is advanced past the packet where its
 * length can be determined. Returns 1 if the packet was deferred for lack of
 * keys; returns 0 otherwise (covering both success and permanent failure,
 * which are deliberately not distinguished — see the return comments below).
 */
static int qrx_process_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
                           PACKET *pkt, size_t pkt_idx,
                           RXE **first_rxe,
                           size_t datagram_len)
{
    RXE *rxe;
    const unsigned char *eop = NULL; /* end of this packet, once known */
    size_t i, aad_len = 0, dec_len = 0;
    PACKET orig_pkt = *pkt; /* saved so we can re-decode after HP removal */
    const unsigned char *sop = PACKET_data(pkt); /* start of packet (AAD base) */
    unsigned char *dst;
    char need_second_decode = 0, already_processed = 0;
    QUIC_PKT_HDR_PTRS ptrs;
    uint32_t pn_space, enc_level;

    /*
     * Get a free RXE. If we need to allocate a new one, use the packet length
     * as a good ballpark figure.
     */
    rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(pkt));
    if (rxe == NULL)
        return 0;

    /* Have we already processed this packet? */
    if (pkt_is_marked(&urxe->processed, pkt_idx))
        already_processed = 1;

    /*
     * Decode the header into the RXE structure. We first decrypt and read the
     * unprotected part of the packet header (unless we already removed header
     * protection, in which case we decode all of it).
     */
    need_second_decode = !pkt_is_marked(&urxe->hpr_removed, pkt_idx);
    if (!ossl_quic_wire_decode_pkt_hdr(pkt,
                                       qrx->short_conn_id_len,
                                       need_second_decode, &rxe->hdr, &ptrs))
        goto malformed;

    /*
     * Our successful decode above included an intelligible length and the
     * PACKET is now pointing to the end of the QUIC packet.
     */
    eop = PACKET_data(pkt);

    /*
     * Make a note of the first RXE so we can later ensure the destination
     * connection IDs of all packets in a datagram match.
     */
    if (pkt_idx == 0)
        *first_rxe = rxe;

    /*
     * Early header validation. Since we now know the packet length, we can also
     * now skip over it if we already processed it.
     */
    if (already_processed
        || !qrx_validate_hdr_early(qrx, rxe, pkt_idx == 0 ? NULL : *first_rxe))
        goto malformed;

    if (rxe->hdr.type == QUIC_PKT_TYPE_VERSION_NEG
        || rxe->hdr.type == QUIC_PKT_TYPE_RETRY) {
        /*
         * Version negotiation and retry packets are a special case. They do not
         * contain a payload which needs decrypting and have no header
         * protection.
         */

        /* Just copy the payload from the URXE to the RXE. */
        if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len)) == NULL)
            /*
             * Allocation failure. EOP will be pointing to the end of the
             * datagram so processing of this datagram will end here.
             */
            goto malformed;

        /* We are now committed to returning the packet. */
        memcpy(rxe_data(rxe), rxe->hdr.data, rxe->hdr.len);
        pkt_mark(&urxe->processed, pkt_idx);

        rxe->hdr.data = rxe_data(rxe);

        /* Move RXE to pending. */
        rxe_remove(&qrx->rx_free, rxe);
        rxe_insert_tail(&qrx->rx_pending, rxe);
        return 0; /* success, did not defer */
    }

    /* Determine encryption level of packet. */
    enc_level = qrx_determine_enc_level(&rxe->hdr);

    /* If we do not have keying material for this encryption level yet, defer. */
    switch (ossl_qrl_enc_level_set_have_el(&qrx->el_set, enc_level)) {
        case 1:
            /* We have keys. */
            break;
        case 0:
            /* No keys yet. */
            goto cannot_decrypt;
        default:
            /* We already discarded keys for this EL, we will never process this.*/
            goto malformed;
    }

    /*
     * We will copy any token included in the packet to the start of our RXE
     * data buffer (so that we don't reference the URXE buffer any more and can
     * recycle it). Track our position in the RXE buffer by index instead of
     * pointer as the pointer may change as reallocs occur.
     */
    i = 0;

    /*
     * rxe->hdr.data is now pointing at the (encrypted) packet payload. rxe->hdr
     * also has fields pointing into the PACKET buffer which will be going away
     * soon (the URXE will be reused for another incoming packet).
     *
     * Firstly, relocate some of these fields into the RXE as needed.
     *
     * Relocate token buffer and fix pointer.
     */
    if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL
        && !qrx_relocate_buffer(qrx, &rxe, &i, &rxe->hdr.token,
                                rxe->hdr.token_len))
        goto malformed;

    /* Now remove header protection. */
    *pkt = orig_pkt;

    if (need_second_decode) {
        OSSL_QRL_ENC_LEVEL *el
            = ossl_qrl_enc_level_set_get(&qrx->el_set, enc_level, 1);

        assert(el != NULL); /* Already checked above */
        if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
            goto malformed;

        /*
         * We have removed header protection, so don't attempt to do it again if
         * the packet gets deferred and processed again.
         */
        pkt_mark(&urxe->hpr_removed, pkt_idx);

        /* Decode the now unprotected header. */
        if (ossl_quic_wire_decode_pkt_hdr(pkt, qrx->short_conn_id_len,
                                          0, &rxe->hdr, NULL) != 1)
            goto malformed;
    }

    /* Validate header and decode PN. */
    if (!qrx_validate_hdr(qrx, rxe))
        goto malformed;

    /*
     * We automatically discard INITIAL keys when successfully decrypting a
     * HANDSHAKE packet.
     *
     * NOTE(review): this discard occurs before qrx_decrypt_pkt_body below has
     * actually authenticated the packet — confirm that discarding on header
     * validation alone (rather than on successful decryption) is intended.
     */
    if (enc_level == QUIC_ENC_LEVEL_HANDSHAKE)
        ossl_qrl_enc_level_set_discard(&qrx->el_set, QUIC_ENC_LEVEL_INITIAL, 1);

    /*
     * The AAD data is the entire (unprotected) packet header including the PN.
     * The packet header has been unprotected in place, so we can just reuse the
     * PACKET buffer. The header ends where the payload begins.
     */
    aad_len = rxe->hdr.data - sop;

    /* Ensure the RXE buffer size is adequate for our payload. */
    if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL) {
        /*
         * Allocation failure, treat as malformed and do not bother processing
         * any further packets in the datagram as they are likely to also
         * encounter allocation failures.
         */
        eop = NULL;
        goto malformed;
    }

    /*
     * We decrypt the packet body to immediately after the token at the start of
     * the RXE buffer (where present).
     *
     * Do the decryption from the PACKET (which points into URXE memory) to our
     * RXE payload (single-copy decryption), then fixup the pointers in the
     * header to point to our new buffer.
     *
     * If decryption fails this is considered a permanent error; we defer
     * packets we don't yet have decryption keys for above, so if this fails,
     * something has gone wrong with the handshake process or a packet has been
     * corrupted.
     */
    dst = (unsigned char *)rxe_data(rxe) + i;
    if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
                              &dec_len, sop, aad_len, rxe->pn, enc_level))
        goto malformed;

    /*
     * We have now successfully decrypted the packet payload. If there are
     * additional packets in the datagram, it is possible we will fail to
     * decrypt them and need to defer them until we have some key material we
     * don't currently possess. If this happens, the URXE will be moved to the
     * deferred queue. Since a URXE corresponds to one datagram, which may
     * contain multiple packets, we must ensure any packets we have already
     * processed in the URXE are not processed again (this is an RFC
     * requirement). We do this by marking the nth packet in the datagram as
     * processed.
     *
     * We are now committed to returning this decrypted packet to the user,
     * meaning we now consider the packet processed and must mark it
     * accordingly.
     */
    pkt_mark(&urxe->processed, pkt_idx);

    /*
     * Update header to point to the decrypted buffer, which may be shorter
     * due to AEAD tags, block padding, etc.
     */
    rxe->hdr.data = dst;
    rxe->hdr.len = dec_len;
    rxe->data_len = dec_len;
    rxe->datagram_len = datagram_len;

    /* We processed the PN successfully, so update largest processed PN. */
    pn_space = rxe_determine_pn_space(rxe);
    if (rxe->pn > qrx->largest_pn[pn_space])
        qrx->largest_pn[pn_space] = rxe->pn;

    /* Copy across network addresses from URXE to RXE. */
    rxe->peer = urxe->peer;
    rxe->local = urxe->local;

    /* Move RXE to pending. */
    rxe_remove(&qrx->rx_free, rxe);
    rxe_insert_tail(&qrx->rx_pending, rxe);
    return 0; /* success, did not defer; not distinguished from failure */

cannot_decrypt:
    /*
     * We cannot process this packet right now (but might be able to later). We
     * MUST attempt to process any other packets in the datagram, so defer it
     * and skip over it.
     */
    assert(eop != NULL && eop >= PACKET_data(pkt));
    /*
     * We don't care if this fails as it will just result in the packet being at
     * the end of the datagram buffer.
     */
    ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
    return 1; /* deferred */

malformed:
    if (eop != NULL) {
        /*
         * This packet cannot be processed and will never be processable. We
         * were at least able to decode its header and determine its length, so
         * we can skip over it and try to process any subsequent packets in the
         * datagram.
         *
         * Mark as processed as an optimization.
         */
        assert(eop >= PACKET_data(pkt));
        pkt_mark(&urxe->processed, pkt_idx);
        /* We don't care if this fails (see above) */
        ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
    } else {
        /*
         * This packet cannot be processed and will never be processable.
         * Because even its header is not intelligible, we cannot examine any
         * further packets in the datagram because its length cannot be
         * discerned.
         *
         * Advance over the entire remainder of the datagram, and mark it as
         * processed as an optimization.
         */
        pkt_mark(&urxe->processed, pkt_idx);
        /* We don't care if this fails (see above) */
        ignore_res(PACKET_forward(pkt, PACKET_remaining(pkt)));
    }
    return 0; /* failure, did not defer; not distinguished from success */
}
882
883 /* Process a datagram which was received. */
884 static int qrx_process_datagram(OSSL_QRX *qrx, QUIC_URXE *e,
885 const unsigned char *data,
886 size_t data_len)
887 {
888 int have_deferred = 0;
889 PACKET pkt;
890 size_t pkt_idx = 0;
891 RXE *first_rxe = NULL;
892
893 qrx->bytes_received += data_len;
894
895 if (!PACKET_buf_init(&pkt, data, data_len))
896 return 0;
897
898 for (; PACKET_remaining(&pkt) > 0; ++pkt_idx) {
899 /*
900 * A packet smallest than the minimum possible QUIC packet size is not
901 * considered valid. We also ignore more than a certain number of
902 * packets within the same datagram.
903 */
904 if (PACKET_remaining(&pkt) < QUIC_MIN_VALID_PKT_LEN
905 || pkt_idx >= QUIC_MAX_PKT_PER_URXE)
906 break;
907
908 /*
909 * We note whether packet processing resulted in a deferral since
910 * this means we need to move the URXE to the deferred list rather
911 * than the free list after we're finished dealing with it for now.
912 *
913 * However, we don't otherwise care here whether processing succeeded or
914 * failed, as the RFC says even if a packet in a datagram is malformed,
915 * we should still try to process any packets following it.
916 *
917 * In the case where the packet is so malformed we can't determine its
918 * lenngth, qrx_process_pkt will take care of advancing to the end of
919 * the packet, so we will exit the loop automatically in this case.
920 */
921 if (qrx_process_pkt(qrx, e, &pkt, pkt_idx, &first_rxe, data_len))
922 have_deferred = 1;
923 }
924
925 /* Only report whether there were any deferrals. */
926 return have_deferred;
927 }
928
929 /* Process a single pending URXE. */
930 static int qrx_process_one_urxl(OSSL_QRX *qrx, QUIC_URXE *e)
931 {
932 int was_deferred;
933
934 /* The next URXE we process should be at the head of the pending list. */
935 if (!ossl_assert(e == qrx->urx_pending.head))
936 return 0;
937
938 /*
939 * Attempt to process the datagram. The return value indicates only if
940 * processing of the datagram was deferred. If we failed to process the
941 * datagram, we do not attempt to process it again and silently eat the
942 * error.
943 */
944 was_deferred = qrx_process_datagram(qrx, e, ossl_quic_urxe_data(e),
945 e->data_len);
946
947 /*
948 * Remove the URXE from the pending list and return it to
949 * either the free or deferred list.
950 */
951 ossl_quic_urxe_remove(&qrx->urx_pending, e);
952 if (was_deferred > 0)
953 ossl_quic_urxe_insert_tail(&qrx->urx_deferred, e);
954 else
955 ossl_quic_demux_release_urxe(qrx->demux, e);
956
957 return 1;
958 }
959
960 /* Process any pending URXEs to generate pending RXEs. */
961 static int qrx_process_urxl(OSSL_QRX *qrx)
962 {
963 QUIC_URXE *e;
964
965 while ((e = qrx->urx_pending.head) != NULL)
966 if (!qrx_process_one_urxl(qrx, e))
967 return 0;
968
969 return 1;
970 }
971
972 int ossl_qrx_read_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT *pkt)
973 {
974 RXE *rxe;
975
976 if (!ossl_qrx_processed_read_pending(qrx)) {
977 if (!qrx_process_urxl(qrx))
978 return 0;
979
980 if (!ossl_qrx_processed_read_pending(qrx))
981 return 0;
982 }
983
984 rxe = qrx_pop_pending_rxe(qrx);
985 if (!ossl_assert(rxe != NULL))
986 return 0;
987
988 pkt->handle = rxe;
989 pkt->hdr = &rxe->hdr;
990 pkt->peer
991 = BIO_ADDR_family(&rxe->peer) != AF_UNSPEC ? &rxe->peer : NULL;
992 pkt->local
993 = BIO_ADDR_family(&rxe->local) != AF_UNSPEC ? &rxe->local : NULL;
994 return 1;
995 }
996
997 void ossl_qrx_release_pkt(OSSL_QRX *qrx, void *handle)
998 {
999 RXE *rxe = handle;
1000
1001 qrx_recycle_rxe(qrx, rxe);
1002 }
1003
1004 uint64_t ossl_qrx_get_bytes_received(OSSL_QRX *qrx, int clear)
1005 {
1006 uint64_t v = qrx->bytes_received;
1007
1008 if (clear)
1009 qrx->bytes_received = 0;
1010
1011 return v;
1012 }
1013
1014 int ossl_qrx_set_early_validation_cb(OSSL_QRX *qrx,
1015 ossl_qrx_early_validation_cb *cb,
1016 void *cb_arg)
1017 {
1018 qrx->validation_cb = cb;
1019 qrx->validation_cb_arg = cb_arg;
1020 return 1;
1021 }
1022
1023 uint64_t ossl_qrx_get_cur_epoch_forged_pkt_count(OSSL_QRX *qrx,
1024 uint32_t enc_level)
1025 {
1026 OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
1027 enc_level, 1);
1028
1029 return el == NULL ? UINT64_MAX : el->op_count;
1030 }
1031
1032 uint64_t ossl_qrx_get_max_epoch_forged_pkt_count(OSSL_QRX *qrx,
1033 uint32_t enc_level)
1034 {
1035 OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
1036 enc_level, 1);
1037
1038 return el == NULL ? UINT64_MAX
1039 : ossl_qrl_get_suite_max_forged_pkt(el->suite_id);
1040 }