1/*
2 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10#include "internal/quic_txp.h"
11#include "internal/quic_fifd.h"
12#include "internal/quic_stream_map.h"
13#include "internal/common.h"
14#include <openssl/err.h>
15
16#define MIN_CRYPTO_HDR_SIZE 3
17
18#define MIN_FRAME_SIZE_HANDSHAKE_DONE 1
19#define MIN_FRAME_SIZE_MAX_DATA 2
20#define MIN_FRAME_SIZE_ACK 5
21#define MIN_FRAME_SIZE_CRYPTO (MIN_CRYPTO_HDR_SIZE + 1)
22#define MIN_FRAME_SIZE_STREAM 3 /* minimum useful size (for non-FIN) */
23#define MIN_FRAME_SIZE_MAX_STREAMS_BIDI 2
24#define MIN_FRAME_SIZE_MAX_STREAMS_UNI 2
25
26struct ossl_quic_tx_packetiser_st {
27 OSSL_QUIC_TX_PACKETISER_ARGS args;
28
29 /*
30 * Opaque initial token blob provided by caller. TXP frees using the
31 * callback when it is no longer needed.
32 */
33 const unsigned char *initial_token;
34 size_t initial_token_len;
35 ossl_quic_initial_token_free_fn *initial_token_free_cb;
36 void *initial_token_free_cb_arg;
37
38 /* Subcomponents of the TXP that we own. */
39 QUIC_FIFD fifd; /* QUIC Frame-in-Flight Dispatcher */
40
41 /* Internal state. */
42 uint64_t next_pn[QUIC_PN_SPACE_NUM]; /* Next PN to use in given PN space. */
43 OSSL_TIME last_tx_time; /* Last time a packet was generated, or 0. */
44
45 /* Internal state - frame (re)generation flags. */
46 unsigned int want_handshake_done : 1;
47 unsigned int want_max_data : 1;
48 unsigned int want_max_streams_bidi : 1;
49 unsigned int want_max_streams_uni : 1;
50
51 /* Internal state - frame (re)generation flags - per PN space. */
52 unsigned int want_ack : QUIC_PN_SPACE_NUM;
53 unsigned int force_ack_eliciting : QUIC_PN_SPACE_NUM;
54
55 /*
56 * Internal state - connection close terminal state.
57 * Once this is set it is never unset, unlike the other want_ flags; we keep
58 * sending the frame in every packet.
59 */
60 unsigned int want_conn_close : 1;
61
62 /* Has the handshake been completed? */
63 unsigned int handshake_complete : 1;
64
65 OSSL_QUIC_FRAME_CONN_CLOSE conn_close_frame;
66
67 /* Internal state - packet assembly. */
68 unsigned char *scratch; /* scratch buffer for packet assembly */
69 size_t scratch_len; /* number of bytes allocated for scratch */
70 OSSL_QTX_IOVEC *iovec; /* scratch iovec array for use with QTX */
71 size_t alloc_iovec; /* size of iovec array */
72};
73
74/*
75 * The TX helper records state used while generating frames into packets. It
76 * enables serialization into the packet to be done "transactionally" where
77 * serialization of a frame can be rolled back if it fails midway (e.g. if it
78 * does not fit).
79 */
80struct tx_helper {
81 OSSL_QUIC_TX_PACKETISER *txp;
82 /*
83 * The Maximum Packet Payload Length in bytes. This is the amount of
84 * space we have to generate frames into.
85 */
86 size_t max_ppl;
87 /*
88 * Number of bytes we have generated so far.
89 */
90 size_t bytes_appended;
91 /*
92 * Number of scratch bytes in txp->scratch we have used so far. Some iovecs
93 * will reference this scratch buffer. When we need to use more of it (e.g.
94 * when we need to put frame headers somewhere), we append to the scratch
95 * buffer, resizing if necessary, and increase this accordingly.
96 */
97 size_t scratch_bytes;
98 /*
99 * Bytes reserved in the MaxPPL budget. We keep this number of bytes spare
100 * until reserve_allowed is set to 1. Currently this is always at most 1, as
101 * a PING frame takes up one byte and this mechanism is only used to ensure
102 * we can encode a PING frame if we have been asked to ensure a packet is
103 * ACK-eliciting and we are unsure if we are going to add any other
104 * ACK-eliciting frames before we reach our MaxPPL budget.
105 */
106 size_t reserve;
107 /*
108 * Number of iovecs we have currently appended. This is the number of
109 * entries valid in txp->iovec.
110 */
111 size_t num_iovec;
112 /*
113 * Whether we are allowed to make use of the reserve bytes in our MaxPPL
114 * budget. This is used to ensure we have room to append a PING frame later
115 * if we need to. Once we know we will not need to append a PING frame, this
116 * is set to 1.
117 */
118 unsigned int reserve_allowed : 1;
119 /*
120 * Set to 1 if we have appended a STREAM frame with an implicit length. If
121 * this happens we should never append another frame after that frame as it
122 * cannot be validly encoded. This is just a safety check.
123 */
124 unsigned int done_implicit : 1;
125 struct {
126 /*
127 * The fields in this structure are valid if active is set, which means
128 * that a serialization transaction is currently in progress.
129 */
130 unsigned char *data;
131 WPACKET wpkt;
132 unsigned int active : 1;
133 } txn;
134};
135
136static void tx_helper_rollback(struct tx_helper *h);
137static int txp_ensure_iovec(OSSL_QUIC_TX_PACKETISER *txp, size_t num);
138
139/* Initialises the TX helper. */
140static int tx_helper_init(struct tx_helper *h, OSSL_QUIC_TX_PACKETISER *txp,
141 size_t max_ppl, size_t reserve)
142{
143 if (reserve > max_ppl)
144 return 0;
145
146 h->txp = txp;
147 h->max_ppl = max_ppl;
148 h->reserve = reserve;
149 h->num_iovec = 0;
150 h->bytes_appended = 0;
151 h->scratch_bytes = 0;
152 h->reserve_allowed = 0;
153 h->done_implicit = 0;
154 h->txn.data = NULL;
155 h->txn.active = 0;
156
157 if (max_ppl > h->txp->scratch_len) {
158 unsigned char *scratch;
159
160 scratch = OPENSSL_realloc(h->txp->scratch, max_ppl);
161 if (scratch == NULL)
162 return 0;
163
164 h->txp->scratch = scratch;
165 h->txp->scratch_len = max_ppl;
166 }
167
168 return 1;
169}
170
171static void tx_helper_cleanup(struct tx_helper *h)
172{
173 if (h->txn.active)
174 tx_helper_rollback(h);
175
176 h->txp = NULL;
177}
178
179static void tx_helper_unrestrict(struct tx_helper *h)
180{
181 h->reserve_allowed = 1;
182}
183
184/*
185 * Append an extent of memory to the iovec list. The memory must remain
186 * allocated until we finish generating the packet and call the QTX.
187 *
188 * In general, the buffers passed to this function will be from one of two
189 * ranges:
190 *
191 * - Application data contained in stream buffers managed elsewhere
192 * in the QUIC stack; or
193 *
194 * - Control frame data appended into txp->scratch using tx_helper_begin and
195 * tx_helper_commit.
196 *
197 */
198static int tx_helper_append_iovec(struct tx_helper *h,
199 const unsigned char *buf,
200 size_t buf_len)
201{
202 if (buf_len == 0)
203 return 1;
204
205 if (!ossl_assert(!h->done_implicit))
206 return 0;
207
208 if (!txp_ensure_iovec(h->txp, h->num_iovec + 1))
209 return 0;
210
211 h->txp->iovec[h->num_iovec].buf = buf;
212 h->txp->iovec[h->num_iovec].buf_len = buf_len;
213
214 ++h->num_iovec;
215 h->bytes_appended += buf_len;
216 return 1;
217}
218
219/*
220 * How many more bytes of space do we have left in our plaintext packet payload?
221 */
222static size_t tx_helper_get_space_left(struct tx_helper *h)
223{
224 return h->max_ppl
225 - (h->reserve_allowed ? 0 : h->reserve) - h->bytes_appended;
226}
227
228/*
229 * Begin a control frame serialization transaction. This allows the
230 * serialization of the control frame to be backed out if it turns out it won't
231 * fit. Write the control frame to the returned WPACKET. Ensure you always
232 * call tx_helper_rollback or tx_helper_commit (or tx_helper_cleanup). Returns
233 * NULL on failure.
234 */
235static WPACKET *tx_helper_begin(struct tx_helper *h)
236{
237 size_t space_left, len;
238 unsigned char *data;
239
240 if (!ossl_assert(!h->txn.active))
241 return NULL;
242
243 if (!ossl_assert(!h->done_implicit))
244 return NULL;
245
246 data = (unsigned char *)h->txp->scratch + h->scratch_bytes;
247 len = h->txp->scratch_len - h->scratch_bytes;
248
249 space_left = tx_helper_get_space_left(h);
250 if (!ossl_assert(space_left <= len))
251 return NULL;
252
253 if (!WPACKET_init_static_len(&h->txn.wpkt, data, len, 0))
254 return NULL;
255
256 if (!WPACKET_set_max_size(&h->txn.wpkt, space_left)) {
257 WPACKET_cleanup(&h->txn.wpkt);
258 return NULL;
259 }
260
261 h->txn.data = data;
262 h->txn.active = 1;
263 return &h->txn.wpkt;
264}
265
266static void tx_helper_end(struct tx_helper *h, int success)
267{
268 if (success)
269 WPACKET_finish(&h->txn.wpkt);
270 else
271 WPACKET_cleanup(&h->txn.wpkt);
272
273 h->txn.active = 0;
274 h->txn.data = NULL;
275}
276
277/* Abort a control frame serialization transaction. */
278static void tx_helper_rollback(struct tx_helper *h)
279{
280 if (!h->txn.active)
281 return;
282
283 tx_helper_end(h, 0);
284}
285
286/* Commit a control frame. */
287static int tx_helper_commit(struct tx_helper *h)
288{
289 size_t l = 0;
290
291 if (!h->txn.active)
292 return 0;
293
294 if (!WPACKET_get_total_written(&h->txn.wpkt, &l)) {
295 tx_helper_end(h, 0);
296 return 0;
297 }
298
299 if (!tx_helper_append_iovec(h, h->txn.data, l)) {
300 tx_helper_end(h, 0);
301 return 0;
302 }
303
304 h->scratch_bytes += l;
305 tx_helper_end(h, 1);
306 return 1;
307}
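/*
 * Typical transaction usage (illustrative sketch only; the concrete frame
 * encoder and its arguments vary by call site):
 *
 *   WPACKET *wpkt = tx_helper_begin(h);
 *
 *   if (wpkt == NULL)
 *       return 0;
 *
 *   if (ossl_quic_wire_encode_frame_handshake_done(wpkt)) {
 *       if (!tx_helper_commit(h))
 *           return 0;
 *   } else {
 *       tx_helper_rollback(h); // frame did not fit; nothing was appended
 *   }
 */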
308
309static QUIC_SSTREAM *get_sstream_by_id(uint64_t stream_id, uint32_t pn_space,
310 void *arg);
311static void on_regen_notify(uint64_t frame_type, uint64_t stream_id,
312 QUIC_TXPIM_PKT *pkt, void *arg);
313static int sstream_is_pending(QUIC_SSTREAM *sstream);
314static int txp_el_pending(OSSL_QUIC_TX_PACKETISER *txp, uint32_t enc_level,
315 uint32_t archetype);
316static int txp_generate_for_el(OSSL_QUIC_TX_PACKETISER *txp, uint32_t enc_level,
317 uint32_t archetype,
318 char is_last_in_dgram,
319 char dgram_contains_initial);
320static size_t txp_determine_pn_len(OSSL_QUIC_TX_PACKETISER *txp);
321static int txp_determine_ppl_from_pl(OSSL_QUIC_TX_PACKETISER *txp,
322 size_t pl,
323 uint32_t enc_level,
324 size_t hdr_len,
325 size_t *r);
326static size_t txp_get_mdpl(OSSL_QUIC_TX_PACKETISER *txp);
327static int txp_generate_for_el_actual(OSSL_QUIC_TX_PACKETISER *txp,
328 uint32_t enc_level,
329 uint32_t archetype,
330 size_t min_ppl,
331 size_t max_ppl,
332 size_t pkt_overhead,
333 QUIC_PKT_HDR *phdr);
334
335OSSL_QUIC_TX_PACKETISER *ossl_quic_tx_packetiser_new(const OSSL_QUIC_TX_PACKETISER_ARGS *args)
336{
337 OSSL_QUIC_TX_PACKETISER *txp;
338
339 if (args == NULL
340 || args->qtx == NULL
341 || args->txpim == NULL
342 || args->cfq == NULL
343 || args->ackm == NULL
344 || args->qsm == NULL
345 || args->conn_txfc == NULL
346 || args->conn_rxfc == NULL) {
347 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
348 return NULL;
349 }
350
351 txp = OPENSSL_zalloc(sizeof(*txp));
352 if (txp == NULL)
353 return NULL;
354
355 txp->args = *args;
356 txp->last_tx_time = ossl_time_zero();
357
358 if (!ossl_quic_fifd_init(&txp->fifd,
359 txp->args.cfq, txp->args.ackm, txp->args.txpim,
360 get_sstream_by_id, txp,
361 on_regen_notify, txp)) {
362 OPENSSL_free(txp);
363 return NULL;
364 }
365
366 return txp;
367}
368
369void ossl_quic_tx_packetiser_free(OSSL_QUIC_TX_PACKETISER *txp)
370{
371 if (txp == NULL)
372 return;
373
374 ossl_quic_tx_packetiser_set_initial_token(txp, NULL, 0, NULL, NULL);
375 ossl_quic_fifd_cleanup(&txp->fifd);
376 OPENSSL_free(txp->iovec);
377 OPENSSL_free(txp->conn_close_frame.reason);
378 OPENSSL_free(txp->scratch);
379 OPENSSL_free(txp);
380}
381
382void ossl_quic_tx_packetiser_set_initial_token(OSSL_QUIC_TX_PACKETISER *txp,
383 const unsigned char *token,
384 size_t token_len,
385 ossl_quic_initial_token_free_fn *free_cb,
386 void *free_cb_arg)
387{
388 if (txp->initial_token != NULL && txp->initial_token_free_cb != NULL)
389 txp->initial_token_free_cb(txp->initial_token, txp->initial_token_len,
390 txp->initial_token_free_cb_arg);
391
392 txp->initial_token = token;
393 txp->initial_token_len = token_len;
394 txp->initial_token_free_cb = free_cb;
395 txp->initial_token_free_cb_arg = free_cb_arg;
396}
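/*
 * Ownership example (illustrative): the caller hands over a heap-allocated
 * token together with a callback matching the invocation above; the TXP
 * releases the token via that callback once it no longer needs it.
 *
 *   static void free_token(const unsigned char *buf, size_t buf_len, void *arg)
 *   {
 *       OPENSSL_free((unsigned char *)buf);
 *   }
 *
 *   ossl_quic_tx_packetiser_set_initial_token(txp, token, token_len,
 *                                             free_token, NULL);
 */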
397
398int ossl_quic_tx_packetiser_set_cur_dcid(OSSL_QUIC_TX_PACKETISER *txp,
399 const QUIC_CONN_ID *dcid)
400{
401 if (dcid == NULL) {
402 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
403 return 0;
404 }
405
406 txp->args.cur_dcid = *dcid;
407 return 1;
408}
409
410int ossl_quic_tx_packetiser_set_cur_scid(OSSL_QUIC_TX_PACKETISER *txp,
411 const QUIC_CONN_ID *scid)
412{
413 if (scid == NULL) {
414 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
415 return 0;
416 }
417
418 txp->args.cur_scid = *scid;
419 return 1;
420}
421
422/* Change the destination L4 address the TXP uses to send datagrams. */
423int ossl_quic_tx_packetiser_set_peer(OSSL_QUIC_TX_PACKETISER *txp,
424 const BIO_ADDR *peer)
425{
426 if (peer == NULL) {
427 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
428 return 0;
429 }
430
431 txp->args.peer = *peer;
432 return 1;
433}
434
435int ossl_quic_tx_packetiser_discard_enc_level(OSSL_QUIC_TX_PACKETISER *txp,
436 uint32_t enc_level)
437{
438 if (enc_level >= QUIC_ENC_LEVEL_NUM) {
439 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
440 return 0;
441 }
442
443 if (enc_level != QUIC_ENC_LEVEL_0RTT)
444 txp->args.crypto[ossl_quic_enc_level_to_pn_space(enc_level)] = NULL;
445
446 return 1;
447}
448
449void ossl_quic_tx_packetiser_notify_handshake_complete(OSSL_QUIC_TX_PACKETISER *txp)
450{
451 txp->handshake_complete = 1;
452}
453
454void ossl_quic_tx_packetiser_schedule_handshake_done(OSSL_QUIC_TX_PACKETISER *txp)
455{
456 txp->want_handshake_done = 1;
457}
458
459void ossl_quic_tx_packetiser_schedule_ack_eliciting(OSSL_QUIC_TX_PACKETISER *txp,
460 uint32_t pn_space)
461{
462 txp->force_ack_eliciting |= (1UL << pn_space);
463}
464
465#define TXP_ERR_INTERNAL 0 /* Internal (e.g. alloc) error */
466#define TXP_ERR_SUCCESS 1 /* Success */
467#define TXP_ERR_SPACE 2 /* Not enough room for another packet */
468#define TXP_ERR_INPUT 3 /* Invalid/malformed input */
469
470int ossl_quic_tx_packetiser_has_pending(OSSL_QUIC_TX_PACKETISER *txp,
471 uint32_t archetype,
472 uint32_t flags)
473{
474 uint32_t enc_level;
475 int bypass_cc = ((flags & TX_PACKETISER_BYPASS_CC) != 0);
476
477 if (!bypass_cc && !txp->args.cc_method->can_send(txp->args.cc_data))
478 return 0;
479
480 for (enc_level = QUIC_ENC_LEVEL_INITIAL;
481 enc_level < QUIC_ENC_LEVEL_NUM;
482 ++enc_level)
483 if (txp_el_pending(txp, enc_level, archetype))
484 return 1;
485
486 return 0;
487}
488
489/*
490 * Generates a datagram by polling the various ELs to determine if they want to
491 * generate any frames, coalescing packets for all such ELs into a single
492 * datagram.
493 */
494int ossl_quic_tx_packetiser_generate(OSSL_QUIC_TX_PACKETISER *txp,
495 uint32_t archetype)
496{
497 uint32_t enc_level;
498 char have_pkt_for_el[QUIC_ENC_LEVEL_NUM], is_last_in_dgram;
499 size_t num_el_in_dgram = 0, pkts_done = 0;
500 int rc;
501
502 if (!txp->args.cc_method->can_send(txp->args.cc_data))
503 return TX_PACKETISER_RES_NO_PKT;
504
505 for (enc_level = QUIC_ENC_LEVEL_INITIAL;
506 enc_level < QUIC_ENC_LEVEL_NUM;
507 ++enc_level) {
508 have_pkt_for_el[enc_level] = txp_el_pending(txp, enc_level, archetype);
509 if (have_pkt_for_el[enc_level])
510 ++num_el_in_dgram;
511 }
512
513 if (num_el_in_dgram == 0)
514 return TX_PACKETISER_RES_NO_PKT;
515
516 /*
517 * Should not be needed, but a sanity check in case anyone else has been
518 * using the QTX.
519 */
520 ossl_qtx_finish_dgram(txp->args.qtx);
521
522 for (enc_level = QUIC_ENC_LEVEL_INITIAL;
523 enc_level < QUIC_ENC_LEVEL_NUM;
524 ++enc_level) {
525 if (!have_pkt_for_el[enc_level])
526 continue;
527
528 is_last_in_dgram = (pkts_done + 1 == num_el_in_dgram);
529 rc = txp_generate_for_el(txp, enc_level, archetype, is_last_in_dgram,
530 have_pkt_for_el[QUIC_ENC_LEVEL_INITIAL]);
531
532 if (rc != TXP_ERR_SUCCESS) {
533 /*
534 * If we already successfully did at least one, make sure we report
535 * this via the return code.
536 */
537 if (pkts_done > 0)
538 break;
539 else
540 return TX_PACKETISER_RES_FAILURE;
541 }
542
543 ++pkts_done;
544 }
545
546 ossl_qtx_finish_dgram(txp->args.qtx);
547 return TX_PACKETISER_RES_SENT_PKT;
548}
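/*
 * Typical driver loop (illustrative sketch; archetype constant names are as
 * declared in internal/quic_txp.h):
 *
 *   while (ossl_quic_tx_packetiser_has_pending(txp,
 *                                              TX_PACKETISER_ARCHETYPE_NORMAL,
 *                                              0)) {
 *       int res = ossl_quic_tx_packetiser_generate(txp,
 *                                                  TX_PACKETISER_ARCHETYPE_NORMAL);
 *
 *       if (res != TX_PACKETISER_RES_SENT_PKT)
 *           break; // TX_PACKETISER_RES_NO_PKT or TX_PACKETISER_RES_FAILURE
 *   }
 */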
549
550struct archetype_data {
551 unsigned int allow_ack : 1;
552 unsigned int allow_ping : 1;
553 unsigned int allow_crypto : 1;
554 unsigned int allow_handshake_done : 1;
555 unsigned int allow_path_challenge : 1;
556 unsigned int allow_path_response : 1;
557 unsigned int allow_new_conn_id : 1;
558 unsigned int allow_retire_conn_id : 1;
559 unsigned int allow_stream_rel : 1;
560 unsigned int allow_conn_fc : 1;
561 unsigned int allow_conn_close : 1;
562 unsigned int allow_cfq_other : 1;
563 unsigned int allow_new_token : 1;
564 unsigned int allow_force_ack_eliciting : 1;
565};
566
567static const struct archetype_data archetypes[QUIC_ENC_LEVEL_NUM][TX_PACKETISER_ARCHETYPE_NUM] = {
568 /* EL 0(INITIAL) */
569 {
570 /* EL 0(INITIAL) - Archetype 0(NORMAL) */
571 {
572 /*allow_ack =*/ 1,
573 /*allow_ping =*/ 1,
574 /*allow_crypto =*/ 1,
575 /*allow_handshake_done =*/ 0,
576 /*allow_path_challenge =*/ 0,
577 /*allow_path_response =*/ 0,
578 /*allow_new_conn_id =*/ 0,
579 /*allow_retire_conn_id =*/ 0,
580 /*allow_stream_rel =*/ 0,
581 /*allow_conn_fc =*/ 0,
582 /*allow_conn_close =*/ 1,
583 /*allow_cfq_other =*/ 1,
584 /*allow_new_token =*/ 0,
585 /*allow_force_ack_eliciting =*/ 1,
586 },
587 /* EL 0(INITIAL) - Archetype 1(ACK_ONLY) */
588 {
589 /*allow_ack =*/ 1,
590 /*allow_ping =*/ 0,
591 /*allow_crypto =*/ 0,
592 /*allow_handshake_done =*/ 0,
593 /*allow_path_challenge =*/ 0,
594 /*allow_path_response =*/ 0,
595 /*allow_new_conn_id =*/ 0,
596 /*allow_retire_conn_id =*/ 0,
597 /*allow_stream_rel =*/ 0,
598 /*allow_conn_fc =*/ 0,
599 /*allow_conn_close =*/ 0,
600 /*allow_cfq_other =*/ 0,
601 /*allow_new_token =*/ 0,
602 /*allow_force_ack_eliciting =*/ 1,
603 },
604 },
605 /* EL 1(HANDSHAKE) */
606 {
607 /* EL 1(HANDSHAKE) - Archetype 0(NORMAL) */
608 {
609 /*allow_ack =*/ 1,
610 /*allow_ping =*/ 1,
611 /*allow_crypto =*/ 1,
612 /*allow_handshake_done =*/ 0,
613 /*allow_path_challenge =*/ 0,
614 /*allow_path_response =*/ 0,
615 /*allow_new_conn_id =*/ 0,
616 /*allow_retire_conn_id =*/ 0,
617 /*allow_stream_rel =*/ 0,
618 /*allow_conn_fc =*/ 0,
619 /*allow_conn_close =*/ 1,
620 /*allow_cfq_other =*/ 1,
621 /*allow_new_token =*/ 0,
622 /*allow_force_ack_eliciting =*/ 1,
623 },
624 /* EL 1(HANDSHAKE) - Archetype 1(ACK_ONLY) */
625 {
626 /*allow_ack =*/ 1,
627 /*allow_ping =*/ 0,
628 /*allow_crypto =*/ 0,
629 /*allow_handshake_done =*/ 0,
630 /*allow_path_challenge =*/ 0,
631 /*allow_path_response =*/ 0,
632 /*allow_new_conn_id =*/ 0,
633 /*allow_retire_conn_id =*/ 0,
634 /*allow_stream_rel =*/ 0,
635 /*allow_conn_fc =*/ 0,
636 /*allow_conn_close =*/ 0,
637 /*allow_cfq_other =*/ 0,
638 /*allow_new_token =*/ 0,
639 /*allow_force_ack_eliciting =*/ 1,
640 },
641 },
642 /* EL 2(0RTT) */
643 {
644 /* EL 2(0RTT) - Archetype 0(NORMAL) */
645 {
646 /*allow_ack =*/ 0,
647 /*allow_ping =*/ 1,
648 /*allow_crypto =*/ 0,
649 /*allow_handshake_done =*/ 0,
650 /*allow_path_challenge =*/ 0,
651 /*allow_path_response =*/ 0,
652 /*allow_new_conn_id =*/ 1,
653 /*allow_retire_conn_id =*/ 1,
654 /*allow_stream_rel =*/ 1,
655 /*allow_conn_fc =*/ 1,
656 /*allow_conn_close =*/ 1,
657 /*allow_cfq_other =*/ 0,
658 /*allow_new_token =*/ 0,
659 /*allow_force_ack_eliciting =*/ 0,
660 },
661 /* EL 2(0RTT) - Archetype 1(ACK_ONLY) */
662 {
663 /*allow_ack =*/ 0,
664 /*allow_ping =*/ 0,
665 /*allow_crypto =*/ 0,
666 /*allow_handshake_done =*/ 0,
667 /*allow_path_challenge =*/ 0,
668 /*allow_path_response =*/ 0,
669 /*allow_new_conn_id =*/ 0,
670 /*allow_retire_conn_id =*/ 0,
671 /*allow_stream_rel =*/ 0,
672 /*allow_conn_fc =*/ 0,
673 /*allow_conn_close =*/ 0,
674 /*allow_cfq_other =*/ 0,
675 /*allow_new_token =*/ 0,
676 /*allow_force_ack_eliciting =*/ 0,
677 },
678 },
679 /* EL 3(1RTT) */
680 {
681 /* EL 3(1RTT) - Archetype 0(NORMAL) */
682 {
683 /*allow_ack =*/ 1,
684 /*allow_ping =*/ 1,
685 /*allow_crypto =*/ 1,
686 /*allow_handshake_done =*/ 1,
687 /*allow_path_challenge =*/ 0,
688 /*allow_path_response =*/ 0,
689 /*allow_new_conn_id =*/ 1,
690 /*allow_retire_conn_id =*/ 1,
691 /*allow_stream_rel =*/ 1,
692 /*allow_conn_fc =*/ 1,
693 /*allow_conn_close =*/ 1,
694 /*allow_cfq_other =*/ 1,
695 /*allow_new_token =*/ 1,
696 /*allow_force_ack_eliciting =*/ 1,
697 },
698 /* EL 3(1RTT) - Archetype 1(ACK_ONLY) */
699 {
700 /*allow_ack =*/ 1,
701 /*allow_ping =*/ 0,
702 /*allow_crypto =*/ 0,
703 /*allow_handshake_done =*/ 0,
704 /*allow_path_challenge =*/ 0,
705 /*allow_path_response =*/ 0,
706 /*allow_new_conn_id =*/ 0,
707 /*allow_retire_conn_id =*/ 0,
708 /*allow_stream_rel =*/ 0,
709 /*allow_conn_fc =*/ 0,
710 /*allow_conn_close =*/ 0,
711 /*allow_cfq_other =*/ 0,
712 /*allow_new_token =*/ 0,
713 /*allow_force_ack_eliciting =*/ 1,
714 }
715 }
716};
717
718static int txp_get_archetype_data(uint32_t enc_level,
719 uint32_t archetype,
720 struct archetype_data *a)
721{
722 if (enc_level >= QUIC_ENC_LEVEL_NUM
723 || archetype >= TX_PACKETISER_ARCHETYPE_NUM)
724 return 0;
725
726 /* No need to avoid copying this as it should not exceed one int in size. */
727 *a = archetypes[enc_level][archetype];
728 return 1;
729}
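/*
 * For example, the entry for EL 3(1RTT) with archetype 1(ACK_ONLY) in the
 * table above has allow_ack = 1 but allow_crypto = 0 and allow_stream_rel = 0,
 * so an ACK_ONLY packet at the 1-RTT EL may carry ACK frames (and PADDING)
 * but no CRYPTO or STREAM frames.
 */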
730
731/*
732 * Returns 1 if the given EL wants to produce one or more frames.
733 * Always returns 0 if the given EL is discarded.
734 */
735static int txp_el_pending(OSSL_QUIC_TX_PACKETISER *txp, uint32_t enc_level,
736 uint32_t archetype)
737{
738 struct archetype_data a;
739 uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
740 QUIC_CFQ_ITEM *cfq_item;
741
742 if (!ossl_qtx_is_enc_level_provisioned(txp->args.qtx, enc_level))
743 return 0;
744
745 if (!txp_get_archetype_data(enc_level, archetype, &a))
746 return 0;
747
748 /* Does the crypto stream for this EL want to produce anything? */
749 if (a.allow_crypto && sstream_is_pending(txp->args.crypto[pn_space]))
750 return 1;
751
752 /* Does the ACKM for this PN space want to produce anything? */
753 if (a.allow_ack && (ossl_ackm_is_ack_desired(txp->args.ackm, pn_space)
754 || (txp->want_ack & (1UL << pn_space)) != 0))
755 return 1;
756
757 /* Do we need to force emission of an ACK-eliciting packet? */
758 if (a.allow_force_ack_eliciting
759 && (txp->force_ack_eliciting & (1UL << pn_space)) != 0)
760 return 1;
761
762 /* Does the connection-level RXFC want to produce a frame? */
763 if (a.allow_conn_fc && (txp->want_max_data
764 || ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 0)))
765 return 1;
766
767 /* Do we want to produce a MAX_STREAMS frame? */
768 if (a.allow_conn_fc && (txp->want_max_streams_bidi
769 || txp->want_max_streams_uni))
770 return 1;
771
772 /* Do we want to produce a HANDSHAKE_DONE frame? */
773 if (a.allow_handshake_done && txp->want_handshake_done)
774 return 1;
775
776 /* Do we want to produce a CONNECTION_CLOSE frame? */
777 if (a.allow_conn_close && txp->want_conn_close)
778 return 1;
779
780 /* Does the CFQ have any frames queued for this PN space? */
781 if (enc_level != QUIC_ENC_LEVEL_0RTT)
782 for (cfq_item = ossl_quic_cfq_get_priority_head(txp->args.cfq, pn_space);
783 cfq_item != NULL;
784 cfq_item = ossl_quic_cfq_item_get_priority_next(cfq_item, pn_space)) {
785 uint64_t frame_type = ossl_quic_cfq_item_get_frame_type(cfq_item);
786
787 switch (frame_type) {
788 case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
789 if (a.allow_new_conn_id)
790 return 1;
791 break;
792 case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
793 if (a.allow_retire_conn_id)
794 return 1;
795 break;
796 case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
797 if (a.allow_new_token)
798 return 1;
799 break;
800 default:
801 if (a.allow_cfq_other)
802 return 1;
803 break;
804 }
805 }
806
807 if (a.allow_stream_rel && txp->handshake_complete) {
808 QUIC_STREAM_ITER it;
809
810 /* If there are any active streams, 0/1-RTT wants to produce a packet.
811 * Whether a stream is on the active list is required to be precise
812 * (i.e., a stream is never on the active list if we cannot produce a
813 * frame for it), and all stream-related frames are governed by
814 * a.allow_stream_rel (i.e., if we can send one type of stream-related
815 * frame, we can send any of them), so we don't need to inspect
816 * individual streams on the active list, just confirm that the active
817 * list is non-empty.
818 */
819 ossl_quic_stream_iter_init(&it, txp->args.qsm, 0);
820 if (it.stream != NULL)
821 return 1;
822 }
823
824 return 0;
825}
826
827static int sstream_is_pending(QUIC_SSTREAM *sstream)
828{
829 OSSL_QUIC_FRAME_STREAM hdr;
830 OSSL_QTX_IOVEC iov[2];
831 size_t num_iov = OSSL_NELEM(iov);
832
833 return ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov, &num_iov);
834}
835
836/*
837 * Generates a packet for a given EL, coalescing it into the current datagram.
838 *
839 * is_last_in_dgram and dgram_contains_initial are used to determine padding
840 * requirements.
841 *
842 * Returns TXP_ERR_* value.
843 */
844static int txp_generate_for_el(OSSL_QUIC_TX_PACKETISER *txp, uint32_t enc_level,
845 uint32_t archetype,
846 char is_last_in_dgram,
847 char dgram_contains_initial)
848{
849 char must_pad = dgram_contains_initial && is_last_in_dgram;
850 size_t min_dpl, min_pl, min_ppl, cmpl, cmppl, running_total;
851 size_t mdpl, hdr_len, pkt_overhead, cc_limit;
852 uint64_t cc_limit_;
853 QUIC_PKT_HDR phdr;
854 OSSL_TIME time_since_last;
855
856 /* Determine the limit CC imposes on what we can send. */
857 if (ossl_time_is_zero(txp->last_tx_time))
858 time_since_last = ossl_time_zero();
859 else
860 time_since_last = ossl_time_subtract(txp->args.now(txp->args.now_arg),
861 txp->last_tx_time);
862
863 cc_limit_ = txp->args.cc_method->get_send_allowance(txp->args.cc_data,
864 time_since_last,
865 ossl_time_is_zero(time_since_last));
866
867 cc_limit = (cc_limit_ > SIZE_MAX ? SIZE_MAX : (size_t)cc_limit_);
868
869 /* Assemble packet header. */
870 phdr.type = ossl_quic_enc_level_to_pkt_type(enc_level);
871 phdr.spin_bit = 0;
872 phdr.pn_len = txp_determine_pn_len(txp);
873 phdr.partial = 0;
874 phdr.fixed = 1;
875 phdr.version = QUIC_VERSION_1;
876 phdr.dst_conn_id = txp->args.cur_dcid;
877 phdr.src_conn_id = txp->args.cur_scid;
878
879 /*
880 * We need to know the length of the payload to get an accurate header
881 * length for non-1RTT packets, because the Length field found in
882 * Initial/Handshake/0-RTT packets uses a variable-length encoding. However,
883 * we don't have a good idea of the length of our payload, because the
884 * length of the payload depends on the room in the datagram after fitting
885 * the header, which depends on the size of the header.
886 *
887 * In general, it does not matter if a packet is slightly shorter (because
888 * e.g. we predicted use of a 2-byte length field, but ended up only needing
889 * a 1-byte length field). However this does matter for Initial packets
890 * which must be at least 1200 bytes, which is also the assumed default MTU;
891 * therefore in many cases Initial packets will be padded to 1200 bytes,
892 * which means if we overestimated the header size, we will be short by a
893 * few bytes and the server will ignore the packet for being too short. In
894 * this case, however, such packets always *will* be padded to meet 1200
895 * bytes, which requires a 2-byte length field, so we don't actually need to
896 * worry about this. Thus we estimate the header length assuming a 2-byte
897 * length field here, which should in practice work well in all cases.
898 */
899 phdr.len = OSSL_QUIC_VLINT_2B_MAX - phdr.pn_len;
900
901 if (enc_level == QUIC_ENC_LEVEL_INITIAL) {
902 phdr.token = txp->initial_token;
903 phdr.token_len = txp->initial_token_len;
904 } else {
905 phdr.token = NULL;
906 phdr.token_len = 0;
907 }
908
909 hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(phdr.dst_conn_id.id_len,
910 &phdr);
911 if (hdr_len == 0)
912 return TXP_ERR_INPUT;
913
914 /* MinDPL: Minimum total datagram payload length. */
915 min_dpl = must_pad ? QUIC_MIN_INITIAL_DGRAM_LEN : 0;
916
917 /* How much data is already in the current datagram? */
918 running_total = ossl_qtx_get_cur_dgram_len_bytes(txp->args.qtx);
919
920 /* MinPL: Minimum length of the fully encoded packet. */
921 min_pl = running_total < min_dpl ? min_dpl - running_total : 0;
922 if ((uint64_t)min_pl > cc_limit)
923 /*
924 * Congestion control does not allow us to send a packet of adequate
925 * size.
926 */
927 return TXP_ERR_SPACE;
928
929 /* MinPPL: Minimum plaintext payload length needed to meet MinPL. */
930 if (!txp_determine_ppl_from_pl(txp, min_pl, enc_level, hdr_len, &min_ppl))
931 /* MinPL is less than a valid packet size, so just use a MinPPL of 0. */
932 min_ppl = 0;
933
934 /* MDPL: Maximum datagram payload length. */
935 mdpl = txp_get_mdpl(txp);
936
937 /*
938 * CMPL: Maximum encoded packet size we can put into this datagram given any
939 * previous packets coalesced into it.
940 */
941 if (running_total > mdpl)
942 /* Should not be possible, but if it happens: */
943 cmpl = 0;
944 else
945 cmpl = mdpl - running_total;
946
947 /* Clamp CMPL based on congestion control limit. */
948 if (cmpl > cc_limit)
949 cmpl = cc_limit;
950
951 /* CMPPL: Maximum amount we can put into the current datagram payload. */
952 if (!txp_determine_ppl_from_pl(txp, cmpl, enc_level, hdr_len, &cmppl))
953 return TXP_ERR_SPACE;
954
955 /* Packet overhead (size of headers, AEAD tag, etc.) */
956 pkt_overhead = cmpl - cmppl;
957
958 return txp_generate_for_el_actual(txp, enc_level, archetype, min_ppl, cmppl,
959 pkt_overhead, &phdr);
960}
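/*
 * Worked example of the budget chain above (illustrative numbers only):
 * with MDPL = 1200, an empty current datagram (running_total = 0) and
 * must_pad set, MinDPL = MinPL = 1200. If the encoded header is 26 bytes,
 * the QTX reports 16 bytes of AEAD expansion and congestion control permits
 * at least 1200 bytes, then CMPL = 1200 and
 * CMPPL = 1200 - 26 - 16 = 1158 bytes of plaintext payload budget, giving
 * pkt_overhead = 42.
 */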
961
962/* Determine how many bytes we should use for the encoded PN. */
963static size_t txp_determine_pn_len(OSSL_QUIC_TX_PACKETISER *txp)
964{
965 return 4; /* TODO(QUIC) */
966}
967
968/* Determine plaintext packet payload length from fully encoded packet length. */
969static int txp_determine_ppl_from_pl(OSSL_QUIC_TX_PACKETISER *txp,
970 size_t pl,
971 uint32_t enc_level,
972 size_t hdr_len,
973 size_t *r)
974{
975 if (pl < hdr_len)
976 return 0;
977
978 pl -= hdr_len;
979
980 if (!ossl_qtx_calculate_plaintext_payload_len(txp->args.qtx, enc_level,
981 pl, &pl))
982 return 0;
983
984 *r = pl;
985 return 1;
986}
987
988static size_t txp_get_mdpl(OSSL_QUIC_TX_PACKETISER *txp)
989{
990 return ossl_qtx_get_mdpl(txp->args.qtx);
991}
992
993static QUIC_SSTREAM *get_sstream_by_id(uint64_t stream_id, uint32_t pn_space,
994 void *arg)
995{
996 OSSL_QUIC_TX_PACKETISER *txp = arg;
997 QUIC_STREAM *s;
998
999 if (stream_id == UINT64_MAX)
1000 return txp->args.crypto[pn_space];
1001
1002 s = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
1003 if (s == NULL)
1004 return NULL;
1005
1006 return s->sstream;
1007}
1008
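/*
 * Regeneration callback registered with the FIFD in
 * ossl_quic_tx_packetiser_new(). It is invoked when a packet carrying one of
 * these frame types needs its frames regenerated (e.g. after loss), at which
 * point we raise the corresponding want_ flag or per-stream flag so a future
 * packet re-sends the frame.
 */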
1009static void on_regen_notify(uint64_t frame_type, uint64_t stream_id,
1010 QUIC_TXPIM_PKT *pkt, void *arg)
1011{
1012 OSSL_QUIC_TX_PACKETISER *txp = arg;
1013
1014 switch (frame_type) {
1015 case OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE:
1016 txp->want_handshake_done = 1;
1017 break;
1018 case OSSL_QUIC_FRAME_TYPE_MAX_DATA:
1019 txp->want_max_data = 1;
1020 break;
1021 case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI:
1022 txp->want_max_streams_bidi = 1;
1023 break;
1024 case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI:
1025 txp->want_max_streams_uni = 1;
1026 break;
1027 case OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN:
1028 txp->want_ack |= (1UL << pkt->ackm_pkt.pkt_space);
1029 break;
1030 case OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA:
1031 {
1032 QUIC_STREAM *s
1033 = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
1034
1035 if (s == NULL)
1036 return;
1037
1038 s->want_max_stream_data = 1;
1039 ossl_quic_stream_map_update_state(txp->args.qsm, s);
1040 }
1041 break;
1042 case OSSL_QUIC_FRAME_TYPE_STOP_SENDING:
1043 {
1044 QUIC_STREAM *s
1045 = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
1046
1047 if (s == NULL)
1048 return;
1049
1050 s->want_stop_sending = 1;
1051 ossl_quic_stream_map_update_state(txp->args.qsm, s);
1052 }
1053 break;
1054 case OSSL_QUIC_FRAME_TYPE_RESET_STREAM:
1055 {
1056 QUIC_STREAM *s
1057 = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
1058
1059 if (s == NULL)
1060 return;
1061
1062 s->want_reset_stream = 1;
1063 ossl_quic_stream_map_update_state(txp->args.qsm, s);
1064 }
1065 break;
1066 default:
1067 assert(0);
1068 break;
1069 }
1070}
1071
1072static int txp_generate_pre_token(OSSL_QUIC_TX_PACKETISER *txp,
1073 struct tx_helper *h,
1074 QUIC_TXPIM_PKT *tpkt,
1075 uint32_t pn_space,
1076 struct archetype_data *a)
1077{
1078 const OSSL_QUIC_FRAME_ACK *ack;
1079 OSSL_QUIC_FRAME_ACK ack2;
1080
1081 tpkt->ackm_pkt.largest_acked = QUIC_PN_INVALID;
1082
1083 /* ACK Frames (Regenerate) */
1084 if (a->allow_ack
1085 && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_ACK
1086 && (txp->want_ack
1087 || ossl_ackm_is_ack_desired(txp->args.ackm, pn_space))
1088 && (ack = ossl_ackm_get_ack_frame(txp->args.ackm, pn_space)) != NULL) {
1089 WPACKET *wpkt = tx_helper_begin(h);
1090
1091 if (wpkt == NULL)
1092 return 0;
1093
1094 /* We do not currently support ECN */
1095 ack2 = *ack;
1096 ack2.ecn_present = 0;
1097
1098 if (ossl_quic_wire_encode_frame_ack(wpkt,
1099 txp->args.ack_delay_exponent,
1100 &ack2)) {
1101 if (!tx_helper_commit(h))
1102 return 0;
1103
1104 tpkt->had_ack_frame = 1;
1105
1106 if (ack->num_ack_ranges > 0)
1107 tpkt->ackm_pkt.largest_acked = ack->ack_ranges[0].end;
1108 } else {
1109 tx_helper_rollback(h);
1110 }
1111 }
1112
1113 /* CONNECTION_CLOSE Frames (Regenerate) */
1114 if (a->allow_conn_close && txp->want_conn_close) {
1115 WPACKET *wpkt = tx_helper_begin(h);
1116
1117 if (wpkt == NULL)
1118 return 0;
1119
1120 if (ossl_quic_wire_encode_frame_conn_close(wpkt,
1121 &txp->conn_close_frame)) {
1122 if (!tx_helper_commit(h))
1123 return 0;
1124 } else {
1125 tx_helper_rollback(h);
1126 }
1127 }
1128
1129 return 1;
1130}
1131
1132static int try_len(size_t space_left, size_t orig_len,
1133 size_t base_hdr_len, size_t lenbytes,
1134 uint64_t maxn, size_t *hdr_len, size_t *payload_len)
1135{
1136 size_t n;
1137 size_t maxn_ = maxn > SIZE_MAX ? SIZE_MAX : (size_t)maxn;
1138
1139 *hdr_len = base_hdr_len + lenbytes;
1140
1141 n = orig_len;
1142 if (n > maxn_)
1143 n = maxn_;
1144 if (n + *hdr_len > space_left)
1145 n = (space_left >= *hdr_len) ? space_left - *hdr_len : 0;
1146
1147 *payload_len = n;
1148 return n > 0;
1149}
1150
1151static void determine_len(size_t space_left, size_t orig_len,
1152 size_t base_hdr_len,
1153 uint64_t *hlen, uint64_t *len)
1154{
1155 size_t chosen_payload_len = 0;
1156 size_t chosen_hdr_len = 0;
1157 size_t payload_len[4], hdr_len[4];
1158 int i, valid[4] = {0};
1159
1160 valid[0] = try_len(space_left, orig_len, base_hdr_len,
1161 1, OSSL_QUIC_VLINT_1B_MAX,
1162 &hdr_len[0], &payload_len[0]);
1163 valid[1] = try_len(space_left, orig_len, base_hdr_len,
1164 2, OSSL_QUIC_VLINT_2B_MAX,
1165 &hdr_len[1], &payload_len[1]);
1166 valid[2] = try_len(space_left, orig_len, base_hdr_len,
1167 4, OSSL_QUIC_VLINT_4B_MAX,
1168 &hdr_len[2], &payload_len[2]);
1169 valid[3] = try_len(space_left, orig_len, base_hdr_len,
1170 8, OSSL_QUIC_VLINT_8B_MAX,
1171 &hdr_len[3], &payload_len[3]);
1172
1173 for (i = OSSL_NELEM(valid) - 1; i >= 0; --i)
1174 if (valid[i] && payload_len[i] >= chosen_payload_len) {
1175 chosen_payload_len = payload_len[i];
1176 chosen_hdr_len = hdr_len[i];
1177 }
1178
1179 *hlen = chosen_hdr_len;
1180 *len = chosen_payload_len;
1181}
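/*
 * Worked example (illustrative): QUIC variable-length integers hold values up
 * to 63 in one byte and up to 16383 in two bytes. With space_left = 70,
 * orig_len = 1000 and base_hdr_len = 2, a 1-byte length field gives
 * hdr_len = 3 and a payload capped at 63 bytes, while a 2-byte length field
 * gives hdr_len = 4 and a payload of 66 bytes; determine_len() therefore
 * selects the 2-byte encoding.
 */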
1182
1183/*
1184 * Given a CRYPTO frame header with accurate chdr->len and a budget
1185 * (space_left), try to find the optimal value of chdr->len to fill as much of
1186 * the budget as possible. This is slightly hairy because larger values of
1187 * chdr->len cause larger encoded sizes of the length field of the frame, which
1188 * in turn mean less space available for payload data. We check all possible
1189 * encodings and choose the optimal encoding.
1190 */
1191static int determine_crypto_len(struct tx_helper *h,
1192 OSSL_QUIC_FRAME_CRYPTO *chdr,
1193 size_t space_left,
1194 uint64_t *hlen,
1195 uint64_t *len)
1196{
1197 size_t orig_len;
1198 size_t base_hdr_len; /* CRYPTO header length without length field */
1199
1200 if (chdr->len > SIZE_MAX)
1201 return 0;
1202
1203 orig_len = (size_t)chdr->len;
1204
1205 chdr->len = 0;
1206 base_hdr_len = ossl_quic_wire_get_encoded_frame_len_crypto_hdr(chdr);
1207 chdr->len = orig_len;
1208 if (base_hdr_len == 0)
1209 return 0;
1210
1211 --base_hdr_len;
1212
1213 determine_len(space_left, orig_len, base_hdr_len, hlen, len);
1214 return 1;
1215}
1216
1217static int determine_stream_len(struct tx_helper *h,
1218 OSSL_QUIC_FRAME_STREAM *shdr,
1219 size_t space_left,
1220 uint64_t *hlen,
1221 uint64_t *len)
1222{
1223 size_t orig_len;
1224 size_t base_hdr_len; /* STREAM header length without length field */
1225
1226 if (shdr->len > SIZE_MAX)
1227 return 0;
1228
1229 orig_len = (size_t)shdr->len;
1230
1231 shdr->len = 0;
1232 base_hdr_len = ossl_quic_wire_get_encoded_frame_len_stream_hdr(shdr);
1233 shdr->len = orig_len;
1234 if (base_hdr_len == 0)
1235 return 0;
1236
1237 if (shdr->has_explicit_len)
1238 --base_hdr_len;
1239
1240 determine_len(space_left, orig_len, base_hdr_len, hlen, len);
1241 return 1;
1242}
1243
1244static int txp_generate_crypto_frames(OSSL_QUIC_TX_PACKETISER *txp,
1245 struct tx_helper *h,
1246 uint32_t pn_space,
1247 QUIC_TXPIM_PKT *tpkt,
1248 char *have_ack_eliciting)
1249{
1250 size_t num_stream_iovec;
1251 OSSL_QUIC_FRAME_STREAM shdr = {0};
1252 OSSL_QUIC_FRAME_CRYPTO chdr = {0};
1253 OSSL_QTX_IOVEC iov[2];
1254 uint64_t hdr_bytes;
1255 WPACKET *wpkt;
1256 QUIC_TXPIM_CHUNK chunk;
1257 size_t i, space_left;
1258
1259 for (i = 0;; ++i) {
1260 space_left = tx_helper_get_space_left(h);
1261
1262 if (space_left < MIN_FRAME_SIZE_CRYPTO)
1263 return 1; /* no point trying */
1264
1265 /* Do we have any CRYPTO data waiting? */
1266 num_stream_iovec = OSSL_NELEM(iov);
1267 if (!ossl_quic_sstream_get_stream_frame(txp->args.crypto[pn_space],
1268 i, &shdr, iov,
1269 &num_stream_iovec))
1270 return 1; /* nothing to do */
1271
1272 /* Convert STREAM frame header to CRYPTO frame header */
1273 chdr.offset = shdr.offset;
1274 chdr.len = shdr.len;
1275
1276 if (chdr.len == 0)
1277 return 1; /* nothing to do */
1278
1279 /* Find best fit (header length, payload length) combination. */
1280 if (!determine_crypto_len(h, &chdr, space_left, &hdr_bytes,
1281 &chdr.len)
1282 || hdr_bytes == 0 || chdr.len == 0) {
1283 return 1; /* can't fit anything */
1284 }
1285
1286 /*
1287 * Truncate IOVs to match our chosen length.
1288 *
1289 * The length cannot be more than SIZE_MAX because this length comes
1290 * from our send stream buffer.
1291 */
1292 ossl_quic_sstream_adjust_iov((size_t)chdr.len, iov, num_stream_iovec);
1293
1294 /*
1295 * Ensure we have enough iovecs allocated (1 for the header, up to 2 for
1296 * the stream data).
1297 */
1298 if (!txp_ensure_iovec(txp, h->num_iovec + 3))
1299 return 0; /* alloc error */
1300
1301 /* Encode the header. */
1302 wpkt = tx_helper_begin(h);
1303 if (wpkt == NULL)
1304 return 0; /* alloc error */
1305
1306 if (!ossl_quic_wire_encode_frame_crypto_hdr(wpkt, &chdr)) {
1307 tx_helper_rollback(h);
1308 return 1; /* can't fit */
1309 }
1310
1311 if (!tx_helper_commit(h))
1312 return 0; /* alloc error */
1313
1314 /* Add payload iovecs to the helper (infallible). */
1315 for (i = 0; i < num_stream_iovec; ++i)
1316 tx_helper_append_iovec(h, iov[i].buf, iov[i].buf_len);
1317
1318 *have_ack_eliciting = 1;
1319 tx_helper_unrestrict(h); /* no longer need PING */
1320
1321 /* Log chunk to TXPIM. */
1322 chunk.stream_id = UINT64_MAX; /* crypto stream */
1323 chunk.start = chdr.offset;
1324 chunk.end = chdr.offset + chdr.len - 1;
1325 chunk.has_fin = 0; /* Crypto stream never ends */
1326 if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
1327 return 0; /* alloc error */
1328 }
1329}
1330
1331struct chunk_info {
1332 OSSL_QUIC_FRAME_STREAM shdr;
1333 OSSL_QTX_IOVEC iov[2];
1334 size_t num_stream_iovec;
1335 char valid;
1336};
1337
1338static int txp_plan_stream_chunk(OSSL_QUIC_TX_PACKETISER *txp,
1339 struct tx_helper *h,
1340 QUIC_SSTREAM *sstream,
1341 QUIC_TXFC *stream_txfc,
1342 size_t skip,
1343 struct chunk_info *chunk)
1344{
1345 uint64_t fc_credit, fc_swm, fc_limit;
1346
1347 chunk->num_stream_iovec = OSSL_NELEM(chunk->iov);
1348 chunk->valid = ossl_quic_sstream_get_stream_frame(sstream, skip,
1349 &chunk->shdr,
1350 chunk->iov,
1351 &chunk->num_stream_iovec);
1352 if (!chunk->valid)
1353 return 1;
1354
1355 if (!ossl_assert(chunk->shdr.len > 0 || chunk->shdr.is_fin))
1356 /* Should only have 0-length chunk if FIN */
1357 return 0;
1358
1359 /* Clamp according to connection and stream-level TXFC. */
1360 fc_credit = ossl_quic_txfc_get_credit(stream_txfc);
1361 fc_swm = ossl_quic_txfc_get_swm(stream_txfc);
1362 fc_limit = fc_swm + fc_credit;
1363
1364 if (chunk->shdr.len > 0 && chunk->shdr.offset + chunk->shdr.len > fc_limit) {
1365 chunk->shdr.len = (fc_limit <= chunk->shdr.offset)
1366 ? 0 : fc_limit - chunk->shdr.offset;
1367 chunk->shdr.is_fin = 0;
1368 }
1369
1370 if (chunk->shdr.len == 0 && !chunk->shdr.is_fin) {
1371 /*
1372 * Nothing to do due to TXFC. Since SSTREAM returns chunks in ascending
1373 * order of offset we don't need to check any later chunks, so stop
1374 * iterating here.
1375 */
1376 chunk->valid = 0;
1377 return 1;
1378 }
1379
1380 return 1;
1381}
1382
1383/*
1384 * Returns 0 on fatal error (e.g. allocation failure), 1 on success.
1385 * *packet_full is set to 1 if there is no longer enough room for another STREAM
1386 * frame, and *stream_drained is set to 1 if all stream buffers have now been
1387 * sent.
1388 */
1389static int txp_generate_stream_frames(OSSL_QUIC_TX_PACKETISER *txp,
1390 struct tx_helper *h,
1391 uint32_t pn_space,
1392 QUIC_TXPIM_PKT *tpkt,
1393 uint64_t id,
1394 QUIC_SSTREAM *sstream,
1395 QUIC_TXFC *stream_txfc,
1396 QUIC_STREAM *next_stream,
1397 size_t min_ppl,
1398 char *have_ack_eliciting,
1399 char *packet_full,
1400 char *stream_drained,
1401 uint64_t *new_credit_consumed)
1402{
1403 int rc = 0;
1404 struct chunk_info chunks[2] = {0};
1405
1406 OSSL_QUIC_FRAME_STREAM *shdr;
1407 WPACKET *wpkt;
1408 QUIC_TXPIM_CHUNK chunk;
1409 size_t i, j, space_left;
1410 int needs_padding_if_implicit, can_fill_payload, use_explicit_len;
1411 int could_have_following_chunk;
1412 uint64_t hdr_len_implicit, payload_len_implicit;
1413 uint64_t hdr_len_explicit, payload_len_explicit;
1414 uint64_t fc_swm, fc_new_hwm;
1415
1416 fc_swm = ossl_quic_txfc_get_swm(stream_txfc);
1417 fc_new_hwm = fc_swm;
1418
1419 /*
1420 * Load the first two chunks if any offered by the send stream. We retrieve
1421 * the next chunk in advance so we can determine if we need to send any more
1422 * chunks from the same stream after this one, which is needed when
1423 * determining when we can use an implicit length in a STREAM frame.
1424 */
1425 for (i = 0; i < 2; ++i) {
1426 if (!txp_plan_stream_chunk(txp, h, sstream, stream_txfc, i, &chunks[i]))
1427 goto err;
1428
1429 if (i == 0 && !chunks[i].valid) {
1430 /* No chunks, nothing to do. */
1431 *stream_drained = 1;
1432 rc = 1;
1433 goto err;
1434 }
1435 }
1436
1437 for (i = 0;; ++i) {
1438 space_left = tx_helper_get_space_left(h);
1439
1440 if (space_left < MIN_FRAME_SIZE_STREAM) {
1441 *packet_full = 1;
1442 rc = 1;
1443 goto err;
1444 }
1445
1446 if (!chunks[i % 2].valid) {
1447 /* Out of chunks; we're done. */
1448 *stream_drained = 1;
1449 rc = 1;
1450 goto err;
1451 }
1452
1453 if (!ossl_assert(!h->done_implicit))
1454 /*
1455 * Logic below should have ensured we didn't append an
1456 * implicit-length frame unless we filled the packet or didn't have
1457 * another stream to handle, so this should not be possible.
1458 */
1459 goto err;
1460
1461 shdr = &chunks[i % 2].shdr;
1462 if (i > 0)
1463 /* Load next chunk for lookahead. */
1464 if (!txp_plan_stream_chunk(txp, h, sstream, stream_txfc, i + 1,
1465 &chunks[(i + 1) % 2]))
1466 goto err;
1467
1468 /*
1469 * Find best fit (header length, payload length) combination for if we
1470 * use an implicit length.
1471 */
1472 shdr->has_explicit_len = 0;
1473 hdr_len_implicit = payload_len_implicit = 0;
1474 if (!determine_stream_len(h, shdr, space_left,
1475 &hdr_len_implicit, &payload_len_implicit)
1476 || hdr_len_implicit == 0 || payload_len_implicit == 0) {
1477 *packet_full = 1;
1478 rc = 1;
1479 goto err; /* can't fit anything */
1480 }
1481
1482 /*
1483 * If using the implicit-length representation would need padding, we
1484 * can't use it.
1485 */
1486 needs_padding_if_implicit = (h->bytes_appended + hdr_len_implicit
1487 + payload_len_implicit < min_ppl);
1488
1489 /*
1490 * If there is a next stream, we don't use the implicit length so we can
1491 * add more STREAM frames after this one, unless there is enough data
1492 * for this STREAM frame to fill the packet.
1493 */
1494 can_fill_payload = (hdr_len_implicit + payload_len_implicit
1495 >= space_left);
1496
1497 /*
1498 * Is there a stream after this one, or another chunk pending
1499 * transmission in this stream?
1500 */
1501 could_have_following_chunk
1502 = (next_stream != NULL || chunks[(i + 1) % 2].valid);
1503
1504 /* Choose between explicit or implicit length representations. */
1505 use_explicit_len = !((can_fill_payload || !could_have_following_chunk)
1506 && !needs_padding_if_implicit);
1507
1508 if (use_explicit_len) {
1509 /*
1510 * Find best fit (header length, payload length) combination for if
1511 * we use an explicit length.
1512 */
1513 shdr->has_explicit_len = 1;
1514 hdr_len_explicit = payload_len_explicit = 0;
1515 if (!determine_stream_len(h, shdr, space_left,
1516 &hdr_len_explicit, &payload_len_explicit)
1517 || hdr_len_explicit == 0 || payload_len_explicit == 0) {
1518 *packet_full = 1;
1519 rc = 1;
1520 goto err; /* can't fit anything */
1521 }
1522
1523 shdr->len = payload_len_explicit;
1524 } else {
1525 shdr->has_explicit_len = 0;
1526 shdr->len = payload_len_implicit;
1527 }
1528
1529 /* Truncate IOVs to match our chosen length. */
1530 ossl_quic_sstream_adjust_iov((size_t)shdr->len, chunks[i % 2].iov,
1531 chunks[i % 2].num_stream_iovec);
1532
1533 /*
1534 * Ensure we have enough iovecs allocated (1 for the header, up to 2 for
1535 * the stream data).
1536 */
1537 if (!txp_ensure_iovec(txp, h->num_iovec + 3))
1538 goto err; /* alloc error */
1539
1540 /* Encode the header. */
1541 wpkt = tx_helper_begin(h);
1542 if (wpkt == NULL)
1543 goto err; /* alloc error */
1544
1545 shdr->stream_id = id;
1546 if (!ossl_assert(ossl_quic_wire_encode_frame_stream_hdr(wpkt, shdr))) {
1547 /* (Should not be possible.) */
1548 tx_helper_rollback(h);
1549 *packet_full = 1;
1550 rc = 1;
1551 goto err; /* can't fit */
1552 }
1553
1554 if (!tx_helper_commit(h))
1555 goto err; /* alloc error */
1556
1557 /* Add payload iovecs to the helper (infallible). */
1558 for (j = 0; j < chunks[i % 2].num_stream_iovec; ++j)
1559 tx_helper_append_iovec(h, chunks[i % 2].iov[j].buf,
1560 chunks[i % 2].iov[j].buf_len);
1561
1562 *have_ack_eliciting = 1;
1563 tx_helper_unrestrict(h); /* no longer need PING */
1564 if (!shdr->has_explicit_len)
1565 h->done_implicit = 1;
1566
1567 /* Log new TXFC credit which was consumed. */
1568 if (shdr->len > 0 && shdr->offset + shdr->len > fc_new_hwm)
1569 fc_new_hwm = shdr->offset + shdr->len;
1570
1571 /* Log chunk to TXPIM. */
1572 chunk.stream_id = shdr->stream_id;
1573 chunk.start = shdr->offset;
1574 chunk.end = shdr->offset + shdr->len - 1;
1575 chunk.has_fin = shdr->is_fin;
1576 chunk.has_stop_sending = 0;
1577 chunk.has_reset_stream = 0;
1578 if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
1579 goto err; /* alloc error */
1580 }
1581
1582err:
1583 *new_credit_consumed = fc_new_hwm - fc_swm;
1584 return rc;
1585}
1586
1587static void txp_enlink_tmp(QUIC_STREAM **tmp_head, QUIC_STREAM *stream)
1588{
1589 stream->txp_next = *tmp_head;
1590 *tmp_head = stream;
1591}
1592
1593static int txp_generate_stream_related(OSSL_QUIC_TX_PACKETISER *txp,
1594 struct tx_helper *h,
1595 uint32_t pn_space,
1596 QUIC_TXPIM_PKT *tpkt,
1597 size_t min_ppl,
1598 char *have_ack_eliciting,
1599 QUIC_STREAM **tmp_head)
1600{
1601 QUIC_STREAM_ITER it;
1602 void *rstream;
1603 WPACKET *wpkt;
1604 uint64_t cwm;
1605 QUIC_STREAM *stream, *snext;
1606
1607 for (ossl_quic_stream_iter_init(&it, txp->args.qsm, 1);
1608 it.stream != NULL;) {
1609
1610 stream = it.stream;
1611 ossl_quic_stream_iter_next(&it);
1612 snext = it.stream;
1613
1614 stream->txp_sent_fc = 0;
1615 stream->txp_sent_stop_sending = 0;
1616 stream->txp_sent_reset_stream = 0;
1617 stream->txp_drained = 0;
1618 stream->txp_blocked = 0;
1619 stream->txp_txfc_new_credit_consumed = 0;
1620
1621 rstream = stream->rstream;
1622
1623 /* Stream Abort Frames (STOP_SENDING, RESET_STREAM) */
1624 if (stream->want_stop_sending) {
1625 OSSL_QUIC_FRAME_STOP_SENDING f;
1626
1627 wpkt = tx_helper_begin(h);
1628 if (wpkt == NULL)
1629 return 0; /* alloc error */
1630
1631 f.stream_id = stream->id;
1632 f.app_error_code = stream->stop_sending_aec;
1633 if (!ossl_quic_wire_encode_frame_stop_sending(wpkt, &f)) {
1634 tx_helper_rollback(h); /* can't fit */
1635 txp_enlink_tmp(tmp_head, stream);
1636 break;
1637 }
1638
1639 if (!tx_helper_commit(h))
1640 return 0; /* alloc error */
1641
1642 *have_ack_eliciting = 1;
1643 tx_helper_unrestrict(h); /* no longer need PING */
1644 stream->txp_sent_stop_sending = 1;
1645 }
1646
1647 if (stream->want_reset_stream) {
1648 OSSL_QUIC_FRAME_RESET_STREAM f;
1649
1650 wpkt = tx_helper_begin(h);
1651 if (wpkt == NULL)
1652 return 0; /* alloc error */
1653
1654 f.stream_id = stream->id;
1655 f.app_error_code = stream->reset_stream_aec;
1656 f.final_size = ossl_quic_sstream_get_cur_size(stream->sstream);
1657 if (!ossl_quic_wire_encode_frame_reset_stream(wpkt, &f)) {
1658 tx_helper_rollback(h); /* can't fit */
1659 txp_enlink_tmp(tmp_head, stream);
1660 break;
1661 }
1662
1663 if (!tx_helper_commit(h))
1664 return 0; /* alloc error */
1665
1666 *have_ack_eliciting = 1;
1667 tx_helper_unrestrict(h); /* no longer need PING */
1668 stream->txp_sent_reset_stream = 1;
1669 }
1670
1671 /* Stream Flow Control Frames (MAX_STREAM_DATA) */
1672 if (rstream != NULL
1673 && (stream->want_max_stream_data
1674 || ossl_quic_rxfc_has_cwm_changed(&stream->rxfc, 0))) {
1675
1676 wpkt = tx_helper_begin(h);
1677 if (wpkt == NULL)
1678 return 0; /* alloc error */
1679
1680 cwm = ossl_quic_rxfc_get_cwm(&stream->rxfc);
1681
1682 if (!ossl_quic_wire_encode_frame_max_stream_data(wpkt, stream->id,
1683 cwm)) {
1684 tx_helper_rollback(h); /* can't fit */
1685 txp_enlink_tmp(tmp_head, stream);
1686 break;
1687 }
1688
1689 if (!tx_helper_commit(h))
1690 return 0; /* alloc error */
1691
1692 *have_ack_eliciting = 1;
1693 tx_helper_unrestrict(h); /* no longer need PING */
1694 stream->txp_sent_fc = 1;
1695 }
1696
1697 /* Stream Data Frames (STREAM) */
1698 if (stream->sstream != NULL) {
1699 char packet_full = 0, stream_drained = 0;
1700
1701 if (!txp_generate_stream_frames(txp, h, pn_space, tpkt,
1702 stream->id, stream->sstream,
1703 &stream->txfc,
1704 snext, min_ppl,
1705 have_ack_eliciting,
1706 &packet_full,
1707 &stream_drained,
1708 &stream->txp_txfc_new_credit_consumed)) {
1709 /* Fatal error (allocation, etc.) */
1710 txp_enlink_tmp(tmp_head, stream);
1711 return 0;
1712 }
1713
1714 if (stream_drained)
1715 stream->txp_drained = 1;
1716
1717 if (packet_full) {
1718 txp_enlink_tmp(tmp_head, stream);
1719 break;
1720 }
1721 }
1722
1723 txp_enlink_tmp(tmp_head, stream);
1724 }
1725
1726 return 1;
1727}
1728
1729/*
1730 * Generates a packet for a given EL with the given minimum and maximum
1731 * plaintext packet payload lengths. Returns TXP_ERR_* value.
1732 */
1733static int txp_generate_for_el_actual(OSSL_QUIC_TX_PACKETISER *txp,
1734 uint32_t enc_level,
1735 uint32_t archetype,
1736 size_t min_ppl,
1737 size_t max_ppl,
1738 size_t pkt_overhead,
1739 QUIC_PKT_HDR *phdr)
1740{
1741 int rc = TXP_ERR_SUCCESS;
1742 struct archetype_data a;
1743 uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
1744 struct tx_helper h;
1745 char have_helper = 0, have_ack_eliciting = 0, done_pre_token = 0;
1746 char require_ack_eliciting;
1747 QUIC_CFQ_ITEM *cfq_item;
1748 QUIC_TXPIM_PKT *tpkt = NULL;
1749 OSSL_QTX_PKT pkt;
1750 QUIC_STREAM *tmp_head = NULL, *stream;
1751
1752 if (!txp_get_archetype_data(enc_level, archetype, &a))
1753 goto fatal_err;
1754
1755 require_ack_eliciting
1756 = (a.allow_force_ack_eliciting
1757 && (txp->force_ack_eliciting & (1UL << pn_space)));
1758
1759 /* Minimum cannot be bigger than maximum. */
1760 if (min_ppl > max_ppl)
1761 goto fatal_err;
1762
1763 /* Maximum PN reached? */
1764 if (txp->next_pn[pn_space] >= (((QUIC_PN)1) << 62))
1765 goto fatal_err;
1766
1767 if ((tpkt = ossl_quic_txpim_pkt_alloc(txp->args.txpim)) == NULL)
1768 goto fatal_err;
1769
1770 /*
1771 * Initialise TX helper. If we must be ACK eliciting, reserve 1 byte for
1772 * PING.
1773 */
1774 if (!tx_helper_init(&h, txp, max_ppl, require_ack_eliciting ? 1 : 0))
1775 goto fatal_err;
1776
1777 have_helper = 1;
1778
1779 /*
1780 * Frame Serialization
1781 * ===================
1782 *
1783 * We now serialize frames into the packet in descending order of priority.
1784 */
1785
1786 /* HANDSHAKE_DONE (Regenerate) */
1787 if (a.allow_handshake_done && txp->want_handshake_done
1788 && tx_helper_get_space_left(&h) >= MIN_FRAME_SIZE_HANDSHAKE_DONE) {
1789 WPACKET *wpkt = tx_helper_begin(&h);
1790
1791 if (wpkt == NULL)
1792 goto fatal_err;
1793
1794 if (ossl_quic_wire_encode_frame_handshake_done(wpkt)) {
1795 tpkt->had_handshake_done_frame = 1;
1796 have_ack_eliciting = 1;
1797
1798 if (!tx_helper_commit(&h))
1799 goto fatal_err;
1800
1801 tx_helper_unrestrict(&h); /* no longer need PING */
1802 } else {
1803 tx_helper_rollback(&h);
1804 }
1805 }
1806
1807 /* MAX_DATA (Regenerate) */
1808 if (a.allow_conn_fc
1809 && (txp->want_max_data
1810 || ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 0))
1811 && tx_helper_get_space_left(&h) >= MIN_FRAME_SIZE_MAX_DATA) {
1812 WPACKET *wpkt = tx_helper_begin(&h);
1813 uint64_t cwm = ossl_quic_rxfc_get_cwm(txp->args.conn_rxfc);
1814
1815 if (wpkt == NULL)
1816 goto fatal_err;
1817
1818 if (ossl_quic_wire_encode_frame_max_data(wpkt, cwm)) {
1819 tpkt->had_max_data_frame = 1;
1820 have_ack_eliciting = 1;
1821
1822 if (!tx_helper_commit(&h))
1823 goto fatal_err;
1824
1825 tx_helper_unrestrict(&h); /* no longer need PING */
1826 } else {
1827 tx_helper_rollback(&h);
1828 }
1829 }
1830
1831 /* MAX_STREAMS_BIDI (Regenerate) */
1832 /*
1833 * TODO(STREAMS): Once we support multiple streams, add stream count FC
1834 * and plug this in.
1835 */
1836 if (a.allow_conn_fc
1837 && txp->want_max_streams_bidi
1838 && tx_helper_get_space_left(&h) >= MIN_FRAME_SIZE_MAX_STREAMS_BIDI) {
1839 WPACKET *wpkt = tx_helper_begin(&h);
1840 uint64_t max_streams = 1; /* TODO */
1841
1842 if (wpkt == NULL)
1843 goto fatal_err;
1844
1845 if (ossl_quic_wire_encode_frame_max_streams(wpkt, /*is_uni=*/0,
1846 max_streams)) {
1847 tpkt->had_max_streams_bidi_frame = 1;
1848 have_ack_eliciting = 1;
1849
1850 if (!tx_helper_commit(&h))
1851 goto fatal_err;
1852
1853 tx_helper_unrestrict(&h); /* no longer need PING */
1854 } else {
1855 tx_helper_rollback(&h);
1856 }
1857 }
1858
1859 /* MAX_STREAMS_UNI (Regenerate) */
1860 if (a.allow_conn_fc
1861 && txp->want_max_streams_uni
1862 && tx_helper_get_space_left(&h) >= MIN_FRAME_SIZE_MAX_STREAMS_UNI) {
1863 WPACKET *wpkt = tx_helper_begin(&h);
1864 uint64_t max_streams = 0; /* TODO */
1865
1866 if (wpkt == NULL)
1867 goto fatal_err;
1868
1869 if (ossl_quic_wire_encode_frame_max_streams(wpkt, /*is_uni=*/1,
1870 max_streams)) {
1871 tpkt->had_max_streams_uni_frame = 1;
1872 have_ack_eliciting = 1;
1873
1874 if (!tx_helper_commit(&h))
1875 goto fatal_err;
1876
1877 tx_helper_unrestrict(&h); /* no longer need PING */
1878 } else {
1879 tx_helper_rollback(&h);
1880 }
1881 }
1882
1883 /* GCR Frames */
1884 for (cfq_item = ossl_quic_cfq_get_priority_head(txp->args.cfq, pn_space);
1885 cfq_item != NULL;
1886 cfq_item = ossl_quic_cfq_item_get_priority_next(cfq_item, pn_space)) {
1887 uint64_t frame_type = ossl_quic_cfq_item_get_frame_type(cfq_item);
1888 const unsigned char *encoded = ossl_quic_cfq_item_get_encoded(cfq_item);
1889 size_t encoded_len = ossl_quic_cfq_item_get_encoded_len(cfq_item);
1890
1891 switch (frame_type) {
1892 case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
1893 if (!a.allow_new_conn_id)
1894 continue;
1895 break;
1896 case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
1897 if (!a.allow_retire_conn_id)
1898 continue;
1899 break;
1900 case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
1901 if (!a.allow_new_token)
1902 continue;
1903
1904 /*
1905 * NEW_TOKEN frames are handled via GCR, but some
1906 * Regenerate-strategy frames should come before them (namely
1907 * ACK, CONNECTION_CLOSE, PATH_CHALLENGE and PATH_RESPONSE). If
1908 * we find a NEW_TOKEN frame, do these now. If there are no
1909 * NEW_TOKEN frames in the GCR queue we will handle these below.
1910 */
1911 if (!done_pre_token)
1912 if (txp_generate_pre_token(txp, &h, tpkt, pn_space, &a))
1913 done_pre_token = 1;
1914
1915 break;
1916 default:
1917 if (!a.allow_cfq_other)
1918 continue;
1919 break;
1920 }
1921
1922 /*
1923 * If this frame does not fit, stop scheduling GCR frames for this packet
1924 * entirely rather than skip it and send subsequent frames out of order.
1925 */
1926 if (encoded_len > tx_helper_get_space_left(&h))
1927 break;
1928
1929 if (!tx_helper_append_iovec(&h, encoded, encoded_len))
1930 goto fatal_err;
1931
1932 ossl_quic_txpim_pkt_add_cfq_item(tpkt, cfq_item);
1933
1934 if (ossl_quic_frame_type_is_ack_eliciting(frame_type)) {
1935 have_ack_eliciting = 1;
1936 tx_helper_unrestrict(&h); /* no longer need PING */
1937 }
1938 }
1939
1940 /*
1941 * If we didn't generate ACK, CONNECTION_CLOSE, PATH_CHALLENGE or
1942 * PATH_RESPONSE (as desired) before, do so now.
1943 */
1944 if (!done_pre_token)
1945 if (txp_generate_pre_token(txp, &h, tpkt, pn_space, &a))
1946 done_pre_token = 1;
1947
1948 /* CRYPTO Frames */
1949 if (a.allow_crypto)
1950 if (!txp_generate_crypto_frames(txp, &h, pn_space, tpkt,
1951 &have_ack_eliciting))
1952 goto fatal_err;
1953
1954 /* Stream-specific frames */
1955 if (a.allow_stream_rel && txp->handshake_complete)
1956 if (!txp_generate_stream_related(txp, &h, pn_space, tpkt, min_ppl,
1957 &have_ack_eliciting,
1958 &tmp_head))
1959 goto fatal_err;
1960
1961 /* PING */
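    /*
     * Any space the helper was holding in reserve for a PING frame is
     * released unconditionally at this point; if we still need the packet to
     * be ACK-eliciting, an explicit PING is encoded below instead.
     */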
1962 tx_helper_unrestrict(&h);
1963
1964 if (require_ack_eliciting && !have_ack_eliciting && a.allow_ping) {
1965 WPACKET *wpkt;
1966
1967 wpkt = tx_helper_begin(&h);
1968 if (wpkt == NULL)
1969 goto fatal_err;
1970
1971 if (!ossl_quic_wire_encode_frame_ping(wpkt)
1972 || !tx_helper_commit(&h))
1973 /*
1974 * We treat a request to be ACK-eliciting as a requirement, so this
1975 * is an error.
1976 */
1977 goto fatal_err;
1978
1979 have_ack_eliciting = 1;
1980 }
1981
1982 /* PADDING */
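    /*
     * If the payload is still shorter than the minimum payload length
     * required for this packet (min_ppl, e.g. to meet minimum datagram size
     * requirements), fill the remainder with PADDING.
     */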
1983 if (h.bytes_appended < min_ppl) {
1984 WPACKET *wpkt = tx_helper_begin(&h);
1985 if (wpkt == NULL)
1986 goto fatal_err;
1987
1988 if (!ossl_quic_wire_encode_padding(wpkt, min_ppl - h.bytes_appended)
1989 || !tx_helper_commit(&h))
1990 goto fatal_err;
1991 }
1992
1993 /*
1994 * Dispatch
1995 * ========
1996 */
1997 /* ACKM Data */
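    /*
     * Record the information the ACK manager will need for this packet once
     * it is committed below; num_bytes is the payload we appended plus the
     * per-packet overhead (pkt_overhead) computed earlier.
     */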
1998 tpkt->ackm_pkt.num_bytes = h.bytes_appended + pkt_overhead;
1999 tpkt->ackm_pkt.pkt_num = txp->next_pn[pn_space];
2000 /* largest_acked is set in txp_generate_pre_token */
2001 tpkt->ackm_pkt.pkt_space = pn_space;
2002 tpkt->ackm_pkt.is_inflight = 1;
2003 tpkt->ackm_pkt.is_ack_eliciting = have_ack_eliciting;
2004 tpkt->ackm_pkt.is_pto_probe = 0;
2005 tpkt->ackm_pkt.is_mtu_probe = 0;
2006 tpkt->ackm_pkt.time = ossl_time_now();
2007
2008 /* Packet Information for QTX */
2009 pkt.hdr = phdr;
2010 pkt.iovec = txp->iovec;
2011 pkt.num_iovec = h.num_iovec;
2012 pkt.local = NULL;
2013 pkt.peer = BIO_ADDR_family(&txp->args.peer) == AF_UNSPEC
2014 ? NULL : &txp->args.peer;
2015 pkt.pn = txp->next_pn[pn_space];
2016 pkt.flags = OSSL_QTX_PKT_FLAG_COALESCE; /* always try to coalesce */
2017
2018 /* Do TX key update if needed. */
2019 if (enc_level == QUIC_ENC_LEVEL_1RTT) {
2020 uint64_t cur_pkt_count, max_pkt_count;
2021
2022 cur_pkt_count = ossl_qtx_get_cur_epoch_pkt_count(txp->args.qtx, enc_level);
2023 max_pkt_count = ossl_qtx_get_max_epoch_pkt_count(txp->args.qtx, enc_level);
2024
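        /*
         * Proactively initiate a TX key update once we have used half of the
         * packet count permitted for the current epoch, rather than waiting
         * until the limit is actually reached.
         */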
2025 if (cur_pkt_count >= max_pkt_count / 2)
2026 if (!ossl_qtx_trigger_key_update(txp->args.qtx))
2027 goto fatal_err;
2028 }
2029
2030 if (!ossl_assert(h.bytes_appended > 0))
2031 goto fatal_err;
2032
2033 /* Generate TXPIM chunks representing STOP_SENDING and RESET_STREAM frames. */
2034 for (stream = tmp_head; stream != NULL; stream = stream->txp_next)
2035 if (stream->txp_sent_stop_sending || stream->txp_sent_reset_stream) {
2036 /* Log STOP_SENDING chunk to TXPIM. */
2037 QUIC_TXPIM_CHUNK chunk;
2038
2039 chunk.stream_id = stream->id;
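            /*
             * An inverted (empty) byte range indicates that this chunk
             * carries no stream payload; it exists only to record the
             * STOP_SENDING/RESET_STREAM flags below.
             */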
2040 chunk.start = UINT64_MAX;
2041 chunk.end = 0;
2042 chunk.has_fin = 0;
2043 chunk.has_stop_sending = stream->txp_sent_stop_sending;
2044 chunk.has_reset_stream = stream->txp_sent_reset_stream;
2045 if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
2046 goto fatal_err; /* alloc error */
2047 }
2048
2049 /* Dispatch to FIFD. */
2050 if (!ossl_quic_fifd_pkt_commit(&txp->fifd, tpkt))
2051 goto fatal_err;
2052
2053 /* Send the packet. */
2054 if (!ossl_qtx_write_pkt(txp->args.qtx, &pkt))
2055 goto fatal_err;
2056
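    /* Only consume the PN once the packet has actually been submitted. */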
2057 ++txp->next_pn[pn_space];
2058
2059 /*
2060 * Record FC and stream abort frames as sent; deactivate streams which no
2061 * longer have anything to do.
2062 */
2063 for (stream = tmp_head; stream != NULL; stream = stream->txp_next) {
2064 if (stream->txp_sent_fc) {
2065 stream->want_max_stream_data = 0;
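            /*
             * Passing 1 here resets the stream RXFC's CWM-changed flag now
             * that the updated limit has been sent.
             */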
2066 ossl_quic_rxfc_has_cwm_changed(&stream->rxfc, 1);
2067 }
2068
2069 if (stream->txp_sent_stop_sending)
2070 stream->want_stop_sending = 0;
2071
2072 if (stream->txp_sent_reset_stream)
2073 stream->want_reset_stream = 0;
2074
2075 if (stream->txp_txfc_new_credit_consumed > 0) {
2076 if (!ossl_assert(ossl_quic_txfc_consume_credit(&stream->txfc,
2077 stream->txp_txfc_new_credit_consumed)))
2078 /*
2079 * Should not be possible, but we should continue with our
2080 * bookkeeping as we have already committed the packet to the
2081 * FIFD. Just change the value we return.
2082 */
2083 rc = TXP_ERR_INTERNAL;
2084
2085 stream->txp_txfc_new_credit_consumed = 0;
2086 }
2087
2088 /*
2089 * If we no longer need to generate any flow control (MAX_STREAM_DATA),
2090 * STOP_SENDING or RESET_STREAM frames, nor any STREAM frames (because
2091 * the stream is drained of data or TXFC-blocked), we can mark the
2092 * stream as inactive.
2093 */
2094 ossl_quic_stream_map_update_state(txp->args.qsm, stream);
2095
2096 if (!stream->want_max_stream_data
2097 && !stream->want_stop_sending
2098 && !stream->want_reset_stream
2099 && (stream->txp_drained || stream->txp_blocked))
2100 assert(!stream->active);
2101 }
2102
2103 /* We have now sent the packet, so update state accordingly. */
2104 if (have_ack_eliciting)
2105 txp->force_ack_eliciting &= ~(1UL << pn_space);
2106
2107 if (tpkt->had_handshake_done_frame)
2108 txp->want_handshake_done = 0;
2109
2110 if (tpkt->had_max_data_frame) {
2111 txp->want_max_data = 0;
2112 ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 1);
2113 }
2114
2115 if (tpkt->had_max_streams_bidi_frame)
2116 txp->want_max_streams_bidi = 0;
2117
2118 if (tpkt->had_max_streams_uni_frame)
2119 txp->want_max_streams_uni = 0;
2120
2121 if (tpkt->had_ack_frame)
2122 txp->want_ack &= ~(1UL << pn_space);
2123
2124 /* Done. */
2125 tx_helper_cleanup(&h);
2126 return rc;
2127
2128fatal_err:
2129 /*
2130 * Handler for fatal errors, i.e. errors causing us to abort the entire
2131 * packet rather than just one frame. Examples of such errors include
2132 * allocation errors.
2133 */
2134 if (have_helper)
2135 tx_helper_cleanup(&h);
2136 if (tpkt != NULL)
2137 ossl_quic_txpim_pkt_release(txp->args.txpim, tpkt);
2138 return TXP_ERR_INTERNAL;
2139}
2140
2141/* Ensure the iovec array is at least num elements long. */
2142static int txp_ensure_iovec(OSSL_QUIC_TX_PACKETISER *txp, size_t num)
2143{
2144 OSSL_QTX_IOVEC *iovec;
2145
2146 if (txp->alloc_iovec >= num)
2147 return 1;
2148
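    /*
     * Grow geometrically: double the current allocation, or start with eight
     * entries. A single doubling is assumed to suffice here, as callers only
     * grow the array incrementally.
     */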
2149 num = txp->alloc_iovec != 0 ? txp->alloc_iovec * 2 : 8;
2150
2151 iovec = OPENSSL_realloc(txp->iovec, sizeof(OSSL_QTX_IOVEC) * num);
2152 if (iovec == NULL)
2153 return 0;
2154
2155 txp->iovec = iovec;
2156 txp->alloc_iovec = num;
2157 return 1;
2158}
2159
2160int ossl_quic_tx_packetiser_schedule_conn_close(OSSL_QUIC_TX_PACKETISER *txp,
2161 const OSSL_QUIC_FRAME_CONN_CLOSE *f)
2162{
2163 char *reason = NULL;
2164 size_t reason_len = f->reason_len;
2165 size_t max_reason_len = txp_get_mdpl(txp) / 2;
2166
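    /* A terminal CONNECTION_CLOSE frame can only be scheduled once. */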
2167 if (txp->want_conn_close)
2168 return 0;
2169
2170 /*
2171 * Arbitrarily limit the length of the reason string to half of the MDPL
2172 * (maximum datagram payload length).
2173 */
2174 if (reason_len > max_reason_len)
2175 reason_len = max_reason_len;
2176
2177 if (reason_len > 0) {
2178 reason = OPENSSL_memdup(f->reason, reason_len);
2179 if (reason == NULL)
2180 return 0;
2181 }
2182
2183 txp->conn_close_frame = *f;
2184 txp->conn_close_frame.reason = reason;
2185 txp->conn_close_frame.reason_len = reason_len;
2186 txp->want_conn_close = 1;
2187 return 1;
2188}