/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include <openssl/evp.h>
#include <openssl/core_names.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>
#include "../../ssl_local.h"
#include "../record_local.h"
#include "recmethod_local.h"
18 static int tls1_set_crypto_state(OSSL_RECORD_LAYER
*rl
, int level
,
19 unsigned char *key
, size_t keylen
,
20 unsigned char *iv
, size_t ivlen
,
21 unsigned char *mackey
, size_t mackeylen
,
22 const EVP_CIPHER
*ciph
,
28 EVP_CIPHER_CTX
*ciph_ctx
;
30 int enc
= (rl
->direction
== OSSL_RECORD_DIRECTION_WRITE
) ? 1 : 0;
32 if (level
!= OSSL_RECORD_PROTECTION_LEVEL_APPLICATION
)
33 return OSSL_RECORD_RETURN_FATAL
;
35 if ((rl
->enc_ctx
= EVP_CIPHER_CTX_new()) == NULL
) {
36 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_EVP_LIB
);
37 return OSSL_RECORD_RETURN_FATAL
;
40 ciph_ctx
= rl
->enc_ctx
;
42 rl
->md_ctx
= EVP_MD_CTX_new();
43 if (rl
->md_ctx
== NULL
) {
44 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
45 return OSSL_RECORD_RETURN_FATAL
;
47 #ifndef OPENSSL_NO_COMP
49 rl
->compctx
= COMP_CTX_new(comp
);
50 if (rl
->compctx
== NULL
) {
51 ERR_raise(ERR_LIB_SSL
, SSL_R_COMPRESSION_LIBRARY_ERROR
);
52 return OSSL_RECORD_RETURN_FATAL
;
58 * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
59 * setting up the MAC key.
61 if ((EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
) == 0) {
62 if (mactype
== EVP_PKEY_HMAC
) {
63 mac_key
= EVP_PKEY_new_raw_private_key_ex(rl
->libctx
, "HMAC",
68 * If its not HMAC then the only other types of MAC we support are
69 * the GOST MACs, so we need to use the old style way of creating
72 mac_key
= EVP_PKEY_new_mac_key(mactype
, NULL
, mackey
,
76 || EVP_DigestSignInit_ex(rl
->md_ctx
, NULL
, EVP_MD_get0_name(md
),
77 rl
->libctx
, rl
->propq
, mac_key
,
79 EVP_PKEY_free(mac_key
);
80 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
81 return OSSL_RECORD_RETURN_FATAL
;
83 EVP_PKEY_free(mac_key
);
86 if (EVP_CIPHER_get_mode(ciph
) == EVP_CIPH_GCM_MODE
) {
87 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, key
, NULL
, enc
)
88 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_GCM_SET_IV_FIXED
,
89 (int)ivlen
, iv
) <= 0) {
90 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
91 return OSSL_RECORD_RETURN_FATAL
;
93 } else if (EVP_CIPHER_get_mode(ciph
) == EVP_CIPH_CCM_MODE
) {
94 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, NULL
, NULL
, enc
)
95 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_IVLEN
, 12,
97 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_TAG
,
98 (int)taglen
, NULL
) <= 0
99 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_CCM_SET_IV_FIXED
,
101 || !EVP_CipherInit_ex(ciph_ctx
, NULL
, NULL
, key
, NULL
, enc
)) {
102 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
103 return OSSL_RECORD_RETURN_FATAL
;
106 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, key
, iv
, enc
)) {
107 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
108 return OSSL_RECORD_RETURN_FATAL
;
111 /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
112 if ((EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0
114 && EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_MAC_KEY
,
115 (int)mackeylen
, mackey
) <= 0) {
116 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
117 return OSSL_RECORD_RETURN_FATAL
;
119 if (EVP_CIPHER_get0_provider(ciph
) != NULL
120 && !ossl_set_tls_provider_parameters(rl
, ciph_ctx
, ciph
, md
))
121 return OSSL_RECORD_RETURN_FATAL
;
123 /* Calculate the explict IV length */
124 if (RLAYER_USE_EXPLICIT_IV(rl
)) {
125 int mode
= EVP_CIPHER_CTX_get_mode(ciph_ctx
);
128 if (mode
== EVP_CIPH_CBC_MODE
) {
129 eivlen
= EVP_CIPHER_CTX_get_iv_length(ciph_ctx
);
131 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_LIBRARY_BUG
);
132 return OSSL_RECORD_RETURN_FATAL
;
136 } else if (mode
== EVP_CIPH_GCM_MODE
) {
137 /* Need explicit part of IV for GCM mode */
138 eivlen
= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
139 } else if (mode
== EVP_CIPH_CCM_MODE
) {
140 eivlen
= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
142 rl
->eivlen
= (size_t)eivlen
;
145 return OSSL_RECORD_RETURN_SUCCESS
;
148 #define MAX_PADDING 256
150 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
151 * internal error, but not otherwise. It is the responsibility of the caller to
152 * report a bad_record_mac - if appropriate (DTLS just drops the record).
155 * 0: if the record is publicly invalid, or an internal error, or AEAD
156 * decryption failed, or Encrypt-then-mac decryption failed.
157 * 1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
159 static int tls1_cipher(OSSL_RECORD_LAYER
*rl
, SSL3_RECORD
*recs
, size_t n_recs
,
160 int sending
, SSL_MAC_BUF
*macs
, size_t macsize
)
163 size_t reclen
[SSL_MAX_PIPELINES
];
164 unsigned char buf
[SSL_MAX_PIPELINES
][EVP_AEAD_TLS1_AAD_LEN
];
165 unsigned char *data
[SSL_MAX_PIPELINES
];
166 int pad
= 0, tmpr
, provided
;
167 size_t bs
, ctr
, padnum
, loop
;
168 unsigned char padval
;
169 const EVP_CIPHER
*enc
;
172 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
176 if (EVP_MD_CTX_get0_md(rl
->md_ctx
)) {
177 int n
= EVP_MD_CTX_get_size(rl
->md_ctx
);
179 if (!ossl_assert(n
>= 0)) {
180 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
185 if (!ossl_assert(rl
->enc_ctx
!= NULL
)) {
186 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
190 enc
= EVP_CIPHER_CTX_get0_cipher(rl
->enc_ctx
);
195 /* For TLSv1.1 and later explicit IV */
196 if (RLAYER_USE_EXPLICIT_IV(rl
)
197 && EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CBC_MODE
)
198 ivlen
= EVP_CIPHER_get_iv_length(enc
);
202 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
203 if (recs
[ctr
].data
!= recs
[ctr
].input
) {
204 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
206 } else if (RAND_bytes_ex(rl
->libctx
, recs
[ctr
].input
,
208 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
214 if (!ossl_assert(enc
!= NULL
)) {
215 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
219 provided
= (EVP_CIPHER_get0_provider(enc
) != NULL
);
221 bs
= EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds
));
224 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
225 & EVP_CIPH_FLAG_PIPELINE
) == 0) {
227 * We shouldn't have been called with pipeline data if the
228 * cipher doesn't support pipelining
230 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
234 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
235 reclen
[ctr
] = recs
[ctr
].length
;
237 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
238 & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0) {
244 unsigned char dtlsseq
[8], *p
= dtlsseq
;
247 memcpy(p
, &seq
[2], 6);
248 memcpy(buf
[ctr
], dtlsseq
, 8);
250 memcpy(buf
[ctr
], seq
, 8);
251 if (!tls_increment_sequence_ctr(rl
)) {
252 /* RLAYERfatal already called */
257 buf
[ctr
][8] = recs
[ctr
].type
;
258 buf
[ctr
][9] = (unsigned char)(rl
->version
>> 8);
259 buf
[ctr
][10] = (unsigned char)(rl
->version
);
260 buf
[ctr
][11] = (unsigned char)(recs
[ctr
].length
>> 8);
261 buf
[ctr
][12] = (unsigned char)(recs
[ctr
].length
& 0xff);
262 pad
= EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_AEAD_TLS1_AAD
,
263 EVP_AEAD_TLS1_AAD_LEN
, buf
[ctr
]);
265 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
271 recs
[ctr
].length
+= pad
;
273 } else if ((bs
!= 1) && sending
&& !provided
) {
275 * We only do this for legacy ciphers. Provided ciphers add the
276 * padding on the provider side.
278 padnum
= bs
- (reclen
[ctr
] % bs
);
280 /* Add weird padding of up to 256 bytes */
282 if (padnum
> MAX_PADDING
) {
283 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
286 /* we need to add 'padnum' padding bytes of value padval */
287 padval
= (unsigned char)(padnum
- 1);
288 for (loop
= reclen
[ctr
]; loop
< reclen
[ctr
] + padnum
; loop
++)
289 recs
[ctr
].input
[loop
] = padval
;
290 reclen
[ctr
] += padnum
;
291 recs
[ctr
].length
+= padnum
;
295 if (reclen
[ctr
] == 0 || reclen
[ctr
] % bs
!= 0) {
296 /* Publicly invalid */
302 /* Set the output buffers */
303 for (ctr
= 0; ctr
< n_recs
; ctr
++)
304 data
[ctr
] = recs
[ctr
].data
;
306 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS
,
307 (int)n_recs
, data
) <= 0) {
308 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
311 /* Set the input buffers */
312 for (ctr
= 0; ctr
< n_recs
; ctr
++)
313 data
[ctr
] = recs
[ctr
].input
;
315 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_INPUT_BUFS
,
316 (int)n_recs
, data
) <= 0
317 || EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_INPUT_LENS
,
318 (int)n_recs
, reclen
) <= 0) {
319 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
324 if (!rl
->isdtls
&& rl
->tlstree
) {
325 int decrement_seq
= 0;
328 * When sending, seq is incremented after MAC calculation.
329 * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
330 * Otherwise we have to decrease it in the implementation
332 if (sending
&& !rl
->use_etm
)
335 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_TLSTREE
, decrement_seq
,
336 rl
->sequence
) <= 0) {
338 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
346 /* Provided cipher - we do not support pipelining on this path */
348 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
352 if (!EVP_CipherUpdate(ds
, recs
[0].data
, &outlen
, recs
[0].input
,
353 (unsigned int)reclen
[0]))
355 recs
[0].length
= outlen
;
358 * The length returned from EVP_CipherUpdate above is the actual
359 * payload length. We need to adjust the data/input ptr to skip over
363 if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_GCM_MODE
) {
364 recs
[0].data
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
365 recs
[0].input
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
366 } else if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CCM_MODE
) {
367 recs
[0].data
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
368 recs
[0].input
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
369 } else if (bs
!= 1 && RLAYER_USE_EXPLICIT_IV(rl
)) {
372 recs
[0].orig_len
-= bs
;
375 /* Now get a pointer to the MAC (if applicable) */
377 OSSL_PARAM params
[2], *p
= params
;
382 *p
++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC
,
383 (void **)&macs
[0].mac
,
385 *p
= OSSL_PARAM_construct_end();
387 if (!EVP_CIPHER_CTX_get_params(ds
, params
)) {
388 /* Shouldn't normally happen */
389 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
,
390 ERR_R_INTERNAL_ERROR
);
398 tmpr
= EVP_Cipher(ds
, recs
[0].data
, recs
[0].input
,
399 (unsigned int)reclen
[0]);
400 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
401 & EVP_CIPH_FLAG_CUSTOM_CIPHER
) != 0
404 /* AEAD can fail to verify MAC */
409 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
410 /* Adjust the record to remove the explicit IV/MAC/Tag */
411 if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_GCM_MODE
) {
412 recs
[ctr
].data
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
413 recs
[ctr
].input
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
414 recs
[ctr
].length
-= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
415 } else if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CCM_MODE
) {
416 recs
[ctr
].data
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
417 recs
[ctr
].input
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
418 recs
[ctr
].length
-= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
419 } else if (bs
!= 1 && RLAYER_USE_EXPLICIT_IV(rl
)) {
420 if (recs
[ctr
].length
< bs
)
422 recs
[ctr
].data
+= bs
;
423 recs
[ctr
].input
+= bs
;
424 recs
[ctr
].length
-= bs
;
425 recs
[ctr
].orig_len
-= bs
;
429 * If using Mac-then-encrypt, then this will succeed but
430 * with a random MAC if padding is invalid
432 if (!tls1_cbc_remove_padding_and_mac(&recs
[ctr
].length
,
435 (macs
!= NULL
) ? &macs
[ctr
].mac
: NULL
,
436 (macs
!= NULL
) ? &macs
[ctr
].alloced
439 pad
? (size_t)pad
: macsize
,
440 (EVP_CIPHER_get_flags(enc
)
441 & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0,
450 static int tls1_mac(OSSL_RECORD_LAYER
*rl
, SSL3_RECORD
*rec
, unsigned char *md
,
453 unsigned char *seq
= rl
->sequence
;
456 EVP_MD_CTX
*hmac
= NULL
, *mac_ctx
;
457 unsigned char header
[13];
463 t
= EVP_MD_CTX_get_size(hash
);
464 if (!ossl_assert(t
>= 0))
468 if (rl
->stream_mac
) {
471 hmac
= EVP_MD_CTX_new();
472 if (hmac
== NULL
|| !EVP_MD_CTX_copy(hmac
, hash
)) {
480 && EVP_MD_CTX_ctrl(mac_ctx
, EVP_MD_CTRL_TLSTREE
, 0, seq
) <= 0)
484 unsigned char dtlsseq
[8], *p
= dtlsseq
;
487 memcpy(p
, &seq
[2], 6);
489 memcpy(header
, dtlsseq
, 8);
491 memcpy(header
, seq
, 8);
494 header
[8] = rec
->type
;
495 header
[9] = (unsigned char)(rl
->version
>> 8);
496 header
[10] = (unsigned char)(rl
->version
);
497 header
[11] = (unsigned char)(rec
->length
>> 8);
498 header
[12] = (unsigned char)(rec
->length
& 0xff);
500 if (!sending
&& !rl
->use_etm
501 && EVP_CIPHER_CTX_get_mode(rl
->enc_ctx
) == EVP_CIPH_CBC_MODE
502 && ssl3_cbc_record_digest_supported(mac_ctx
)) {
503 OSSL_PARAM tls_hmac_params
[2], *p
= tls_hmac_params
;
505 *p
++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE
,
507 *p
++ = OSSL_PARAM_construct_end();
509 if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx
),
514 if (EVP_DigestSignUpdate(mac_ctx
, header
, sizeof(header
)) <= 0
515 || EVP_DigestSignUpdate(mac_ctx
, rec
->input
, rec
->length
) <= 0
516 || EVP_DigestSignFinal(mac_ctx
, md
, &md_size
) <= 0)
519 OSSL_TRACE_BEGIN(TLS
) {
520 BIO_printf(trc_out
, "seq:\n");
521 BIO_dump_indent(trc_out
, seq
, 8, 4);
522 BIO_printf(trc_out
, "rec:\n");
523 BIO_dump_indent(trc_out
, rec
->data
, rec
->length
, 4);
524 } OSSL_TRACE_END(TLS
);
526 if (!rl
->isdtls
&& !tls_increment_sequence_ctr(rl
)) {
527 /* RLAYERfatal already called */
531 OSSL_TRACE_BEGIN(TLS
) {
532 BIO_printf(trc_out
, "md:\n");
533 BIO_dump_indent(trc_out
, md
, md_size
, 4);
534 } OSSL_TRACE_END(TLS
);
537 EVP_MD_CTX_free(hmac
);
/*
 * Maximum size needed for the write-buffer prefix record (empty-fragment
 * countermeasure): alignment slack (if payload alignment is enabled) plus
 * encryption overhead, record header and, when compression is compiled in,
 * compression overhead.
 */
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#else
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#endif
563 /* This function is also used by the SSLv3 implementation */
564 int tls1_allocate_write_buffers(OSSL_RECORD_LAYER
*rl
,
565 OSSL_RECORD_TEMPLATE
*templates
,
566 size_t numtempl
, size_t *prefix
)
568 /* Do we need to add an empty record prefix? */
569 *prefix
= rl
->need_empty_fragments
570 && templates
[0].type
== SSL3_RT_APPLICATION_DATA
;
573 * In the prefix case we can allocate a much smaller buffer. Otherwise we
574 * just allocate the default buffer size
576 if (!tls_setup_write_buffer(rl
, numtempl
+ *prefix
,
577 *prefix
? MAX_PREFIX_LEN
: 0, 0)) {
578 /* RLAYERfatal() already called */
585 /* This function is also used by the SSLv3 implementation */
586 int tls1_initialise_write_packets(OSSL_RECORD_LAYER
*rl
,
587 OSSL_RECORD_TEMPLATE
*templates
,
589 OSSL_RECORD_TEMPLATE
*prefixtempl
,
598 /* Do we need to add an empty record prefix? */
599 prefix
= rl
->need_empty_fragments
600 && templates
[0].type
== SSL3_RT_APPLICATION_DATA
;
604 * countermeasure against known-IV weakness in CBC ciphersuites (see
605 * http://www.openssl.org/~bodo/tls-cbc.txt)
607 prefixtempl
->buf
= NULL
;
608 prefixtempl
->version
= templates
[0].version
;
609 prefixtempl
->buflen
= 0;
610 prefixtempl
->type
= SSL3_RT_APPLICATION_DATA
;
614 #if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
615 align
= (size_t)SSL3_BUFFER_get_buf(wb
) + SSL3_RT_HEADER_LENGTH
;
616 align
= SSL3_ALIGN_PAYLOAD
- 1
617 - ((align
- 1) % SSL3_ALIGN_PAYLOAD
);
619 SSL3_BUFFER_set_offset(wb
, align
);
621 if (!WPACKET_init_static_len(&pkt
[0], SSL3_BUFFER_get_buf(wb
),
622 SSL3_BUFFER_get_len(wb
), 0)) {
623 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
627 if (!WPACKET_allocate_bytes(&pkt
[0], align
, NULL
)) {
628 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
633 return tls_initialise_write_packets_default(rl
, templates
, numtempl
,
635 pkt
+ prefix
, bufs
+ prefix
,
639 /* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
640 struct record_functions_st tls_1_funcs
= {
641 tls1_set_crypto_state
,
644 tls_default_set_protocol_version
,
646 tls_get_more_records
,
647 tls_default_validate_record_header
,
648 tls_default_post_process_record
,
649 tls_get_max_records_multiblock
,
650 tls_write_records_multiblock
, /* Defined in tls_multib.c */
651 tls1_allocate_write_buffers
,
652 tls1_initialise_write_packets
,
654 tls_prepare_record_header_default
,
656 tls_prepare_for_encryption_default
,
657 tls_post_encryption_processing_default
,
661 struct record_functions_st dtls_1_funcs
= {
662 tls1_set_crypto_state
,
665 tls_default_set_protocol_version
,
667 dtls_get_more_records
,
671 tls_write_records_default
,
673 * Don't use tls1_allocate_write_buffers since that handles empty fragment
674 * records which aren't needed in DTLS. We just use the default allocation
677 tls_allocate_write_buffers_default
,
678 /* Don't use tls1_initialise_write_packets for same reason as above */
679 tls_initialise_write_packets_default
,
681 dtls_prepare_record_header
,
683 tls_prepare_for_encryption_default
,
684 dtls_post_encryption_processing
,