/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include <openssl/evp.h>
#include <openssl/core_names.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>
#include "../../ssl_local.h"
#include "../record_local.h"
#include "recmethod_local.h"
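/*
 * Record layer protection methods shared by TLSv1.0, TLSv1.1, TLSv1.2 and
 * DTLS (see the record_functions_st tables at the end of this file).
 */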
static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 int mactype,
                                 const EVP_MD *md,
                                 COMP_METHOD *comp)
{
    EVP_CIPHER_CTX *ciph_ctx;
    EVP_PKEY *mac_key;
    int enc = (rl->direction == OSSL_RECORD_DIRECTION_WRITE) ? 1 : 0;
    if (level != OSSL_RECORD_PROTECTION_LEVEL_APPLICATION)
        return OSSL_RECORD_RETURN_FATAL;
    if ((rl->enc_ctx = EVP_CIPHER_CTX_new()) == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_EVP_LIB);
        return OSSL_RECORD_RETURN_FATAL;
    }
    ciph_ctx = rl->enc_ctx;
    rl->md_ctx = EVP_MD_CTX_new();
    if (rl->md_ctx == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
#ifndef OPENSSL_NO_COMP
    if (comp != NULL) {
        rl->compctx = COMP_CTX_new(comp);
        if (rl->compctx == NULL) {
            ERR_raise(ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
#endif
    /*
     * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
     * setting up the MAC key.
     */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) == 0) {
        if (mactype == EVP_PKEY_HMAC) {
            mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
                                                      rl->propq, mackey,
                                                      mackeylen);
        } else {
            /*
             * If it's not HMAC then the only other types of MAC we support are
             * the GOST MACs, so we need to use the old style way of creating
             * a MAC key.
             */
            mac_key = EVP_PKEY_new_mac_key(mactype, NULL, mackey,
                                           (int)mackeylen);
        }
        if (mac_key == NULL
            || EVP_DigestSignInit_ex(rl->md_ctx, NULL, EVP_MD_get0_name(md),
                                     rl->libctx, rl->propq, mac_key,
                                     NULL) <= 0) {
            EVP_PKEY_free(mac_key);
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
        EVP_PKEY_free(mac_key);
    }
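    /*
     * Initialise the cipher context itself. GCM and CCM take only the fixed
     * (implicit) part of the nonce here, installed via a ctrl; the remaining
     * explicit part is carried in each record. Other ciphers take the whole
     * IV directly.
     */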
    if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_GCM_MODE) {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, NULL, enc)
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_GCM_SET_IV_FIXED,
                                       (int)ivlen, iv) <= 0) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_CCM_MODE) {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, NULL, NULL, enc)
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, 12,
                                       NULL) <= 0
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG,
                                       (int)taglen, NULL) <= 0
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_CCM_SET_IV_FIXED,
                                       (int)ivlen, iv) <= 0
                || !EVP_CipherInit_ex(ciph_ctx, NULL, NULL, key, NULL, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, iv, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
111 /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
112 if ((EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0
114 && EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_MAC_KEY
,
115 (int)mackeylen
, mackey
) <= 0) {
116 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
117 return OSSL_RECORD_RETURN_FATAL
;
    if (EVP_CIPHER_get0_provider(ciph) != NULL
            && !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md))
        return OSSL_RECORD_RETURN_FATAL;
    /* Calculate the explicit IV length */
    if (RLAYER_USE_EXPLICIT_IV(rl)) {
        int mode = EVP_CIPHER_CTX_get_mode(ciph_ctx);
        int eivlen = 0;

        if (mode == EVP_CIPH_CBC_MODE) {
            eivlen = EVP_CIPHER_CTX_get_iv_length(ciph_ctx);
            if (eivlen < 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_LIBRARY_BUG);
                return OSSL_RECORD_RETURN_FATAL;
            }
            if (eivlen <= 1)
                eivlen = 0;
        } else if (mode == EVP_CIPH_GCM_MODE) {
            /* Need explicit part of IV for GCM mode */
            eivlen = EVP_GCM_TLS_EXPLICIT_IV_LEN;
        } else if (mode == EVP_CIPH_CCM_MODE) {
            eivlen = EVP_CCM_TLS_EXPLICIT_IV_LEN;
        }
        rl->eivlen = (size_t)eivlen;
    }

    return OSSL_RECORD_RETURN_SUCCESS;
}
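/*
 * TLS CBC padding can add at most 256 bytes: up to 255 padding bytes plus
 * the padding_length byte itself.
 */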
#define MAX_PADDING 256
/*-
 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
 * internal error, but not otherwise. It is the responsibility of the caller to
 * report a bad_record_mac - if appropriate (DTLS just drops the record).
 *
 * Returns:
 *    0: if the record is publicly invalid, or an internal error, or AEAD
 *       decryption failed, or Encrypt-then-mac decryption failed.
 *    1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
 */
static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
                       int sending, SSL_MAC_BUF *macs, size_t macsize)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    int pad = 0, tmpr, provided;
    size_t bs, ctr, padnum, loop;
    unsigned char padval;
    const EVP_CIPHER *enc;
    if (n_recs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }
    if (EVP_MD_CTX_get0_md(rl->md_ctx)) {
        int n = EVP_MD_CTX_get_size(rl->md_ctx);

        if (!ossl_assert(n >= 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }
    if (!ossl_assert(rl->enc_ctx != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }
    ds = rl->enc_ctx;

    enc = EVP_CIPHER_CTX_get0_cipher(rl->enc_ctx);
    if (sending) {
        int ivlen;

        /* For TLSv1.1 and later explicit IV */
        if (RLAYER_USE_EXPLICIT_IV(rl)
                && EVP_CIPHER_get_mode(enc) == EVP_CIPH_CBC_MODE)
            ivlen = EVP_CIPHER_get_iv_length(enc);
        else
            ivlen = 0;
        if (ivlen > 1) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                if (recs[ctr].data != recs[ctr].input) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                } else if (RAND_bytes_ex(rl->libctx, recs[ctr].input,
                                         ivlen, 0) <= 0) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    }
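    /*
     * For CBC mode in TLSv1.1+ the explicit IV slot at the front of each
     * outgoing record has just been filled with fresh random bytes, so every
     * record gets an unpredictable IV.
     */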
    if (!ossl_assert(enc != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }
    provided = (EVP_CIPHER_get0_provider(enc) != NULL);

    bs = EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds));
    if (n_recs > 1) {
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
                & EVP_CIPH_FLAG_PIPELINE) == 0) {
            /*
             * We shouldn't have been called with pipeline data if the
             * cipher doesn't support pipelining
             */
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
    for (ctr = 0; ctr < n_recs; ctr++) {
        reclen[ctr] = recs[ctr].length;

        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
                & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
            unsigned char *seq = rl->sequence;

            if (rl->isdtls) {
                unsigned char dtlsseq[8], *p = dtlsseq;

                s2n(rl->epoch, p);
                memcpy(p, &seq[2], 6);
                memcpy(buf[ctr], dtlsseq, 8);
            } else {
                memcpy(buf[ctr], seq, 8);
                if (!tls_increment_sequence_ctr(rl)) {
                    /* RLAYERfatal already called */
                    return 0;
                }
            }
            buf[ctr][8] = recs[ctr].type;
            buf[ctr][9] = (unsigned char)(rl->version >> 8);
            buf[ctr][10] = (unsigned char)(rl->version);
            buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
            buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
            pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                      EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
            if (pad <= 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }

            if (sending) {
                reclen[ctr] += pad;
                recs[ctr].length += pad;
            }
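            /*
             * buf[ctr] is the 13-byte TLS AAD/pseudo-header: 8-byte sequence
             * number, 1-byte record type, 2-byte version and 2-byte plaintext
             * length. On the encrypt side the ctrl returns how many bytes the
             * record will grow (e.g. the AEAD tag), accounted for above.
             */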
        } else if ((bs != 1) && sending && !provided) {
            /*
             * We only do this for legacy ciphers. Provided ciphers add the
             * padding on the provider side.
             */
= bs
- (reclen
[ctr
] % bs
);
279 /* Add weird padding of up to 256 bytes */
281 if (padnum
> MAX_PADDING
) {
282 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
            /* we need to add 'padnum' padding bytes of value padval */
            padval = (unsigned char)(padnum - 1);
            for (loop = reclen[ctr]; loop < reclen[ctr] + padnum; loop++)
                recs[ctr].input[loop] = padval;
            reclen[ctr] += padnum;
            recs[ctr].length += padnum;
        }
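        /*
         * Worked example: with a 16-byte block cipher and reclen % 16 == 13,
         * padnum is 3, so three bytes of value 0x02 are appended; the final
         * byte doubles as the TLS padding_length.
         */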
        if (!sending) {
            if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) {
                /* Publicly invalid */
                return 0;
            }
        }
    }
    if (n_recs > 1) {
        unsigned char *data[SSL_MAX_PIPELINES];
        /* Set the output buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].data;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                (int)n_recs, data) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
        /* Set the input buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].input;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                (int)n_recs, data) <= 0
                || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                       (int)n_recs, reclen) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
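    /*
     * With the buffers and lengths registered above, a pipeline-capable
     * cipher can process all |n_recs| records in a single cipher call below.
     */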
    if (!rl->isdtls && rl->tlstree) {
        int decrement_seq = 0;

        /*
         * When sending, seq is incremented after MAC calculation.
         * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
         * Otherwise we have to decrease it in the implementation
         */
        if (sending && !rl->use_etm)
            decrement_seq = 1;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_TLSTREE, decrement_seq,
                                rl->sequence) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }
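    /*
     * EVP_CTRL_TLSTREE re-keys the cipher from the record sequence number;
     * it is used by the GOST TLSTREE ciphersuites.
     */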
    if (provided) {
        int outlen;

        /* Provided cipher - we do not support pipelining on this path */
        if (n_recs > 1) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
                              (unsigned int)reclen[0]))
            return 0;
        recs[0].length = outlen;
        /*
         * The length returned from EVP_CipherUpdate above is the actual
         * payload length. We need to adjust the data/input ptr to skip over
         * any explicit IV
         */
        if (!sending) {
            if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
            } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
            } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                recs[0].data += bs;
                recs[0].input += bs;
                recs[0].orig_len -= bs;
            }
            /* Now get a pointer to the MAC (if applicable) */
            if (macs != NULL) {
                OSSL_PARAM params[2], *p = params;

                *p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
                                                      (void **)&macs[0].mac,
                                                      macsize);
                *p = OSSL_PARAM_construct_end();
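                /*
                 * In TLS mode a provider-based cipher handles the MAC
                 * internally; OSSL_CIPHER_PARAM_TLS_MAC hands back a pointer
                 * to that MAC for the record layer to use.
                 */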
                if (!EVP_CIPHER_CTX_get_params(ds, params)) {
                    /* Shouldn't normally happen */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR,
                                ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        /* Legacy cipher */

        tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
                          (unsigned int)reclen[0]);
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
                & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
                ? (tmpr < 0)
                : (tmpr == 0)) {
            /* AEAD can fail to verify MAC */
            return 0;
        }

        if (!sending) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                /* Adjust the record to remove the explicit IV/MAC/Tag */
                if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                    if (recs[ctr].length < bs)
                        return 0;
                    recs[ctr].data += bs;
                    recs[ctr].input += bs;
                    recs[ctr].length -= bs;
                    recs[ctr].orig_len -= bs;
                }
                /*
                 * If using Mac-then-encrypt, then this will succeed but
                 * with a random MAC if padding is invalid
                 */
                if (!tls1_cbc_remove_padding_and_mac(&recs[ctr].length,
                                recs[ctr].orig_len,
                                recs[ctr].data,
                                (macs != NULL) ? &macs[ctr].mac : NULL,
                                (macs != NULL) ? &macs[ctr].alloced
                                               : NULL,
                                bs,
                                pad ? (size_t)pad : macsize,
                                (EVP_CIPHER_get_flags(enc)
                                 & EVP_CIPH_FLAG_AEAD_CIPHER) != 0,
                                rl->libctx))
                    return 0;
            }
        }
    }
    return 1;
}
static int tls1_mac(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec, unsigned char *md,
                    int sending)
{
    unsigned char *seq = rl->sequence;
    EVP_MD_CTX *hash;
    size_t md_size;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    unsigned char header[13];
    int t;
    int ret = 0;

    hash = rl->md_ctx;

    t = EVP_MD_CTX_get_size(hash);
    if (!ossl_assert(t >= 0))
        return 0;
    md_size = t;
    if (rl->stream_mac) {
        mac_ctx = hash;
    } else {
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash)) {
            goto end;
        }
        mac_ctx = hmac;
    }
    if (!rl->isdtls
            && rl->tlstree
            && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0)
        goto end;
    if (rl->isdtls) {
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(rl->epoch, p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else {
        memcpy(header, seq, 8);
    }
    header[8] = rec->type;
    header[9] = (unsigned char)(rl->version >> 8);
    header[10] = (unsigned char)(rl->version);
    header[11] = (unsigned char)(rec->length >> 8);
    header[12] = (unsigned char)(rec->length & 0xff);
    if (!sending && !rl->use_etm
            && EVP_CIPHER_CTX_get_mode(rl->enc_ctx) == EVP_CIPH_CBC_MODE
            && ssl3_cbc_record_digest_supported(mac_ctx)) {
        OSSL_PARAM tls_hmac_params[2], *p = tls_hmac_params;

        *p++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE,
                                           &rec->orig_len);
        *p++ = OSSL_PARAM_construct_end();

        if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
                                     tls_hmac_params)) {
            goto end;
        }
    }
    if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
            || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
            || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0)
        goto end;
    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "seq:\n");
        BIO_dump_indent(trc_out, seq, 8, 4);
        BIO_printf(trc_out, "rec:\n");
        BIO_dump_indent(trc_out, rec->data, rec->length, 4);
    } OSSL_TRACE_END(TLS);
    if (!rl->isdtls && !tls_increment_sequence_ctr(rl)) {
        /* RLAYERfatal already called */
        goto end;
    }
    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "md:\n");
        BIO_dump_indent(trc_out, md, md_size, 4);
    } OSSL_TRACE_END(TLS);
    ret = 1;
 end:
    EVP_MD_CTX_free(hmac);
    return ret;
}
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1)              \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH               \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1)              \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#else
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD   \
                          + SSL3_RT_HEADER_LENGTH               \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD   \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#endif /* SSL3_ALIGN_PAYLOAD */
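/*
 * MAX_PREFIX_LEN is the buffer space reserved for the empty-fragment prefix
 * record: record header plus worst-case encryption (and, if enabled,
 * compression) overhead, plus any payload alignment slack.
 */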
/* This function is also used by the SSLv3 implementation */
int tls1_allocate_write_buffers(OSSL_RECORD_LAYER *rl,
                                OSSL_RECORD_TEMPLATE *templates,
                                size_t numtempl, size_t *prefix)
{
    /* Do we need to add an empty record prefix? */
    *prefix = rl->need_empty_fragments
              && templates[0].type == SSL3_RT_APPLICATION_DATA;
    /*
     * In the prefix case we can allocate a much smaller buffer. Otherwise we
     * just allocate the default buffer size
     */
    if (!tls_setup_write_buffer(rl, numtempl + *prefix,
                                *prefix ? MAX_PREFIX_LEN : 0, 0)) {
        /* RLAYERfatal() already called */
        return 0;
    }

    return 1;
}
/* This function is also used by the SSLv3 implementation */
int tls1_initialise_write_packets(OSSL_RECORD_LAYER *rl,
                                  OSSL_RECORD_TEMPLATE *templates,
                                  size_t numtempl,
                                  OSSL_RECORD_TEMPLATE *prefixtempl,
                                  WPACKET *pkt,
                                  SSL3_BUFFER *bufs,
                                  size_t *wpinned)
{
    size_t align = 0;
    SSL3_BUFFER *wb;
    size_t prefix;
    /* Do we need to add an empty record prefix? */
    prefix = rl->need_empty_fragments
             && templates[0].type == SSL3_RT_APPLICATION_DATA;
    if (prefix) {
        /*
         * countermeasure against known-IV weakness in CBC ciphersuites (see
         * http://www.openssl.org/~bodo/tls-cbc.txt)
         */
        prefixtempl->buf = NULL;
        prefixtempl->version = templates[0].version;
        prefixtempl->buflen = 0;
        prefixtempl->type = SSL3_RT_APPLICATION_DATA;
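        /*
         * The prefix template above describes an empty application data
         * record sent ahead of the real payload, so a CBC-mode attacker
         * never sees a predictable IV for the record that carries data.
         */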
        wb = &bufs[0];

#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
        align = (size_t)SSL3_BUFFER_get_buf(wb) + SSL3_RT_HEADER_LENGTH;
        align = SSL3_ALIGN_PAYLOAD - 1
                - ((align - 1) % SSL3_ALIGN_PAYLOAD);
#endif
        SSL3_BUFFER_set_offset(wb, align);
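        /*
         * The offset computed above shifts the record so that the payload
         * following the record header lands on an SSL3_ALIGN_PAYLOAD-byte
         * boundary.
         */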
622 if (!WPACKET_init_static_len(&pkt
[0], SSL3_BUFFER_get_buf(wb
),
623 SSL3_BUFFER_get_len(wb
), 0)) {
624 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
628 if (!WPACKET_allocate_bytes(&pkt
[0], align
, NULL
)) {
629 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
    return tls_initialise_write_packets_default(rl, templates, numtempl,
                                                NULL,
                                                pkt + prefix, bufs + prefix,
                                                wpinned);
}
/* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
struct record_functions_st tls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    tls_get_more_records,
    tls_default_validate_record_header,
    tls_default_post_process_record,
    tls_get_max_records_multiblock,
    tls_write_records_multiblock, /* Defined in tls_multib.c */
    tls1_allocate_write_buffers,
    tls1_initialise_write_packets,
    NULL,
    tls_prepare_record_header_default,
    NULL,
    tls_prepare_for_encryption_default,
    tls_post_encryption_processing_default,
    NULL
};
struct record_functions_st dtls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    dtls_get_more_records,
    NULL,
    NULL,
    NULL,
    tls_write_records_default,
    /*
     * Don't use tls1_allocate_write_buffers since that handles empty fragment
     * records which aren't needed in DTLS. We just use the default allocation
     * instead.
     */
    tls_allocate_write_buffers_default,
    /* Don't use tls1_initialise_write_packets for same reason as above */
    tls_initialise_write_packets_default,
    NULL,
    dtls_prepare_record_header,
    NULL,
    tls_prepare_for_encryption_default,
    tls_post_encryption_processing_default,
    NULL
};