/*
 * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include <string.h>

#include <openssl/evp.h>
#include <openssl/core_names.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>

#include "internal/ssl3_cbc.h"
#include "../../ssl_local.h"
#include "../record_local.h"
#include "recmethod_local.h"
19 static int tls1_set_crypto_state(OSSL_RECORD_LAYER
*rl
, int level
,
20 unsigned char *key
, size_t keylen
,
21 unsigned char *iv
, size_t ivlen
,
22 unsigned char *mackey
, size_t mackeylen
,
23 const EVP_CIPHER
*ciph
,
29 EVP_CIPHER_CTX
*ciph_ctx
;
31 int enc
= (rl
->direction
== OSSL_RECORD_DIRECTION_WRITE
) ? 1 : 0;
33 if (level
!= OSSL_RECORD_PROTECTION_LEVEL_APPLICATION
)
34 return OSSL_RECORD_RETURN_FATAL
;
36 if ((rl
->enc_ctx
= EVP_CIPHER_CTX_new()) == NULL
) {
37 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_EVP_LIB
);
38 return OSSL_RECORD_RETURN_FATAL
;
41 ciph_ctx
= rl
->enc_ctx
;
43 rl
->md_ctx
= EVP_MD_CTX_new();
44 if (rl
->md_ctx
== NULL
) {
45 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
46 return OSSL_RECORD_RETURN_FATAL
;
48 #ifndef OPENSSL_NO_COMP
50 rl
->compctx
= COMP_CTX_new(comp
);
51 if (rl
->compctx
== NULL
) {
52 ERR_raise(ERR_LIB_SSL
, SSL_R_COMPRESSION_LIBRARY_ERROR
);
53 return OSSL_RECORD_RETURN_FATAL
;
59 * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
60 * setting up the MAC key.
62 if ((EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
) == 0) {
63 if (mactype
== EVP_PKEY_HMAC
) {
64 mac_key
= EVP_PKEY_new_raw_private_key_ex(rl
->libctx
, "HMAC",
69 * If its not HMAC then the only other types of MAC we support are
70 * the GOST MACs, so we need to use the old style way of creating
73 mac_key
= EVP_PKEY_new_mac_key(mactype
, NULL
, mackey
,
77 || EVP_DigestSignInit_ex(rl
->md_ctx
, NULL
, EVP_MD_get0_name(md
),
78 rl
->libctx
, rl
->propq
, mac_key
,
80 EVP_PKEY_free(mac_key
);
81 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
82 return OSSL_RECORD_RETURN_FATAL
;
84 EVP_PKEY_free(mac_key
);
87 if (EVP_CIPHER_get_mode(ciph
) == EVP_CIPH_GCM_MODE
) {
88 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, key
, NULL
, enc
)
89 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_GCM_SET_IV_FIXED
,
90 (int)ivlen
, iv
) <= 0) {
91 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
92 return OSSL_RECORD_RETURN_FATAL
;
94 } else if (EVP_CIPHER_get_mode(ciph
) == EVP_CIPH_CCM_MODE
) {
95 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, NULL
, NULL
, enc
)
96 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_IVLEN
, 12,
98 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_TAG
,
99 (int)taglen
, NULL
) <= 0
100 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_CCM_SET_IV_FIXED
,
102 || !EVP_CipherInit_ex(ciph_ctx
, NULL
, NULL
, key
, NULL
, enc
)) {
103 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
104 return OSSL_RECORD_RETURN_FATAL
;
107 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, key
, iv
, enc
)) {
108 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
109 return OSSL_RECORD_RETURN_FATAL
;
112 /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
113 if ((EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0
115 && EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_MAC_KEY
,
116 (int)mackeylen
, mackey
) <= 0) {
117 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
118 return OSSL_RECORD_RETURN_FATAL
;
122 * The cipher we actually ended up using in the EVP_CIPHER_CTX may be
123 * different to that in ciph if we have an ENGINE in use
125 if (EVP_CIPHER_get0_provider(EVP_CIPHER_CTX_get0_cipher(ciph_ctx
)) != NULL
126 && !ossl_set_tls_provider_parameters(rl
, ciph_ctx
, ciph
, md
)) {
127 /* ERR_raise already called */
128 return OSSL_RECORD_RETURN_FATAL
;
131 /* Calculate the explicit IV length */
132 if (RLAYER_USE_EXPLICIT_IV(rl
)) {
133 int mode
= EVP_CIPHER_CTX_get_mode(ciph_ctx
);
136 if (mode
== EVP_CIPH_CBC_MODE
) {
137 eivlen
= EVP_CIPHER_CTX_get_iv_length(ciph_ctx
);
139 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_LIBRARY_BUG
);
140 return OSSL_RECORD_RETURN_FATAL
;
144 } else if (mode
== EVP_CIPH_GCM_MODE
) {
145 /* Need explicit part of IV for GCM mode */
146 eivlen
= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
147 } else if (mode
== EVP_CIPH_CCM_MODE
) {
148 eivlen
= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
150 rl
->eivlen
= (size_t)eivlen
;
153 return OSSL_RECORD_RETURN_SUCCESS
;
156 #define MAX_PADDING 256
158 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
159 * internal error, but not otherwise. It is the responsibility of the caller to
160 * report a bad_record_mac - if appropriate (DTLS just drops the record).
163 * 0: if the record is publicly invalid, or an internal error, or AEAD
164 * decryption failed, or Encrypt-then-mac decryption failed.
165 * 1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
167 static int tls1_cipher(OSSL_RECORD_LAYER
*rl
, TLS_RL_RECORD
*recs
,
168 size_t n_recs
, int sending
, SSL_MAC_BUF
*macs
,
172 size_t reclen
[SSL_MAX_PIPELINES
];
173 unsigned char buf
[SSL_MAX_PIPELINES
][EVP_AEAD_TLS1_AAD_LEN
];
174 unsigned char *data
[SSL_MAX_PIPELINES
];
175 int pad
= 0, tmpr
, provided
;
176 size_t bs
, ctr
, padnum
, loop
;
177 unsigned char padval
;
178 const EVP_CIPHER
*enc
;
181 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
185 if (EVP_MD_CTX_get0_md(rl
->md_ctx
)) {
186 int n
= EVP_MD_CTX_get_size(rl
->md_ctx
);
188 if (!ossl_assert(n
>= 0)) {
189 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
194 if (!ossl_assert(rl
->enc_ctx
!= NULL
)) {
195 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
199 enc
= EVP_CIPHER_CTX_get0_cipher(rl
->enc_ctx
);
204 /* For TLSv1.1 and later explicit IV */
205 if (RLAYER_USE_EXPLICIT_IV(rl
)
206 && EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CBC_MODE
)
207 ivlen
= EVP_CIPHER_get_iv_length(enc
);
211 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
212 if (recs
[ctr
].data
!= recs
[ctr
].input
) {
213 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
215 } else if (RAND_bytes_ex(rl
->libctx
, recs
[ctr
].input
,
217 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
223 if (!ossl_assert(enc
!= NULL
)) {
224 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
228 provided
= (EVP_CIPHER_get0_provider(enc
) != NULL
);
230 bs
= EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds
));
233 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_BAD_CIPHER
);
238 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
239 & EVP_CIPH_FLAG_PIPELINE
) == 0) {
241 * We shouldn't have been called with pipeline data if the
242 * cipher doesn't support pipelining
244 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
248 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
249 reclen
[ctr
] = recs
[ctr
].length
;
251 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
252 & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0) {
258 unsigned char dtlsseq
[8], *p
= dtlsseq
;
261 memcpy(p
, &seq
[2], 6);
262 memcpy(buf
[ctr
], dtlsseq
, 8);
264 memcpy(buf
[ctr
], seq
, 8);
265 if (!tls_increment_sequence_ctr(rl
)) {
266 /* RLAYERfatal already called */
271 buf
[ctr
][8] = recs
[ctr
].type
;
272 buf
[ctr
][9] = (unsigned char)(rl
->version
>> 8);
273 buf
[ctr
][10] = (unsigned char)(rl
->version
);
274 buf
[ctr
][11] = (unsigned char)(recs
[ctr
].length
>> 8);
275 buf
[ctr
][12] = (unsigned char)(recs
[ctr
].length
& 0xff);
276 pad
= EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_AEAD_TLS1_AAD
,
277 EVP_AEAD_TLS1_AAD_LEN
, buf
[ctr
]);
279 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
285 recs
[ctr
].length
+= pad
;
287 } else if ((bs
!= 1) && sending
&& !provided
) {
289 * We only do this for legacy ciphers. Provided ciphers add the
290 * padding on the provider side.
292 padnum
= bs
- (reclen
[ctr
] % bs
);
294 /* Add weird padding of up to 256 bytes */
296 if (padnum
> MAX_PADDING
) {
297 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
300 /* we need to add 'padnum' padding bytes of value padval */
301 padval
= (unsigned char)(padnum
- 1);
302 for (loop
= reclen
[ctr
]; loop
< reclen
[ctr
] + padnum
; loop
++)
303 recs
[ctr
].input
[loop
] = padval
;
304 reclen
[ctr
] += padnum
;
305 recs
[ctr
].length
+= padnum
;
309 if (reclen
[ctr
] == 0 || reclen
[ctr
] % bs
!= 0) {
310 /* Publicly invalid */
316 /* Set the output buffers */
317 for (ctr
= 0; ctr
< n_recs
; ctr
++)
318 data
[ctr
] = recs
[ctr
].data
;
320 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS
,
321 (int)n_recs
, data
) <= 0) {
322 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
325 /* Set the input buffers */
326 for (ctr
= 0; ctr
< n_recs
; ctr
++)
327 data
[ctr
] = recs
[ctr
].input
;
329 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_INPUT_BUFS
,
330 (int)n_recs
, data
) <= 0
331 || EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_INPUT_LENS
,
332 (int)n_recs
, reclen
) <= 0) {
333 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
338 if (!rl
->isdtls
&& rl
->tlstree
) {
339 int decrement_seq
= 0;
342 * When sending, seq is incremented after MAC calculation.
343 * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
344 * Otherwise we have to decrease it in the implementation
346 if (sending
&& !rl
->use_etm
)
349 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_TLSTREE
, decrement_seq
,
350 rl
->sequence
) <= 0) {
352 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
360 /* Provided cipher - we do not support pipelining on this path */
362 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
366 if (!EVP_CipherUpdate(ds
, recs
[0].data
, &outlen
, recs
[0].input
,
367 (unsigned int)reclen
[0]))
369 recs
[0].length
= outlen
;
372 * The length returned from EVP_CipherUpdate above is the actual
373 * payload length. We need to adjust the data/input ptr to skip over
377 if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_GCM_MODE
) {
378 recs
[0].data
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
379 recs
[0].input
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
380 } else if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CCM_MODE
) {
381 recs
[0].data
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
382 recs
[0].input
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
383 } else if (bs
!= 1 && RLAYER_USE_EXPLICIT_IV(rl
)) {
386 recs
[0].orig_len
-= bs
;
389 /* Now get a pointer to the MAC (if applicable) */
391 OSSL_PARAM params
[2], *p
= params
;
396 *p
++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC
,
397 (void **)&macs
[0].mac
,
399 *p
= OSSL_PARAM_construct_end();
401 if (!EVP_CIPHER_CTX_get_params(ds
, params
)) {
402 /* Shouldn't normally happen */
403 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
,
404 ERR_R_INTERNAL_ERROR
);
412 tmpr
= EVP_Cipher(ds
, recs
[0].data
, recs
[0].input
,
413 (unsigned int)reclen
[0]);
414 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
415 & EVP_CIPH_FLAG_CUSTOM_CIPHER
) != 0
418 /* AEAD can fail to verify MAC */
423 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
424 /* Adjust the record to remove the explicit IV/MAC/Tag */
425 if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_GCM_MODE
) {
426 recs
[ctr
].data
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
427 recs
[ctr
].input
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
428 recs
[ctr
].length
-= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
429 } else if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CCM_MODE
) {
430 recs
[ctr
].data
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
431 recs
[ctr
].input
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
432 recs
[ctr
].length
-= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
433 } else if (bs
!= 1 && RLAYER_USE_EXPLICIT_IV(rl
)) {
434 if (recs
[ctr
].length
< bs
)
436 recs
[ctr
].data
+= bs
;
437 recs
[ctr
].input
+= bs
;
438 recs
[ctr
].length
-= bs
;
439 recs
[ctr
].orig_len
-= bs
;
443 * If using Mac-then-encrypt, then this will succeed but
444 * with a random MAC if padding is invalid
446 if (!tls1_cbc_remove_padding_and_mac(&recs
[ctr
].length
,
449 (macs
!= NULL
) ? &macs
[ctr
].mac
: NULL
,
450 (macs
!= NULL
) ? &macs
[ctr
].alloced
453 pad
? (size_t)pad
: macsize
,
454 (EVP_CIPHER_get_flags(enc
)
455 & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0,
464 static int tls1_mac(OSSL_RECORD_LAYER
*rl
, TLS_RL_RECORD
*rec
, unsigned char *md
,
467 unsigned char *seq
= rl
->sequence
;
470 EVP_MD_CTX
*hmac
= NULL
, *mac_ctx
;
471 unsigned char header
[13];
477 t
= EVP_MD_CTX_get_size(hash
);
478 if (!ossl_assert(t
>= 0))
482 if (rl
->stream_mac
) {
485 hmac
= EVP_MD_CTX_new();
486 if (hmac
== NULL
|| !EVP_MD_CTX_copy(hmac
, hash
)) {
494 && EVP_MD_CTX_ctrl(mac_ctx
, EVP_MD_CTRL_TLSTREE
, 0, seq
) <= 0)
498 unsigned char dtlsseq
[8], *p
= dtlsseq
;
501 memcpy(p
, &seq
[2], 6);
503 memcpy(header
, dtlsseq
, 8);
505 memcpy(header
, seq
, 8);
508 header
[8] = rec
->type
;
509 header
[9] = (unsigned char)(rl
->version
>> 8);
510 header
[10] = (unsigned char)(rl
->version
);
511 header
[11] = (unsigned char)(rec
->length
>> 8);
512 header
[12] = (unsigned char)(rec
->length
& 0xff);
514 if (!sending
&& !rl
->use_etm
515 && EVP_CIPHER_CTX_get_mode(rl
->enc_ctx
) == EVP_CIPH_CBC_MODE
516 && ssl3_cbc_record_digest_supported(mac_ctx
)) {
517 OSSL_PARAM tls_hmac_params
[2], *p
= tls_hmac_params
;
519 *p
++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE
,
521 *p
++ = OSSL_PARAM_construct_end();
523 if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx
),
528 if (EVP_DigestSignUpdate(mac_ctx
, header
, sizeof(header
)) <= 0
529 || EVP_DigestSignUpdate(mac_ctx
, rec
->input
, rec
->length
) <= 0
530 || EVP_DigestSignFinal(mac_ctx
, md
, &md_size
) <= 0)
533 OSSL_TRACE_BEGIN(TLS
) {
534 BIO_printf(trc_out
, "seq:\n");
535 BIO_dump_indent(trc_out
, seq
, 8, 4);
536 BIO_printf(trc_out
, "rec:\n");
537 BIO_dump_indent(trc_out
, rec
->data
, rec
->length
, 4);
538 } OSSL_TRACE_END(TLS
);
540 if (!rl
->isdtls
&& !tls_increment_sequence_ctr(rl
)) {
541 /* RLAYERfatal already called */
545 OSSL_TRACE_BEGIN(TLS
) {
546 BIO_printf(trc_out
, "md:\n");
547 BIO_dump_indent(trc_out
, md
, md_size
, 4);
548 } OSSL_TRACE_END(TLS
);
551 EVP_MD_CTX_free(hmac
);
555 #if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
556 # ifndef OPENSSL_NO_COMP
557 # define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
558 + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
559 + SSL3_RT_HEADER_LENGTH \
560 + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
562 # define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
563 + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
564 + SSL3_RT_HEADER_LENGTH)
565 # endif /* OPENSSL_NO_COMP */
567 # ifndef OPENSSL_NO_COMP
568 # define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
569 + SSL3_RT_HEADER_LENGTH \
570 + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
572 # define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
573 + SSL3_RT_HEADER_LENGTH)
574 # endif /* OPENSSL_NO_COMP */
577 /* This function is also used by the SSLv3 implementation */
578 int tls1_allocate_write_buffers(OSSL_RECORD_LAYER
*rl
,
579 OSSL_RECORD_TEMPLATE
*templates
,
580 size_t numtempl
, size_t *prefix
)
582 /* Do we need to add an empty record prefix? */
583 *prefix
= rl
->need_empty_fragments
584 && templates
[0].type
== SSL3_RT_APPLICATION_DATA
;
587 * In the prefix case we can allocate a much smaller buffer. Otherwise we
588 * just allocate the default buffer size
590 if (!tls_setup_write_buffer(rl
, numtempl
+ *prefix
,
591 *prefix
? MAX_PREFIX_LEN
: 0, 0)) {
592 /* RLAYERfatal() already called */
599 /* This function is also used by the SSLv3 implementation */
600 int tls1_initialise_write_packets(OSSL_RECORD_LAYER
*rl
,
601 OSSL_RECORD_TEMPLATE
*templates
,
603 OSSL_RECORD_TEMPLATE
*prefixtempl
,
612 /* Do we need to add an empty record prefix? */
613 prefix
= rl
->need_empty_fragments
614 && templates
[0].type
== SSL3_RT_APPLICATION_DATA
;
618 * countermeasure against known-IV weakness in CBC ciphersuites (see
619 * http://www.openssl.org/~bodo/tls-cbc.txt)
621 prefixtempl
->buf
= NULL
;
622 prefixtempl
->version
= templates
[0].version
;
623 prefixtempl
->buflen
= 0;
624 prefixtempl
->type
= SSL3_RT_APPLICATION_DATA
;
628 #if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
629 align
= (size_t)TLS_BUFFER_get_buf(wb
) + SSL3_RT_HEADER_LENGTH
;
630 align
= SSL3_ALIGN_PAYLOAD
- 1
631 - ((align
- 1) % SSL3_ALIGN_PAYLOAD
);
633 TLS_BUFFER_set_offset(wb
, align
);
635 if (!WPACKET_init_static_len(&pkt
[0], TLS_BUFFER_get_buf(wb
),
636 TLS_BUFFER_get_len(wb
), 0)) {
637 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
641 if (!WPACKET_allocate_bytes(&pkt
[0], align
, NULL
)) {
642 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
647 return tls_initialise_write_packets_default(rl
, templates
, numtempl
,
649 pkt
+ prefix
, bufs
+ prefix
,
653 /* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
654 const struct record_functions_st tls_1_funcs
= {
655 tls1_set_crypto_state
,
658 tls_default_set_protocol_version
,
660 tls_get_more_records
,
661 tls_default_validate_record_header
,
662 tls_default_post_process_record
,
663 tls_get_max_records_multiblock
,
664 tls_write_records_multiblock
, /* Defined in tls_multib.c */
665 tls1_allocate_write_buffers
,
666 tls1_initialise_write_packets
,
668 tls_prepare_record_header_default
,
670 tls_prepare_for_encryption_default
,
671 tls_post_encryption_processing_default
,
675 const struct record_functions_st dtls_1_funcs
= {
676 tls1_set_crypto_state
,
679 tls_default_set_protocol_version
,
681 dtls_get_more_records
,
685 tls_write_records_default
,
687 * Don't use tls1_allocate_write_buffers since that handles empty fragment
688 * records which aren't needed in DTLS. We just use the default allocation
691 tls_allocate_write_buffers_default
,
692 /* Don't use tls1_initialise_write_packets for same reason as above */
693 tls_initialise_write_packets_default
,
695 dtls_prepare_record_header
,
697 tls_prepare_for_encryption_default
,
698 dtls_post_encryption_processing
,