/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
10 #include <openssl/evp.h>
11 #include <openssl/core_names.h>
12 #include <openssl/rand.h>
13 #include "../../ssl_local.h"
14 #include "../record_local.h"
15 #include "recmethod_local.h"
17 /* TODO(RECLAYER): Handle OPENSSL_NO_COMP */
18 static int tls1_set_crypto_state(OSSL_RECORD_LAYER
*rl
, int level
,
19 unsigned char *key
, size_t keylen
,
20 unsigned char *iv
, size_t ivlen
,
21 unsigned char *mackey
, size_t mackeylen
,
22 const EVP_CIPHER
*ciph
,
24 /* TODO(RECLAYER): This probably should not be an int */
28 /* TODO(RECLAYER): Remove me */
31 EVP_CIPHER_CTX
*ciph_ctx
;
34 if (level
!= OSSL_RECORD_PROTECTION_LEVEL_APPLICATION
)
35 return OSSL_RECORD_RETURN_FATAL
;
38 s
->s3
.flags
|= TLS1_FLAGS_ENCRYPT_THEN_MAC_READ
;
40 s
->s3
.flags
&= ~TLS1_FLAGS_ENCRYPT_THEN_MAC_READ
;
42 if (s
->s3
.tmp
.new_cipher
->algorithm2
& TLS1_STREAM_MAC
)
43 s
->mac_flags
|= SSL_MAC_FLAG_READ_MAC_STREAM
;
45 s
->mac_flags
&= ~SSL_MAC_FLAG_READ_MAC_STREAM
;
47 if (s
->s3
.tmp
.new_cipher
->algorithm2
& TLS1_TLSTREE
)
48 s
->mac_flags
|= SSL_MAC_FLAG_READ_MAC_TLSTREE
;
50 s
->mac_flags
&= ~SSL_MAC_FLAG_READ_MAC_TLSTREE
;
52 if ((rl
->enc_read_ctx
= EVP_CIPHER_CTX_new()) == NULL
) {
53 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_MALLOC_FAILURE
);
54 return OSSL_RECORD_RETURN_FATAL
;
57 ciph_ctx
= rl
->enc_read_ctx
;
59 rl
->read_hash
= EVP_MD_CTX_new();
60 if (rl
->read_hash
== NULL
) {
61 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
62 return OSSL_RECORD_RETURN_FATAL
;
64 #ifndef OPENSSL_NO_COMP
66 rl
->expand
= COMP_CTX_new(comp
->method
);
67 if (rl
->expand
== NULL
) {
68 ERR_raise(ERR_LIB_SSL
, SSL_R_COMPRESSION_LIBRARY_ERROR
);
69 return OSSL_RECORD_RETURN_FATAL
;
74 * this is done by dtls1_reset_seq_numbers for DTLS
77 RECORD_LAYER_reset_read_sequence(&s
->rlayer
);
80 * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
81 * setting up the MAC key.
83 if (!(EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
)) {
84 if (mactype
== EVP_PKEY_HMAC
) {
85 mac_key
= EVP_PKEY_new_raw_private_key_ex(rl
->libctx
, "HMAC",
90 * If its not HMAC then the only other types of MAC we support are
91 * the GOST MACs, so we need to use the old style way of creating
94 mac_key
= EVP_PKEY_new_mac_key(mactype
, NULL
, mackey
,
98 || EVP_DigestSignInit_ex(rl
->read_hash
, NULL
, EVP_MD_get0_name(md
),
99 rl
->libctx
, rl
->propq
, mac_key
,
101 EVP_PKEY_free(mac_key
);
102 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
103 return OSSL_RECORD_RETURN_FATAL
;
105 EVP_PKEY_free(mac_key
);
108 if (EVP_CIPHER_get_mode(ciph
) == EVP_CIPH_GCM_MODE
) {
109 if (!EVP_DecryptInit_ex(ciph_ctx
, ciph
, NULL
, key
, NULL
)
110 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_GCM_SET_IV_FIXED
,
111 (int)ivlen
, iv
) <= 0) {
112 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
113 return OSSL_RECORD_RETURN_FATAL
;
115 } else if (EVP_CIPHER_get_mode(ciph
) == EVP_CIPH_CCM_MODE
) {
116 if (!EVP_DecryptInit_ex(ciph_ctx
, ciph
, NULL
, NULL
, NULL
)
117 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_IVLEN
, 12,
119 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_TAG
,
120 (int)taglen
, NULL
) <= 0
121 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_CCM_SET_IV_FIXED
,
124 * TODO(RECLAYER): Why do we defer setting the key until here?
125 * why not in the initial EVP_DecryptInit_ex() call?
127 || !EVP_DecryptInit_ex(ciph_ctx
, NULL
, NULL
, key
, NULL
)) {
128 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
129 return OSSL_RECORD_RETURN_FATAL
;
132 if (!EVP_DecryptInit_ex(ciph_ctx
, ciph
, NULL
, key
, iv
)) {
133 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
134 return OSSL_RECORD_RETURN_FATAL
;
137 /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
138 if ((EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0
140 && EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_MAC_KEY
,
141 (int)mackeylen
, mackey
) <= 0) {
142 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
143 return OSSL_RECORD_RETURN_FATAL
;
145 if (EVP_CIPHER_get0_provider(ciph
) != NULL
146 && !ossl_set_tls_provider_parameters(rl
, ciph_ctx
, ciph
, md
, s
))
147 return OSSL_RECORD_RETURN_FATAL
;
149 return OSSL_RECORD_RETURN_SUCCESS
;
152 #define MAX_PADDING 256
/*-
 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls SSLfatal on internal
 * error, but not otherwise. It is the responsibility of the caller to report
 * a bad_record_mac - if appropriate (DTLS just drops the record).
 *
 * Returns:
 *    0: if the record is publicly invalid, or an internal error, or AEAD
 *       decryption failed, or Encrypt-then-mac decryption failed.
 *    1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
 */
163 static int tls1_cipher(OSSL_RECORD_LAYER
*rl
, SSL3_RECORD
*recs
, size_t n_recs
,
164 int sending
, SSL_MAC_BUF
*macs
, size_t macsize
,
165 /* TODO(RECLAYER): Remove me */ SSL_CONNECTION
*s
)
168 size_t reclen
[SSL_MAX_PIPELINES
];
169 unsigned char buf
[SSL_MAX_PIPELINES
][EVP_AEAD_TLS1_AAD_LEN
];
170 int i
, pad
= 0, tmpr
, provided
;
171 size_t bs
, ctr
, padnum
, loop
;
172 unsigned char padval
;
173 const EVP_CIPHER
*enc
;
174 int tlstree_enc
= sending
? (s
->mac_flags
& SSL_MAC_FLAG_WRITE_MAC_TLSTREE
)
175 : (s
->mac_flags
& SSL_MAC_FLAG_READ_MAC_TLSTREE
);
176 SSL_CTX
*sctx
= SSL_CONNECTION_GET_CTX(s
);
179 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
186 if (EVP_MD_CTX_get0_md(s
->write_hash
)) {
187 int n
= EVP_MD_CTX_get_size(s
->write_hash
);
188 if (!ossl_assert(n
>= 0)) {
189 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
193 ds
= s
->enc_write_ctx
;
194 if (!ossl_assert(s
->enc_write_ctx
)) {
195 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
199 enc
= EVP_CIPHER_CTX_get0_cipher(s
->enc_write_ctx
);
200 /* For TLSv1.1 and later explicit IV */
201 if (SSL_USE_EXPLICIT_IV(s
)
202 && EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CBC_MODE
)
203 ivlen
= EVP_CIPHER_get_iv_length(enc
);
207 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
208 if (recs
[ctr
].data
!= recs
[ctr
].input
) {
210 * we can't write into the input stream: Can this ever
213 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
215 } else if (RAND_bytes_ex(sctx
->libctx
, recs
[ctr
].input
,
217 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
223 if (EVP_MD_CTX_get0_md(rl
->read_hash
)) {
224 int n
= EVP_MD_CTX_get_size(rl
->read_hash
);
225 if (!ossl_assert(n
>= 0)) {
226 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
230 ds
= rl
->enc_read_ctx
;
231 if (!ossl_assert(rl
->enc_read_ctx
)) {
232 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
236 enc
= EVP_CIPHER_CTX_get0_cipher(rl
->enc_read_ctx
);
239 if ((s
->session
== NULL
) || (enc
== NULL
)) {
240 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
244 provided
= (EVP_CIPHER_get0_provider(enc
) != NULL
);
246 bs
= EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds
));
249 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
250 & EVP_CIPH_FLAG_PIPELINE
) == 0) {
252 * We shouldn't have been called with pipeline data if the
253 * cipher doesn't support pipelining
255 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
259 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
260 reclen
[ctr
] = recs
[ctr
].length
;
262 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
263 & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0) {
266 seq
= sending
? RECORD_LAYER_get_write_sequence(&s
->rlayer
)
267 : RECORD_LAYER_get_read_sequence(&s
->rlayer
);
269 if (SSL_CONNECTION_IS_DTLS(s
)) {
270 /* DTLS does not support pipelining */
271 unsigned char dtlsseq
[8], *p
= dtlsseq
;
273 s2n(sending
? DTLS_RECORD_LAYER_get_w_epoch(&s
->rlayer
) :
274 DTLS_RECORD_LAYER_get_r_epoch(&s
->rlayer
), p
);
275 memcpy(p
, &seq
[2], 6);
276 memcpy(buf
[ctr
], dtlsseq
, 8);
278 memcpy(buf
[ctr
], seq
, 8);
279 for (i
= 7; i
>= 0; i
--) { /* increment */
286 buf
[ctr
][8] = recs
[ctr
].type
;
287 buf
[ctr
][9] = (unsigned char)(rl
->version
>> 8);
288 buf
[ctr
][10] = (unsigned char)(rl
->version
);
289 buf
[ctr
][11] = (unsigned char)(recs
[ctr
].length
>> 8);
290 buf
[ctr
][12] = (unsigned char)(recs
[ctr
].length
& 0xff);
291 pad
= EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_AEAD_TLS1_AAD
,
292 EVP_AEAD_TLS1_AAD_LEN
, buf
[ctr
]);
294 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
300 recs
[ctr
].length
+= pad
;
303 } else if ((bs
!= 1) && sending
&& !provided
) {
305 * We only do this for legacy ciphers. Provided ciphers add the
306 * padding on the provider side.
308 padnum
= bs
- (reclen
[ctr
] % bs
);
310 /* Add weird padding of up to 256 bytes */
312 if (padnum
> MAX_PADDING
) {
313 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
316 /* we need to add 'padnum' padding bytes of value padval */
317 padval
= (unsigned char)(padnum
- 1);
318 for (loop
= reclen
[ctr
]; loop
< reclen
[ctr
] + padnum
; loop
++)
319 recs
[ctr
].input
[loop
] = padval
;
320 reclen
[ctr
] += padnum
;
321 recs
[ctr
].length
+= padnum
;
325 if (reclen
[ctr
] == 0 || reclen
[ctr
] % bs
!= 0) {
326 /* Publicly invalid */
332 unsigned char *data
[SSL_MAX_PIPELINES
];
334 /* Set the output buffers */
335 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
336 data
[ctr
] = recs
[ctr
].data
;
338 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS
,
339 (int)n_recs
, data
) <= 0) {
340 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
343 /* Set the input buffers */
344 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
345 data
[ctr
] = recs
[ctr
].input
;
347 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_INPUT_BUFS
,
348 (int)n_recs
, data
) <= 0
349 || EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_INPUT_LENS
,
350 (int)n_recs
, reclen
) <= 0) {
351 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
356 if (!SSL_CONNECTION_IS_DTLS(s
) && tlstree_enc
) {
358 int decrement_seq
= 0;
361 * When sending, seq is incremented after MAC calculation.
362 * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
363 * Otherwise we have to decrease it in the implementation
365 if (sending
&& !SSL_WRITE_ETM(s
))
368 seq
= sending
? RECORD_LAYER_get_write_sequence(&s
->rlayer
)
369 : RECORD_LAYER_get_read_sequence(&s
->rlayer
);
370 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_TLSTREE
, decrement_seq
, seq
) <= 0) {
371 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
379 /* Provided cipher - we do not support pipelining on this path */
381 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
385 if (!EVP_CipherUpdate(ds
, recs
[0].data
, &outlen
, recs
[0].input
,
386 (unsigned int)reclen
[0]))
388 recs
[0].length
= outlen
;
391 * The length returned from EVP_CipherUpdate above is the actual
392 * payload length. We need to adjust the data/input ptr to skip over
396 if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_GCM_MODE
) {
397 recs
[0].data
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
398 recs
[0].input
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
399 } else if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CCM_MODE
) {
400 recs
[0].data
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
401 recs
[0].input
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
402 } else if (bs
!= 1 && SSL_USE_EXPLICIT_IV(s
)) {
405 recs
[0].orig_len
-= bs
;
408 /* Now get a pointer to the MAC (if applicable) */
410 OSSL_PARAM params
[2], *p
= params
;
415 *p
++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC
,
416 (void **)&macs
[0].mac
,
418 *p
= OSSL_PARAM_construct_end();
420 if (!EVP_CIPHER_CTX_get_params(ds
, params
)) {
421 /* Shouldn't normally happen */
422 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
,
423 ERR_R_INTERNAL_ERROR
);
431 tmpr
= EVP_Cipher(ds
, recs
[0].data
, recs
[0].input
,
432 (unsigned int)reclen
[0]);
433 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
434 & EVP_CIPH_FLAG_CUSTOM_CIPHER
) != 0
437 /* AEAD can fail to verify MAC */
442 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
443 /* Adjust the record to remove the explicit IV/MAC/Tag */
444 if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_GCM_MODE
) {
445 recs
[ctr
].data
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
446 recs
[ctr
].input
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
447 recs
[ctr
].length
-= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
448 } else if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CCM_MODE
) {
449 recs
[ctr
].data
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
450 recs
[ctr
].input
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
451 recs
[ctr
].length
-= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
452 } else if (bs
!= 1 && SSL_USE_EXPLICIT_IV(s
)) {
453 if (recs
[ctr
].length
< bs
)
455 recs
[ctr
].data
+= bs
;
456 recs
[ctr
].input
+= bs
;
457 recs
[ctr
].length
-= bs
;
458 recs
[ctr
].orig_len
-= bs
;
462 * If using Mac-then-encrypt, then this will succeed but
463 * with a random MAC if padding is invalid
465 if (!tls1_cbc_remove_padding_and_mac(&recs
[ctr
].length
,
468 (macs
!= NULL
) ? &macs
[ctr
].mac
: NULL
,
469 (macs
!= NULL
) ? &macs
[ctr
].alloced
472 pad
? (size_t)pad
: macsize
,
473 (EVP_CIPHER_get_flags(enc
)
474 & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0,
483 static int tls1_mac(OSSL_RECORD_LAYER
*rl
, SSL3_RECORD
*rec
, unsigned char *md
,
484 int sending
, SSL_CONNECTION
*ssl
)
490 EVP_MD_CTX
*hmac
= NULL
, *mac_ctx
;
491 unsigned char header
[13];
492 int stream_mac
= sending
? (ssl
->mac_flags
& SSL_MAC_FLAG_WRITE_MAC_STREAM
)
493 : (ssl
->mac_flags
& SSL_MAC_FLAG_READ_MAC_STREAM
);
494 int tlstree_mac
= sending
? (ssl
->mac_flags
& SSL_MAC_FLAG_WRITE_MAC_TLSTREE
)
495 : (ssl
->mac_flags
& SSL_MAC_FLAG_READ_MAC_TLSTREE
);
500 seq
= RECORD_LAYER_get_write_sequence(&ssl
->rlayer
);
501 hash
= ssl
->write_hash
;
503 seq
= RECORD_LAYER_get_read_sequence(&ssl
->rlayer
);
504 hash
= rl
->read_hash
;
507 t
= EVP_MD_CTX_get_size(hash
);
508 if (!ossl_assert(t
>= 0))
512 /* I should fix this up TLS TLS TLS TLS TLS XXXXXXXX */
516 hmac
= EVP_MD_CTX_new();
517 if (hmac
== NULL
|| !EVP_MD_CTX_copy(hmac
, hash
)) {
525 && EVP_MD_CTX_ctrl(mac_ctx
, EVP_MD_CTRL_TLSTREE
, 0, seq
) <= 0) {
530 unsigned char dtlsseq
[8], *p
= dtlsseq
;
532 s2n(sending
? DTLS_RECORD_LAYER_get_w_epoch(&ssl
->rlayer
) :
533 DTLS_RECORD_LAYER_get_r_epoch(&ssl
->rlayer
), p
);
534 memcpy(p
, &seq
[2], 6);
536 memcpy(header
, dtlsseq
, 8);
538 memcpy(header
, seq
, 8);
540 header
[8] = rec
->type
;
541 header
[9] = (unsigned char)(ssl
->version
>> 8);
542 header
[10] = (unsigned char)(ssl
->version
);
543 header
[11] = (unsigned char)(rec
->length
>> 8);
544 header
[12] = (unsigned char)(rec
->length
& 0xff);
546 if (!sending
&& !SSL_READ_ETM(ssl
)
547 && EVP_CIPHER_CTX_get_mode(rl
->enc_read_ctx
) == EVP_CIPH_CBC_MODE
548 && ssl3_cbc_record_digest_supported(mac_ctx
)) {
549 OSSL_PARAM tls_hmac_params
[2], *p
= tls_hmac_params
;
551 *p
++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE
,
553 *p
++ = OSSL_PARAM_construct_end();
555 if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx
),
561 if (EVP_DigestSignUpdate(mac_ctx
, header
, sizeof(header
)) <= 0
562 || EVP_DigestSignUpdate(mac_ctx
, rec
->input
, rec
->length
) <= 0
563 || EVP_DigestSignFinal(mac_ctx
, md
, &md_size
) <= 0) {
567 OSSL_TRACE_BEGIN(TLS
) {
568 BIO_printf(trc_out
, "seq:\n");
569 BIO_dump_indent(trc_out
, seq
, 8, 4);
570 BIO_printf(trc_out
, "rec:\n");
571 BIO_dump_indent(trc_out
, rec
->data
, rec
->length
, 4);
572 } OSSL_TRACE_END(TLS
);
574 if (!SSL_CONNECTION_IS_DTLS(ssl
)) {
575 for (i
= 7; i
>= 0; i
--) {
581 OSSL_TRACE_BEGIN(TLS
) {
582 BIO_printf(trc_out
, "md:\n");
583 BIO_dump_indent(trc_out
, md
, md_size
, 4);
584 } OSSL_TRACE_END(TLS
);
587 EVP_MD_CTX_free(hmac
);
591 /* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
592 struct record_functions_st tls_1_funcs
= {
593 tls1_set_crypto_state
,