thirdparty/openssl.git: ssl/record/methods/tls1_meth.c
/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/evp.h>
#include <openssl/core_names.h>
#include <openssl/rand.h>
#include "../../ssl_local.h"
#include "../record_local.h"
#include "recmethod_local.h"

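/*
 * Record protection methods shared by TLSv1.0, TLSv1.1 and TLSv1.2 (see
 * tls_1_funcs at the end of this file); the DTLS code also reaches these
 * functions, hence the rl->isdtls checks below.
 */
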
/* TODO(RECLAYER): Handle OPENSSL_NO_COMP */
static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 /* TODO(RECLAYER): This probably should not be an int */
                                 int mactype,
                                 const EVP_MD *md,
                                 const SSL_COMP *comp,
                                 /* TODO(RECLAYER): Remove me */
                                 SSL_CONNECTION *s)
{
    EVP_CIPHER_CTX *ciph_ctx;
    EVP_PKEY *mac_key;

    if (level != OSSL_RECORD_PROTECTION_LEVEL_APPLICATION)
        return OSSL_RECORD_RETURN_FATAL;

    if (s->ext.use_etm)
        s->s3.flags |= TLS1_FLAGS_ENCRYPT_THEN_MAC_READ;
    else
        s->s3.flags &= ~TLS1_FLAGS_ENCRYPT_THEN_MAC_READ;

    if (s->s3.tmp.new_cipher->algorithm2 & TLS1_STREAM_MAC)
        s->mac_flags |= SSL_MAC_FLAG_READ_MAC_STREAM;
    else
        s->mac_flags &= ~SSL_MAC_FLAG_READ_MAC_STREAM;

    if (s->s3.tmp.new_cipher->algorithm2 & TLS1_TLSTREE)
        s->mac_flags |= SSL_MAC_FLAG_READ_MAC_TLSTREE;
    else
        s->mac_flags &= ~SSL_MAC_FLAG_READ_MAC_TLSTREE;

    if ((rl->enc_read_ctx = EVP_CIPHER_CTX_new()) == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_MALLOC_FAILURE);
        return OSSL_RECORD_RETURN_FATAL;
    }

    ciph_ctx = rl->enc_read_ctx;

    rl->read_hash = EVP_MD_CTX_new();
    if (rl->read_hash == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
#ifndef OPENSSL_NO_COMP
    if (comp != NULL) {
        rl->expand = COMP_CTX_new(comp->method);
        if (rl->expand == NULL) {
            ERR_raise(ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
#endif
    /*
     * this is done by dtls1_reset_seq_numbers for DTLS
     */
    if (!rl->isdtls)
        RECORD_LAYER_reset_read_sequence(&s->rlayer);

    /*
     * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
     * setting up the MAC key.
     */
    if (!(EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER)) {
        if (mactype == EVP_PKEY_HMAC) {
            mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
                                                      rl->propq, mackey,
                                                      mackeylen);
        } else {
            /*
             * If it's not HMAC then the only other types of MAC we support are
             * the GOST MACs, so we need to use the old style way of creating
             * a MAC key.
             */
            mac_key = EVP_PKEY_new_mac_key(mactype, NULL, mackey,
                                           (int)mackeylen);
        }
        if (mac_key == NULL
            || EVP_DigestSignInit_ex(rl->read_hash, NULL, EVP_MD_get0_name(md),
                                     rl->libctx, rl->propq, mac_key,
                                     NULL) <= 0) {
            EVP_PKEY_free(mac_key);
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
        EVP_PKEY_free(mac_key);
    }

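    /*
     * For GCM the key block supplies only the fixed (implicit) part of the
     * nonce; EVP_CTRL_GCM_SET_IV_FIXED installs it here and the per-record
     * explicit part is handled as each record is processed.
     */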
    if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_GCM_MODE) {
        if (!EVP_DecryptInit_ex(ciph_ctx, ciph, NULL, key, NULL)
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_GCM_SET_IV_FIXED,
                                   (int)ivlen, iv) <= 0) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_CCM_MODE) {
        if (!EVP_DecryptInit_ex(ciph_ctx, ciph, NULL, NULL, NULL)
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, 12,
                                   NULL) <= 0
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG,
                                   (int)taglen, NULL) <= 0
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_CCM_SET_IV_FIXED,
                                   (int)ivlen, iv) <= 0
            /*
             * TODO(RECLAYER): Why do we defer setting the key until here?
             * why not in the initial EVP_DecryptInit_ex() call?
             */
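            /*
             * (One likely reason: for CCM the EVP interface requires the IV
             * length and the tag to be configured before the key is set, so
             * the key can only be supplied in this second init call.)
             */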
            || !EVP_DecryptInit_ex(ciph_ctx, NULL, NULL, key, NULL)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else {
        if (!EVP_DecryptInit_ex(ciph_ctx, ciph, NULL, key, iv)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
    /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) != 0
        && mackeylen != 0
        && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_MAC_KEY,
                               (int)mackeylen, mackey) <= 0) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
    if (EVP_CIPHER_get0_provider(ciph) != NULL
        && !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md, s))
        return OSSL_RECORD_RETURN_FATAL;

    return OSSL_RECORD_RETURN_SUCCESS;
}

#define MAX_PADDING 256
/*-
 * tls1_cipher encrypts/decrypts |n_recs| records in |recs|. Calls RLAYERfatal
 * on an internal error, but not otherwise. It is the responsibility of the
 * caller to report a bad_record_mac if appropriate (DTLS just drops the
 * record).
 *
 * Returns:
 *    0: if the record is publicly invalid, or an internal error, or AEAD
 *       decryption failed, or Encrypt-then-mac decryption failed.
 *    1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
 */
static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
                       int sending, SSL_MAC_BUF *macs, size_t macsize,
                       /* TODO(RECLAYER): Remove me */ SSL_CONNECTION *s)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    int i, pad = 0, tmpr, provided;
    size_t bs, ctr, padnum, loop;
    unsigned char padval;
    const EVP_CIPHER *enc;
    int tlstree_enc = sending ? (s->mac_flags & SSL_MAC_FLAG_WRITE_MAC_TLSTREE)
                              : (s->mac_flags & SSL_MAC_FLAG_READ_MAC_TLSTREE);
    SSL_CTX *sctx = SSL_CONNECTION_GET_CTX(s);

    if (n_recs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    if (sending) {
        int ivlen;

        if (EVP_MD_CTX_get0_md(s->write_hash)) {
            int n = EVP_MD_CTX_get_size(s->write_hash);
            if (!ossl_assert(n >= 0)) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
        }
        ds = s->enc_write_ctx;
        if (!ossl_assert(s->enc_write_ctx)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        enc = EVP_CIPHER_CTX_get0_cipher(s->enc_write_ctx);
        /* For TLSv1.1 and later explicit IV */
        if (SSL_USE_EXPLICIT_IV(s)
            && EVP_CIPHER_get_mode(enc) == EVP_CIPH_CBC_MODE)
            ivlen = EVP_CIPHER_get_iv_length(enc);
        else
            ivlen = 0;
        if (ivlen > 1) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                if (recs[ctr].data != recs[ctr].input) {
                    /*
                     * we can't write into the input stream: Can this ever
                     * happen?? (steve)
                     */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                } else if (RAND_bytes_ex(sctx->libctx, recs[ctr].input,
                                         ivlen, 0) <= 0) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        if (EVP_MD_CTX_get0_md(rl->read_hash)) {
            int n = EVP_MD_CTX_get_size(rl->read_hash);
            if (!ossl_assert(n >= 0)) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
        }
        ds = rl->enc_read_ctx;
        if (!ossl_assert(rl->enc_read_ctx)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        enc = EVP_CIPHER_CTX_get0_cipher(rl->enc_read_ctx);
    }

    if ((s->session == NULL) || (enc == NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    provided = (EVP_CIPHER_get0_provider(enc) != NULL);

    bs = EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds));

    if (n_recs > 1) {
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_PIPELINE) == 0) {
            /*
             * We shouldn't have been called with pipeline data if the
             * cipher doesn't support pipelining
             */
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
    for (ctr = 0; ctr < n_recs; ctr++) {
        reclen[ctr] = recs[ctr].length;

        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
            unsigned char *seq;

            seq = sending ? RECORD_LAYER_get_write_sequence(&s->rlayer)
                          : RECORD_LAYER_get_read_sequence(&s->rlayer);

            if (SSL_CONNECTION_IS_DTLS(s)) {
                /* DTLS does not support pipelining */
                unsigned char dtlsseq[8], *p = dtlsseq;

                s2n(sending ? DTLS_RECORD_LAYER_get_w_epoch(&s->rlayer) :
                    DTLS_RECORD_LAYER_get_r_epoch(&s->rlayer), p);
                memcpy(p, &seq[2], 6);
                memcpy(buf[ctr], dtlsseq, 8);
            } else {
                memcpy(buf[ctr], seq, 8);
                for (i = 7; i >= 0; i--) { /* increment */
                    ++seq[i];
                    if (seq[i] != 0)
                        break;
                }
            }

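            /*
             * 13-byte TLS AAD: 8-byte sequence number (or DTLS epoch+seq),
             * record type, 2-byte protocol version and 2-byte length. The
             * EVP_CTRL_AEAD_TLS1_AAD ctrl reports how many bytes the cipher
             * will add to the record on encryption (tag and/or MAC+padding,
             * depending on the cipher).
             */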
            buf[ctr][8] = recs[ctr].type;
            buf[ctr][9] = (unsigned char)(rl->version >> 8);
            buf[ctr][10] = (unsigned char)(rl->version);
            buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
            buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
            pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                      EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
            if (pad <= 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }

            if (sending) {
                reclen[ctr] += pad;
                recs[ctr].length += pad;
            }

        } else if ((bs != 1) && sending && !provided) {
            /*
             * We only do this for legacy ciphers. Provided ciphers add the
             * padding on the provider side.
             */
            padnum = bs - (reclen[ctr] % bs);

            /* Add weird padding of up to 256 bytes */

            if (padnum > MAX_PADDING) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
            /* we need to add 'padnum' padding bytes of value padval */
            padval = (unsigned char)(padnum - 1);
            for (loop = reclen[ctr]; loop < reclen[ctr] + padnum; loop++)
                recs[ctr].input[loop] = padval;
            reclen[ctr] += padnum;
            recs[ctr].length += padnum;
        }

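        /*
         * The ciphertext length is visible to any observer, so failing the
         * checks below is the "publicly invalid" case described above;
         * returning 0 here leaks nothing and the caller decides whether an
         * alert is sent.
         */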
        if (!sending) {
            if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) {
                /* Publicly invalid */
                return 0;
            }
        }
    }
    if (n_recs > 1) {
        unsigned char *data[SSL_MAX_PIPELINES];

        /* Set the output buffers */
        for (ctr = 0; ctr < n_recs; ctr++) {
            data[ctr] = recs[ctr].data;
        }
        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                (int)n_recs, data) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
        /* Set the input buffers */
        for (ctr = 0; ctr < n_recs; ctr++) {
            data[ctr] = recs[ctr].input;
        }
        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                (int)n_recs, data) <= 0
            || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                   (int)n_recs, reclen) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }

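    /*
     * EVP_CTRL_TLSTREE lets the cipher re-derive its key from the current
     * sequence number, as required by the TLSTREE key diversification scheme
     * used by the GOST cipher suites; the corresponding mac_flags bits are
     * set from TLS1_TLSTREE in tls1_set_crypto_state() above.
     */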
    if (!SSL_CONNECTION_IS_DTLS(s) && tlstree_enc) {
        unsigned char *seq;
        int decrement_seq = 0;

        /*
         * When sending, seq is incremented after MAC calculation.
         * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
         * Otherwise we have to decrease it in the implementation
         */
        if (sending && !SSL_WRITE_ETM(s))
            decrement_seq = 1;

        seq = sending ? RECORD_LAYER_get_write_sequence(&s->rlayer)
                      : RECORD_LAYER_get_read_sequence(&s->rlayer);
        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_TLSTREE, decrement_seq, seq) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

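    /*
     * With a provider-backed cipher the TLS-specific record processing
     * (padding, MAC check, tag handling) is expected to happen inside the
     * provider's cipher, so this path performs a single EVP_CipherUpdate()
     * call; on read, the code below only adjusts the pointers past any
     * explicit IV and fetches the MAC via OSSL_CIPHER_PARAM_TLS_MAC.
     */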
    if (provided) {
        int outlen;

        /* Provided cipher - we do not support pipelining on this path */
        if (n_recs > 1) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
                              (unsigned int)reclen[0]))
            return 0;
        recs[0].length = outlen;

        /*
         * The length returned from EVP_CipherUpdate above is the actual
         * payload length. We need to adjust the data/input ptr to skip over
         * any explicit IV
         */
        if (!sending) {
            if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
            } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
            } else if (bs != 1 && SSL_USE_EXPLICIT_IV(s)) {
                recs[0].data += bs;
                recs[0].input += bs;
                recs[0].orig_len -= bs;
            }

            /* Now get a pointer to the MAC (if applicable) */
            if (macs != NULL) {
                OSSL_PARAM params[2], *p = params;

                /* Get the MAC */
                macs[0].alloced = 0;

                *p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
                                                      (void **)&macs[0].mac,
                                                      macsize);
                *p = OSSL_PARAM_construct_end();

                if (!EVP_CIPHER_CTX_get_params(ds, params)) {
                    /* Shouldn't normally happen */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR,
                                ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        /* Legacy cipher */

        tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
                          (unsigned int)reclen[0]);
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
            ? (tmpr < 0)
            : (tmpr == 0)) {
            /* AEAD can fail to verify MAC */
            return 0;
        }

        if (!sending) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                /* Adjust the record to remove the explicit IV/MAC/Tag */
                if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                } else if (bs != 1 && SSL_USE_EXPLICIT_IV(s)) {
                    if (recs[ctr].length < bs)
                        return 0;
                    recs[ctr].data += bs;
                    recs[ctr].input += bs;
                    recs[ctr].length -= bs;
                    recs[ctr].orig_len -= bs;
                }

                /*
                 * If using Mac-then-encrypt, then this will succeed but
                 * with a random MAC if padding is invalid
                 */
                if (!tls1_cbc_remove_padding_and_mac(&recs[ctr].length,
                                                     recs[ctr].orig_len,
                                                     recs[ctr].data,
                                                     (macs != NULL) ? &macs[ctr].mac : NULL,
                                                     (macs != NULL) ? &macs[ctr].alloced
                                                                    : NULL,
                                                     bs,
                                                     pad ? (size_t)pad : macsize,
                                                     (EVP_CIPHER_get_flags(enc)
                                                      & EVP_CIPH_FLAG_AEAD_CIPHER) != 0,
                                                     sctx->libctx))
                    return 0;
            }
        }
    }
    return 1;
}

static int tls1_mac(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec, unsigned char *md,
                    int sending, SSL_CONNECTION *ssl)
{
    unsigned char *seq;
    EVP_MD_CTX *hash;
    size_t md_size;
    int i;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    unsigned char header[13];
    int stream_mac = sending ? (ssl->mac_flags & SSL_MAC_FLAG_WRITE_MAC_STREAM)
                             : (ssl->mac_flags & SSL_MAC_FLAG_READ_MAC_STREAM);
    int tlstree_mac = sending ? (ssl->mac_flags & SSL_MAC_FLAG_WRITE_MAC_TLSTREE)
                              : (ssl->mac_flags & SSL_MAC_FLAG_READ_MAC_TLSTREE);
    int t;
    int ret = 0;

    if (sending) {
        seq = RECORD_LAYER_get_write_sequence(&ssl->rlayer);
        hash = ssl->write_hash;
    } else {
        seq = RECORD_LAYER_get_read_sequence(&ssl->rlayer);
        hash = rl->read_hash;
    }

    t = EVP_MD_CTX_get_size(hash);
    if (!ossl_assert(t >= 0))
        return 0;
    md_size = t;

    /* I should fix this up TLS TLS TLS TLS TLS XXXXXXXX */
    if (stream_mac) {
        mac_ctx = hash;
    } else {
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash)) {
            goto end;
        }
        mac_ctx = hmac;
    }

    if (!rl->isdtls
            && tlstree_mac
            && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0) {
        goto end;
    }

    if (rl->isdtls) {
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(sending ? DTLS_RECORD_LAYER_get_w_epoch(&ssl->rlayer) :
            DTLS_RECORD_LAYER_get_r_epoch(&ssl->rlayer), p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else
        memcpy(header, seq, 8);

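    /*
     * The MAC is computed over a 13-byte pseudo-header (sequence number,
     * record type, protocol version and length) followed by the record
     * payload, matching the AAD layout used for AEAD ciphers in tls1_cipher().
     */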
    header[8] = rec->type;
    header[9] = (unsigned char)(ssl->version >> 8);
    header[10] = (unsigned char)(ssl->version);
    header[11] = (unsigned char)(rec->length >> 8);
    header[12] = (unsigned char)(rec->length & 0xff);

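    /*
     * For CBC records read without Encrypt-then-MAC, pass the original record
     * length to the HMAC implementation so it can process the data in
     * constant time regardless of how much padding was removed (a mitigation
     * for Lucky 13 style timing attacks).
     */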
    if (!sending && !SSL_READ_ETM(ssl)
        && EVP_CIPHER_CTX_get_mode(rl->enc_read_ctx) == EVP_CIPH_CBC_MODE
        && ssl3_cbc_record_digest_supported(mac_ctx)) {
        OSSL_PARAM tls_hmac_params[2], *p = tls_hmac_params;

        *p++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE,
                                           &rec->orig_len);
        *p++ = OSSL_PARAM_construct_end();

        if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
                                     tls_hmac_params)) {
            goto end;
        }
    }

    if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
        || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
        || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0) {
        goto end;
    }

    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "seq:\n");
        BIO_dump_indent(trc_out, seq, 8, 4);
        BIO_printf(trc_out, "rec:\n");
        BIO_dump_indent(trc_out, rec->data, rec->length, 4);
    } OSSL_TRACE_END(TLS);

    if (!SSL_CONNECTION_IS_DTLS(ssl)) {
        for (i = 7; i >= 0; i--) {
            ++seq[i];
            if (seq[i] != 0)
                break;
        }
    }
    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "md:\n");
        BIO_dump_indent(trc_out, md, md_size, 4);
    } OSSL_TRACE_END(TLS);
    ret = 1;
 end:
    EVP_MD_CTX_free(hmac);
    return ret;
}

/* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
struct record_functions_st tls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac
};