/*
 * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/evp.h>
#include <openssl/core_names.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>
#include "internal/ssl3_cbc.h"
#include "../../ssl_local.h"
#include "../record_local.h"
#include "recmethod_local.h"

static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 int mactype,
                                 const EVP_MD *md,
                                 COMP_METHOD *comp)
{
    EVP_CIPHER_CTX *ciph_ctx;
    EVP_PKEY *mac_key;
    int enc = (rl->direction == OSSL_RECORD_DIRECTION_WRITE) ? 1 : 0;

    if (level != OSSL_RECORD_PROTECTION_LEVEL_APPLICATION)
        return OSSL_RECORD_RETURN_FATAL;

    if ((rl->enc_ctx = EVP_CIPHER_CTX_new()) == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_EVP_LIB);
        return OSSL_RECORD_RETURN_FATAL;
    }

    ciph_ctx = rl->enc_ctx;

    rl->md_ctx = EVP_MD_CTX_new();
    if (rl->md_ctx == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
#ifndef OPENSSL_NO_COMP
    if (comp != NULL) {
        rl->compctx = COMP_CTX_new(comp);
        if (rl->compctx == NULL) {
            ERR_raise(ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
#endif

    /*
     * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
     * setting up the MAC key.
     */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) == 0) {
        if (mactype == EVP_PKEY_HMAC) {
            mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
                                                      rl->propq, mackey,
                                                      mackeylen);
        } else {
            /*
             * If it's not HMAC, then the only other MAC types we support are
             * the GOST MACs, so we need to use the old-style way of creating
             * a MAC key.
             */
            mac_key = EVP_PKEY_new_mac_key(mactype, NULL, mackey,
                                           (int)mackeylen);
        }
        if (mac_key == NULL
            || EVP_DigestSignInit_ex(rl->md_ctx, NULL, EVP_MD_get0_name(md),
                                     rl->libctx, rl->propq, mac_key,
                                     NULL) <= 0) {
            EVP_PKEY_free(mac_key);
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
        EVP_PKEY_free(mac_key);
    }

    if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_GCM_MODE) {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, NULL, enc)
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_GCM_SET_IV_FIXED,
                                   (int)ivlen, iv) <= 0) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_CCM_MODE) {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, NULL, NULL, enc)
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, 12,
                                   NULL) <= 0
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG,
                                   (int)taglen, NULL) <= 0
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_CCM_SET_IV_FIXED,
                                   (int)ivlen, iv) <= 0
            || !EVP_CipherInit_ex(ciph_ctx, NULL, NULL, key, NULL, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, iv, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
    /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) != 0
        && mackeylen != 0
        && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_MAC_KEY,
                               (int)mackeylen, mackey) <= 0) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }

    /*
     * The cipher we actually ended up using in the EVP_CIPHER_CTX may be
     * different to that in ciph if we have an ENGINE in use
     */
    if (EVP_CIPHER_get0_provider(EVP_CIPHER_CTX_get0_cipher(ciph_ctx)) != NULL
            && !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md)) {
        /* ERR_raise already called */
        return OSSL_RECORD_RETURN_FATAL;
    }

    /* Calculate the explicit IV length */
    if (RLAYER_USE_EXPLICIT_IV(rl)) {
        int mode = EVP_CIPHER_CTX_get_mode(ciph_ctx);
        int eivlen = 0;

        if (mode == EVP_CIPH_CBC_MODE) {
            eivlen = EVP_CIPHER_CTX_get_iv_length(ciph_ctx);
            if (eivlen < 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_LIBRARY_BUG);
                return OSSL_RECORD_RETURN_FATAL;
            }
            if (eivlen <= 1)
                eivlen = 0;
        } else if (mode == EVP_CIPH_GCM_MODE) {
            /* Need explicit part of IV for GCM mode */
            eivlen = EVP_GCM_TLS_EXPLICIT_IV_LEN;
        } else if (mode == EVP_CIPH_CCM_MODE) {
            eivlen = EVP_CCM_TLS_EXPLICIT_IV_LEN;
        }
        rl->eivlen = (size_t)eivlen;
    }
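    /*
     * e.g. rl->eivlen ends up 16 for AES-CBC here (TLS >= 1.1), 8 for
     * GCM/CCM (EVP_GCM_TLS_EXPLICIT_IV_LEN and EVP_CCM_TLS_EXPLICIT_IV_LEN),
     * and 0 when no explicit IV is in use.
     */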

    return OSSL_RECORD_RETURN_SUCCESS;
}
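
/*-
 * Note: for TLS 1.2 AES-GCM the 12 byte record nonce is a 4 byte implicit
 * "salt" from the key block plus an 8 byte explicit part carried in each
 * record (RFC 5288). The salt is what EVP_CTRL_GCM_SET_IV_FIXED installs
 * above. A minimal standalone sketch (illustrative only, with caller-supplied
 * |key| and |salt| buffers, not part of this file's logic):
 *
 *     EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *     EVP_CipherInit_ex(ctx, EVP_aes_128_gcm(), NULL, key, NULL, 1);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IV_FIXED, 4, salt);
 *
 * The provider later combines salt || explicit_iv into the final nonce.
 */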

#define MAX_PADDING 256
/*-
 * tls1_cipher encrypts/decrypts the |n_recs| records in |recs|. Calls
 * RLAYERfatal on internal error, but not otherwise. It is the responsibility
 * of the caller to report a bad_record_mac alert if appropriate (DTLS just
 * drops the record).
 *
 * Returns:
 *    0: if the record is publicly invalid, or an internal error, or AEAD
 *       decryption failed, or Encrypt-then-MAC decryption failed.
 *    1: Success or MAC-then-encrypt decryption failed (MAC will be randomised)
 */
static int tls1_cipher(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *recs,
                       size_t n_recs, int sending, SSL_MAC_BUF *macs,
                       size_t macsize)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    unsigned char *data[SSL_MAX_PIPELINES];
    int pad = 0, tmpr, provided;
    size_t bs, ctr, padnum, loop;
    unsigned char padval;
    const EVP_CIPHER *enc;

    if (n_recs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    if (EVP_MD_CTX_get0_md(rl->md_ctx)) {
        int n = EVP_MD_CTX_get_size(rl->md_ctx);

        if (!ossl_assert(n >= 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }
    ds = rl->enc_ctx;
    if (!ossl_assert(rl->enc_ctx != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    enc = EVP_CIPHER_CTX_get0_cipher(rl->enc_ctx);

    if (sending) {
        int ivlen;

        /* For TLSv1.1 and later explicit IV */
        if (RLAYER_USE_EXPLICIT_IV(rl)
            && EVP_CIPHER_get_mode(enc) == EVP_CIPH_CBC_MODE)
            ivlen = EVP_CIPHER_get_iv_length(enc);
        else
            ivlen = 0;
        if (ivlen > 1) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                if (recs[ctr].data != recs[ctr].input) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                } else if (RAND_bytes_ex(rl->libctx, recs[ctr].input,
                                         ivlen, 0) <= 0) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    }
    if (!ossl_assert(enc != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    provided = (EVP_CIPHER_get0_provider(enc) != NULL);

    bs = EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds));

    if (bs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_BAD_CIPHER);
        return 0;
    }

    if (n_recs > 1) {
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_PIPELINE) == 0) {
            /*
             * We shouldn't have been called with pipeline data if the
             * cipher doesn't support pipelining
             */
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
    for (ctr = 0; ctr < n_recs; ctr++) {
        reclen[ctr] = recs[ctr].length;

        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
            unsigned char *seq;

            seq = rl->sequence;

            if (rl->isdtls) {
                unsigned char dtlsseq[8], *p = dtlsseq;

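                /* DTLS AAD: 2 byte epoch followed by the low 6 bytes of seq */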
                s2n(rl->epoch, p);
                memcpy(p, &seq[2], 6);
                memcpy(buf[ctr], dtlsseq, 8);
            } else {
                memcpy(buf[ctr], seq, 8);
                if (!tls_increment_sequence_ctr(rl)) {
                    /* RLAYERfatal already called */
                    return 0;
                }
            }

            buf[ctr][8] = recs[ctr].type;
            buf[ctr][9] = (unsigned char)(rl->version >> 8);
            buf[ctr][10] = (unsigned char)(rl->version);
            buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
            buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
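            /*-
             * buf[ctr] now holds the 13 byte TLS AAD (EVP_AEAD_TLS1_AAD_LEN):
             * 8 byte sequence number (or epoch||seq for DTLS), 1 byte type,
             * 2 byte version, 2 byte plaintext length. E.g. a 32 byte TLS 1.2
             * application data record with sequence number 1 would give:
             *
             *   00 00 00 00 00 00 00 01 | 17 | 03 03 | 00 20
             */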
            pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                      EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
            if (pad <= 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }

            if (sending) {
                reclen[ctr] += pad;
                recs[ctr].length += pad;
            }
        } else if ((bs != 1) && sending && !provided) {
            /*
             * We only do this for legacy ciphers. Provided ciphers add the
             * padding on the provider side.
             */
            padnum = bs - (reclen[ctr] % bs);

            /* Add weird padding of up to 256 bytes */

            if (padnum > MAX_PADDING) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
            /* we need to add 'padnum' padding bytes of value padval */
            padval = (unsigned char)(padnum - 1);
            for (loop = reclen[ctr]; loop < reclen[ctr] + padnum; loop++)
                recs[ctr].input[loop] = padval;
            reclen[ctr] += padnum;
            recs[ctr].length += padnum;
        }
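        /*-
         * Worked example: with bs = 16 (AES-CBC) and reclen = 52, the loop
         * above appends padnum = 12 bytes each of value padval = 0x0b,
         * giving a 64 byte multiple-of-bs input: 11 padding bytes plus the
         * padding_length byte itself, all carrying the value 11.
         */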

        if (!sending) {
            if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) {
                /* Publicly invalid */
                return 0;
            }
        }
    }
    if (n_recs > 1) {
        /* Set the output buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].data;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                (int)n_recs, data) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
        /* Set the input buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].input;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                (int)n_recs, data) <= 0
            || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                   (int)n_recs, reclen) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
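    /*
     * A pipeline-capable cipher then processes all n_recs records in a single
     * EVP_Cipher call below (legacy path only; the provided path rejects
     * n_recs > 1). The arrays above are sized by SSL_MAX_PIPELINES.
     */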

    if (!rl->isdtls && rl->tlstree) {
        int decrement_seq = 0;

        /*
         * When sending, seq is incremented after MAC calculation.
         * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
         * Otherwise we have to decrease it in the implementation
         */
        if (sending && !rl->use_etm)
            decrement_seq = 1;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_TLSTREE, decrement_seq,
                                rl->sequence) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    if (provided) {
        int outlen;

        /* Provided cipher - we do not support pipelining on this path */
        if (n_recs > 1) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
                              (unsigned int)reclen[0]))
            return 0;
        recs[0].length = outlen;

        /*
         * The length returned from EVP_CipherUpdate above is the actual
         * payload length. We need to adjust the data/input ptr to skip over
         * any explicit IV
         */
        if (!sending) {
            if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
            } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
            } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                recs[0].data += bs;
                recs[0].input += bs;
                recs[0].orig_len -= bs;
            }

            /* Now get a pointer to the MAC (if applicable) */
            if (macs != NULL) {
                OSSL_PARAM params[2], *p = params;

                /* Get the MAC */
                macs[0].alloced = 0;

                *p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
                                                      (void **)&macs[0].mac,
                                                      macsize);
                *p = OSSL_PARAM_construct_end();

                if (!EVP_CIPHER_CTX_get_params(ds, params)) {
                    /* Shouldn't normally happen */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR,
                                ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        /* Legacy cipher */

        tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
                          (unsigned int)reclen[0]);
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
            ? (tmpr < 0)
            : (tmpr == 0)) {
            /* AEAD can fail to verify MAC */
            return 0;
        }

        if (!sending) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                /* Adjust the record to remove the explicit IV/MAC/Tag */
                if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                    if (recs[ctr].length < bs)
                        return 0;
                    recs[ctr].data += bs;
                    recs[ctr].input += bs;
                    recs[ctr].length -= bs;
                    recs[ctr].orig_len -= bs;
                }

                /*
                 * If using Mac-then-encrypt, then this will succeed but
                 * with a random MAC if padding is invalid
                 */
                if (!tls1_cbc_remove_padding_and_mac(&recs[ctr].length,
                                                     recs[ctr].orig_len,
                                                     recs[ctr].data,
                                                     (macs != NULL) ? &macs[ctr].mac : NULL,
                                                     (macs != NULL) ? &macs[ctr].alloced
                                                                    : NULL,
                                                     bs,
                                                     pad ? (size_t)pad : macsize,
                                                     (EVP_CIPHER_get_flags(enc)
                                                      & EVP_CIPH_FLAG_AEAD_CIPHER) != 0,
                                                     rl->libctx))
                    return 0;
            }
        }
    }
    return 1;
}

static int tls1_mac(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *rec, unsigned char *md,
                    int sending)
{
    unsigned char *seq = rl->sequence;
    EVP_MD_CTX *hash;
    size_t md_size;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    unsigned char header[13];
    int t;
    int ret = 0;

    hash = rl->md_ctx;

    t = EVP_MD_CTX_get_size(hash);
    if (!ossl_assert(t >= 0))
        return 0;
    md_size = t;

    if (rl->stream_mac) {
        mac_ctx = hash;
    } else {
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash)) {
            goto end;
        }
        mac_ctx = hmac;
    }

    if (!rl->isdtls
        && rl->tlstree
        && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0)
        goto end;

    if (rl->isdtls) {
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(rl->epoch, p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else {
        memcpy(header, seq, 8);
    }

    header[8] = rec->type;
    header[9] = (unsigned char)(rl->version >> 8);
    header[10] = (unsigned char)(rl->version);
    header[11] = (unsigned char)(rec->length >> 8);
    header[12] = (unsigned char)(rec->length & 0xff);
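    /*
     * header[] uses the same 13 byte layout as the AEAD AAD in tls1_cipher:
     * sequence number (or epoch||seq for DTLS), type, version, length.
     */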

    if (!sending && !rl->use_etm
        && EVP_CIPHER_CTX_get_mode(rl->enc_ctx) == EVP_CIPH_CBC_MODE
        && ssl3_cbc_record_digest_supported(mac_ctx)) {
        OSSL_PARAM tls_hmac_params[2], *p = tls_hmac_params;

        *p++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE,
                                           &rec->orig_len);
        *p++ = OSSL_PARAM_construct_end();

        if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
                                     tls_hmac_params))
            goto end;
    }

    if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
        || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
        || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0)
        goto end;

    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "seq:\n");
        BIO_dump_indent(trc_out, seq, 8, 4);
        BIO_printf(trc_out, "rec:\n");
        BIO_dump_indent(trc_out, rec->data, rec->length, 4);
    } OSSL_TRACE_END(TLS);

    if (!rl->isdtls && !tls_increment_sequence_ctr(rl)) {
        /* RLAYERfatal already called */
        goto end;
    }

    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "md:\n");
        BIO_dump_indent(trc_out, md, md_size, 4);
    } OSSL_TRACE_END(TLS);
    ret = 1;
 end:
    EVP_MD_CTX_free(hmac);
    return ret;
}
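
/*-
 * For reference, the MAC computed above is HMAC(mac_key, header || fragment)
 * for the HMAC-based suites. A minimal standalone sketch using the one-shot
 * EVP_MAC API (illustrative only; |mackey|, |mackeylen|, |header|, |fragment|
 * and |fraglen| are assumed caller-supplied):
 *
 *     EVP_MAC *mac = EVP_MAC_fetch(NULL, "HMAC", NULL);
 *     EVP_MAC_CTX *mctx = EVP_MAC_CTX_new(mac);
 *     unsigned char out[EVP_MAX_MD_SIZE];
 *     size_t mdlen;
 *     OSSL_PARAM params[2];
 *
 *     params[0] = OSSL_PARAM_construct_utf8_string("digest",
 *                                                  (char *)"SHA256", 0);
 *     params[1] = OSSL_PARAM_construct_end();
 *     EVP_MAC_init(mctx, mackey, mackeylen, params);
 *     EVP_MAC_update(mctx, header, 13);
 *     EVP_MAC_update(mctx, fragment, fraglen);
 *     EVP_MAC_final(mctx, out, &mdlen, sizeof(out));
 *
 * Note this lacks the constant-time CBC record handling that the code above
 * relies on in Mac-then-encrypt mode.
 */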

#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#else
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#endif
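
/*
 * MAX_PREFIX_LEN bounds the buffer needed for the empty prefix record below:
 * alignment slack, maximum encryption overhead and the record header (plus
 * compression overhead when compression is compiled in).
 */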

/* This function is also used by the SSLv3 implementation */
int tls1_allocate_write_buffers(OSSL_RECORD_LAYER *rl,
                                OSSL_RECORD_TEMPLATE *templates,
                                size_t numtempl, size_t *prefix)
{
    /* Do we need to add an empty record prefix? */
    *prefix = rl->need_empty_fragments
              && templates[0].type == SSL3_RT_APPLICATION_DATA;

    /*
     * In the prefix case we can allocate a much smaller buffer. Otherwise we
     * just allocate the default buffer size
     */
    if (!tls_setup_write_buffer(rl, numtempl + *prefix,
                                *prefix ? MAX_PREFIX_LEN : 0, 0)) {
        /* RLAYERfatal() already called */
        return 0;
    }

    return 1;
}

/* This function is also used by the SSLv3 implementation */
int tls1_initialise_write_packets(OSSL_RECORD_LAYER *rl,
                                  OSSL_RECORD_TEMPLATE *templates,
                                  size_t numtempl,
                                  OSSL_RECORD_TEMPLATE *prefixtempl,
                                  WPACKET *pkt,
                                  TLS_BUFFER *bufs,
                                  size_t *wpinited)
{
    size_t align = 0;
    TLS_BUFFER *wb;
    size_t prefix;

    /* Do we need to add an empty record prefix? */
    prefix = rl->need_empty_fragments
             && templates[0].type == SSL3_RT_APPLICATION_DATA;

    if (prefix) {
        /*
         * countermeasure against known-IV weakness in CBC ciphersuites (see
         * http://www.openssl.org/~bodo/tls-cbc.txt)
         */
        prefixtempl->buf = NULL;
        prefixtempl->version = templates[0].version;
        prefixtempl->buflen = 0;
        prefixtempl->type = SSL3_RT_APPLICATION_DATA;

        wb = &bufs[0];

#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
        align = (size_t)TLS_BUFFER_get_buf(wb) + SSL3_RT_HEADER_LENGTH;
        align = SSL3_ALIGN_PAYLOAD - 1 - ((align - 1) % SSL3_ALIGN_PAYLOAD);
#endif
        TLS_BUFFER_set_offset(wb, align);

        if (!WPACKET_init_static_len(&pkt[0], TLS_BUFFER_get_buf(wb),
                                     TLS_BUFFER_get_len(wb), 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
        *wpinited = 1;
        if (!WPACKET_allocate_bytes(&pkt[0], align, NULL)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    return tls_initialise_write_packets_default(rl, templates, numtempl,
                                                NULL,
                                                pkt + prefix, bufs + prefix,
                                                wpinited);
}

/* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
const struct record_functions_st tls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    tls_get_more_records,
    tls_default_validate_record_header,
    tls_default_post_process_record,
    tls_get_max_records_multiblock,
    tls_write_records_multiblock, /* Defined in tls_multib.c */
    tls1_allocate_write_buffers,
    tls1_initialise_write_packets,
    NULL,
    tls_prepare_record_header_default,
    NULL,
    tls_prepare_for_encryption_default,
    tls_post_encryption_processing_default,
    NULL
};

const struct record_functions_st dtls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    dtls_get_more_records,
    NULL,
    NULL,
    NULL,
    tls_write_records_default,
    /*
     * Don't use tls1_allocate_write_buffers since that handles empty fragment
     * records which aren't needed in DTLS. We just use the default allocation
     * instead.
     */
    tls_allocate_write_buffers_default,
    /* Don't use tls1_initialise_write_packets for same reason as above */
    tls_initialise_write_packets_default,
    NULL,
    dtls_prepare_record_header,
    NULL,
    tls_prepare_for_encryption_default,
    dtls_post_encryption_processing,
    NULL
};