]> git.ipfire.org Git - thirdparty/openssl.git/blame - ssl/record/methods/tls1_meth.c
Rationalize FIPS sources
[thirdparty/openssl.git] / ssl / record / methods / tls1_meth.c
CommitLineData
50023e9b
MC
1/*
2 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10#include <openssl/evp.h>
11#include <openssl/core_names.h>
12#include <openssl/rand.h>
91fe8ff0 13#include <openssl/ssl.h>
25624c90 14#include "internal/ssl3_cbc.h"
50023e9b
MC
15#include "../../ssl_local.h"
16#include "../record_local.h"
17#include "recmethod_local.h"
18
50023e9b
MC
/*
 * Set up the cipher, MAC and (optionally) compression state for the record
 * layer |rl| for TLSv1.0-1.2/DTLS.
 *
 * |level| must be OSSL_RECORD_PROTECTION_LEVEL_APPLICATION (these protocol
 * versions have no other levels). |key|/|iv| configure the cipher |ciph|;
 * |mackey| configures a MAC of type |mactype| over digest |md| unless |ciph|
 * is an AEAD. |taglen| is the AEAD tag length (used for CCM). |comp|, if
 * non-NULL, enables record compression.
 *
 * Returns OSSL_RECORD_RETURN_SUCCESS or OSSL_RECORD_RETURN_FATAL. On fatal
 * error some paths call RLAYERfatal() and others only ERR_raise(); the
 * caller is expected to treat any fatal return as unrecoverable either way.
 */
static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 int mactype,
                                 const EVP_MD *md,
                                 COMP_METHOD *comp)
{
    EVP_CIPHER_CTX *ciph_ctx;
    EVP_PKEY *mac_key;
    /* Writers encrypt (enc=1), readers decrypt (enc=0) */
    int enc = (rl->direction == OSSL_RECORD_DIRECTION_WRITE) ? 1 : 0;

    if (level != OSSL_RECORD_PROTECTION_LEVEL_APPLICATION)
        return OSSL_RECORD_RETURN_FATAL;

    if ((rl->enc_ctx = EVP_CIPHER_CTX_new()) == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_EVP_LIB);
        return OSSL_RECORD_RETURN_FATAL;
    }

    ciph_ctx = rl->enc_ctx;

    rl->md_ctx = EVP_MD_CTX_new();
    if (rl->md_ctx == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
#ifndef OPENSSL_NO_COMP
    if (comp != NULL) {
        rl->compctx = COMP_CTX_new(comp);
        if (rl->compctx == NULL) {
            ERR_raise(ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
#endif

    /*
     * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
     * setting up the MAC key.
     */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) == 0) {
        if (mactype == EVP_PKEY_HMAC) {
            mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
                                                      rl->propq, mackey,
                                                      mackeylen);
        } else {
            /*
             * If its not HMAC then the only other types of MAC we support are
             * the GOST MACs, so we need to use the old style way of creating
             * a MAC key.
             */
            mac_key = EVP_PKEY_new_mac_key(mactype, NULL, mackey,
                                           (int)mackeylen);
        }
        if (mac_key == NULL
            || EVP_DigestSignInit_ex(rl->md_ctx, NULL, EVP_MD_get0_name(md),
                                     rl->libctx, rl->propq, mac_key,
                                     NULL) <= 0) {
            EVP_PKEY_free(mac_key);
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
        /* md_ctx holds a reference now; drop ours */
        EVP_PKEY_free(mac_key);
    }

    if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_GCM_MODE) {
        /* GCM: set key first, then the fixed (implicit) part of the IV */
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, NULL, enc)
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_GCM_SET_IV_FIXED,
                                       (int)ivlen, iv) <= 0) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_CCM_MODE) {
        /*
         * CCM requires IV length and tag length to be configured before the
         * key is set, hence the two-step CipherInit.
         */
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, NULL, NULL, enc)
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, 12,
                                       NULL) <= 0
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG,
                                       (int)taglen, NULL) <= 0
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_CCM_SET_IV_FIXED,
                                       (int)ivlen, iv) <= 0
                || !EVP_CipherInit_ex(ciph_ctx, NULL, NULL, key, NULL, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else {
        /* CBC/stream ciphers: a single init with both key and IV */
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, iv, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
    /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) != 0
        && mackeylen != 0
        && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_MAC_KEY,
                               (int)mackeylen, mackey) <= 0) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
    /* Provider-based ciphers need additional TLS-specific parameters */
    if (EVP_CIPHER_get0_provider(ciph) != NULL
            && !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md))
        return OSSL_RECORD_RETURN_FATAL;

    /* Calculate the explict IV length */
    if (RLAYER_USE_EXPLICIT_IV(rl)) {
        int mode = EVP_CIPHER_CTX_get_mode(ciph_ctx);
        int eivlen = 0;

        if (mode == EVP_CIPH_CBC_MODE) {
            eivlen = EVP_CIPHER_CTX_get_iv_length(ciph_ctx);
            if (eivlen < 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_LIBRARY_BUG);
                return OSSL_RECORD_RETURN_FATAL;
            }
            if (eivlen <= 1)
                eivlen = 0;
        } else if (mode == EVP_CIPH_GCM_MODE) {
            /* Need explicit part of IV for GCM mode */
            eivlen = EVP_GCM_TLS_EXPLICIT_IV_LEN;
        } else if (mode == EVP_CIPH_CCM_MODE) {
            eivlen = EVP_CCM_TLS_EXPLICIT_IV_LEN;
        }
        rl->eivlen = (size_t)eivlen;
    }

    return OSSL_RECORD_RETURN_SUCCESS;
}
148
/* TLS CBC padding never exceeds one full 256-byte padding block */
#define MAX_PADDING 256
/*-
 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
 * internal error, but not otherwise. It is the responsibility of the caller to
 * report a bad_record_mac - if appropriate (DTLS just drops the record).
 *
 * Returns:
 *    0: if the record is publicly invalid, or an internal error, or AEAD
 *       decryption failed, or Encrypt-then-mac decryption failed.
 *    1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
 */
static int tls1_cipher(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *recs,
                       size_t n_recs, int sending, SSL_MAC_BUF *macs,
                       size_t macsize)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    /* Per-record 13-byte AAD/pseudo-header buffers for AEAD ciphers */
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    unsigned char *data[SSL_MAX_PIPELINES];
    int pad = 0, tmpr, provided;
    size_t bs, ctr, padnum, loop;
    unsigned char padval;
    const EVP_CIPHER *enc;

    if (n_recs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    if (EVP_MD_CTX_get0_md(rl->md_ctx)) {
        int n = EVP_MD_CTX_get_size(rl->md_ctx);

        if (!ossl_assert(n >= 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }
    ds = rl->enc_ctx;
    if (!ossl_assert(rl->enc_ctx != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    enc = EVP_CIPHER_CTX_get0_cipher(rl->enc_ctx);

    if (sending) {
        int ivlen;

        /* For TLSv1.1 and later explicit IV */
        if (RLAYER_USE_EXPLICIT_IV(rl)
            && EVP_CIPHER_get_mode(enc) == EVP_CIPH_CBC_MODE)
            ivlen = EVP_CIPHER_get_iv_length(enc);
        else
            ivlen = 0;
        if (ivlen > 1) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                if (recs[ctr].data != recs[ctr].input) {
                    /* Explicit IV insertion requires in-place operation */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                } else if (RAND_bytes_ex(rl->libctx, recs[ctr].input,
                                         ivlen, 0) <= 0) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    }
    if (!ossl_assert(enc != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    provided = (EVP_CIPHER_get0_provider(enc) != NULL);

    bs = EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds));

    if (n_recs > 1) {
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_PIPELINE) == 0) {
            /*
             * We shouldn't have been called with pipeline data if the
             * cipher doesn't support pipelining
             */
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
    for (ctr = 0; ctr < n_recs; ctr++) {
        reclen[ctr] = recs[ctr].length;

        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
            unsigned char *seq;

            seq = rl->sequence;

            if (rl->isdtls) {
                /* DTLS: sequence is epoch(2) || seq number(6) */
                unsigned char dtlsseq[8], *p = dtlsseq;

                s2n(rl->epoch, p);
                memcpy(p, &seq[2], 6);
                memcpy(buf[ctr], dtlsseq, 8);
            } else {
                memcpy(buf[ctr], seq, 8);
                if (!tls_increment_sequence_ctr(rl)) {
                    /* RLAYERfatal already called */
                    return 0;
                }
            }

            /* Remainder of the AAD: type(1) || version(2) || length(2) */
            buf[ctr][8] = recs[ctr].type;
            buf[ctr][9] = (unsigned char)(rl->version >> 8);
            buf[ctr][10] = (unsigned char)(rl->version);
            buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
            buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
            /* Returns the per-record overhead (explicit IV + tag) to add */
            pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                      EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
            if (pad <= 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }

            if (sending) {
                reclen[ctr] += pad;
                recs[ctr].length += pad;
            }
        } else if ((bs != 1) && sending && !provided) {
            /*
             * We only do this for legacy ciphers. Provided ciphers add the
             * padding on the provider side.
             */
            padnum = bs - (reclen[ctr] % bs);

            /* Add weird padding of up to 256 bytes */

            if (padnum > MAX_PADDING) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
            /* we need to add 'padnum' padding bytes of value padval */
            padval = (unsigned char)(padnum - 1);
            for (loop = reclen[ctr]; loop < reclen[ctr] + padnum; loop++)
                recs[ctr].input[loop] = padval;
            reclen[ctr] += padnum;
            recs[ctr].length += padnum;
        }

        if (!sending) {
            if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) {
                /* Publicly invalid */
                return 0;
            }
        }
    }
    if (n_recs > 1) {
        /* Set the output buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].data;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                (int)n_recs, data) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
        /* Set the input buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].input;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                (int)n_recs, data) <= 0
            || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                   (int)n_recs, reclen) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }

    if (!rl->isdtls && rl->tlstree) {
        int decrement_seq = 0;

        /*
         * When sending, seq is incremented after MAC calculation.
         * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
         * Otherwise we have to decrease it in the implementation
         */
        if (sending && !rl->use_etm)
            decrement_seq = 1;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_TLSTREE, decrement_seq,
                                rl->sequence) <= 0) {

            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    if (provided) {
        int outlen;

        /* Provided cipher - we do not support pipelining on this path */
        if (n_recs > 1) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
                              (unsigned int)reclen[0]))
            return 0;
        recs[0].length = outlen;

        /*
         * The length returned from EVP_CipherUpdate above is the actual
         * payload length. We need to adjust the data/input ptr to skip over
         * any explicit IV
         */
        if (!sending) {
            if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
            } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
            } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                recs[0].data += bs;
                recs[0].input += bs;
                recs[0].orig_len -= bs;
            }

            /* Now get a pointer to the MAC (if applicable) */
            if (macs != NULL) {
                OSSL_PARAM params[2], *p = params;

                /* Get the MAC */
                macs[0].alloced = 0;

                *p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
                                                      (void **)&macs[0].mac,
                                                      macsize);
                *p = OSSL_PARAM_construct_end();

                if (!EVP_CIPHER_CTX_get_params(ds, params)) {
                    /* Shouldn't normally happen */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR,
                                ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        /* Legacy cipher */

        tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
                          (unsigned int)reclen[0]);
        /* Custom ciphers report failure as <0, others as ==0 */
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
            ? (tmpr < 0)
            : (tmpr == 0)) {
            /* AEAD can fail to verify MAC */
            return 0;
        }

        if (!sending) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                /* Adjust the record to remove the explicit IV/MAC/Tag */
                if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                    if (recs[ctr].length < bs)
                        return 0;
                    recs[ctr].data += bs;
                    recs[ctr].input += bs;
                    recs[ctr].length -= bs;
                    recs[ctr].orig_len -= bs;
                }

                /*
                 * If using Mac-then-encrypt, then this will succeed but
                 * with a random MAC if padding is invalid
                 */
                if (!tls1_cbc_remove_padding_and_mac(&recs[ctr].length,
                                                     recs[ctr].orig_len,
                                                     recs[ctr].data,
                                                     (macs != NULL) ? &macs[ctr].mac : NULL,
                                                     (macs != NULL) ? &macs[ctr].alloced
                                                                    : NULL,
                                                     bs,
                                                     pad ? (size_t)pad : macsize,
                                                     (EVP_CIPHER_get_flags(enc)
                                                      & EVP_CIPH_FLAG_AEAD_CIPHER) != 0,
                                                     rl->libctx))
                    return 0;
            }
        }
    }
    return 1;
}
451
22094d11 452static int tls1_mac(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *rec, unsigned char *md,
8124ab56 453 int sending)
50023e9b 454{
0755722c 455 unsigned char *seq = rl->sequence;
50023e9b
MC
456 EVP_MD_CTX *hash;
457 size_t md_size;
50023e9b
MC
458 EVP_MD_CTX *hmac = NULL, *mac_ctx;
459 unsigned char header[13];
50023e9b
MC
460 int t;
461 int ret = 0;
462
6366bdd9 463 hash = rl->md_ctx;
50023e9b
MC
464
465 t = EVP_MD_CTX_get_size(hash);
466 if (!ossl_assert(t >= 0))
467 return 0;
468 md_size = t;
469
8124ab56 470 if (rl->stream_mac) {
50023e9b
MC
471 mac_ctx = hash;
472 } else {
473 hmac = EVP_MD_CTX_new();
474 if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash)) {
475 goto end;
476 }
477 mac_ctx = hmac;
478 }
479
480 if (!rl->isdtls
8124ab56 481 && rl->tlstree
1704961c 482 && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0)
50023e9b 483 goto end;
50023e9b
MC
484
485 if (rl->isdtls) {
486 unsigned char dtlsseq[8], *p = dtlsseq;
487
bfc0f10d 488 s2n(rl->epoch, p);
50023e9b
MC
489 memcpy(p, &seq[2], 6);
490
491 memcpy(header, dtlsseq, 8);
1704961c 492 } else {
50023e9b 493 memcpy(header, seq, 8);
1704961c 494 }
50023e9b
MC
495
496 header[8] = rec->type;
8124ab56
MC
497 header[9] = (unsigned char)(rl->version >> 8);
498 header[10] = (unsigned char)(rl->version);
50023e9b
MC
499 header[11] = (unsigned char)(rec->length >> 8);
500 header[12] = (unsigned char)(rec->length & 0xff);
501
7f2f0ac7 502 if (!sending && !rl->use_etm
6366bdd9 503 && EVP_CIPHER_CTX_get_mode(rl->enc_ctx) == EVP_CIPH_CBC_MODE
50023e9b
MC
504 && ssl3_cbc_record_digest_supported(mac_ctx)) {
505 OSSL_PARAM tls_hmac_params[2], *p = tls_hmac_params;
506
507 *p++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE,
508 &rec->orig_len);
509 *p++ = OSSL_PARAM_construct_end();
510
511 if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
1704961c 512 tls_hmac_params))
50023e9b 513 goto end;
50023e9b
MC
514 }
515
516 if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
517 || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
1704961c 518 || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0)
50023e9b 519 goto end;
50023e9b
MC
520
521 OSSL_TRACE_BEGIN(TLS) {
522 BIO_printf(trc_out, "seq:\n");
523 BIO_dump_indent(trc_out, seq, 8, 4);
524 BIO_printf(trc_out, "rec:\n");
525 BIO_dump_indent(trc_out, rec->data, rec->length, 4);
526 } OSSL_TRACE_END(TLS);
527
bed07b18
MC
528 if (!rl->isdtls && !tls_increment_sequence_ctr(rl)) {
529 /* RLAYERfatal already called */
530 goto end;
50023e9b 531 }
bed07b18 532
50023e9b
MC
533 OSSL_TRACE_BEGIN(TLS) {
534 BIO_printf(trc_out, "md:\n");
535 BIO_dump_indent(trc_out, md, md_size, 4);
536 } OSSL_TRACE_END(TLS);
537 ret = 1;
538 end:
539 EVP_MD_CTX_free(hmac);
540 return ret;
541}
542
91fe8ff0
MC
/*
 * Maximum size needed for the buffer holding the empty-fragment prefix
 * record: record header plus worst-case encryption overhead, plus (when
 * enabled) compression overhead and payload-alignment slack.
 */
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                           + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                           + SSL3_RT_HEADER_LENGTH \
                           + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                           + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                           + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#else
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#endif
564
565/* This function is also used by the SSLv3 implementation */
566int tls1_allocate_write_buffers(OSSL_RECORD_LAYER *rl,
567 OSSL_RECORD_TEMPLATE *templates,
568 size_t numtempl, size_t *prefix)
569{
570 /* Do we need to add an empty record prefix? */
571 *prefix = rl->need_empty_fragments
572 && templates[0].type == SSL3_RT_APPLICATION_DATA;
573
574 /*
575 * In the prefix case we can allocate a much smaller buffer. Otherwise we
576 * just allocate the default buffer size
577 */
578 if (!tls_setup_write_buffer(rl, numtempl + *prefix,
579 *prefix ? MAX_PREFIX_LEN : 0, 0)) {
580 /* RLAYERfatal() already called */
581 return 0;
582 }
583
584 return 1;
585}
586
/*
 * Initialise the WPACKETs used to assemble outgoing records. If an
 * empty-fragment prefix record is needed (CBC known-IV countermeasure), the
 * first packet/buffer pair is set up here for the zero-length record and the
 * remaining templates are handled by the default implementation.
 *
 * On success *|wpinited| reflects how many WPACKETs were initialised by this
 * function (so the caller can clean them up on later failure). Returns 1 on
 * success, 0 on error (RLAYERfatal already called).
 *
 * This function is also used by the SSLv3 implementation.
 */
int tls1_initialise_write_packets(OSSL_RECORD_LAYER *rl,
                                  OSSL_RECORD_TEMPLATE *templates,
                                  size_t numtempl,
                                  OSSL_RECORD_TEMPLATE *prefixtempl,
                                  WPACKET *pkt,
                                  TLS_BUFFER *bufs,
                                  size_t *wpinited)
{
    size_t align = 0;
    TLS_BUFFER *wb;
    size_t prefix;

    /* Do we need to add an empty record prefix? */
    prefix = rl->need_empty_fragments
             && templates[0].type == SSL3_RT_APPLICATION_DATA;

    if (prefix) {
        /*
         * countermeasure against known-IV weakness in CBC ciphersuites (see
         * http://www.openssl.org/~bodo/tls-cbc.txt)
         */
        prefixtempl->buf = NULL;
        prefixtempl->version = templates[0].version;
        prefixtempl->buflen = 0;
        prefixtempl->type = SSL3_RT_APPLICATION_DATA;

        wb = &bufs[0];

#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
        /* Offset the payload so it lands on an SSL3_ALIGN_PAYLOAD boundary */
        align = (size_t)TLS_BUFFER_get_buf(wb) + SSL3_RT_HEADER_LENGTH;
        align = SSL3_ALIGN_PAYLOAD - 1
                - ((align - 1) % SSL3_ALIGN_PAYLOAD);
#endif
        TLS_BUFFER_set_offset(wb, align);

        if (!WPACKET_init_static_len(&pkt[0], TLS_BUFFER_get_buf(wb),
                                     TLS_BUFFER_get_len(wb), 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
        *wpinited = 1;
        /* Reserve the alignment bytes so record data starts after them */
        if (!WPACKET_allocate_bytes(&pkt[0], align, NULL)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    /* Remaining (non-prefix) records use the default initialisation */
    return tls_initialise_write_packets_default(rl, templates, numtempl,
                                                NULL,
                                                pkt + prefix, bufs + prefix,
                                                wpinited);
}
640
50023e9b
MC
/* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
/*
 * NOTE(review): slot order follows struct record_functions_st as declared in
 * recmethod_local.h — confirm against that header when adding entries.
 */
struct record_functions_st tls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    tls_get_more_records,
    tls_default_validate_record_header,
    tls_default_post_process_record,
    tls_get_max_records_multiblock,
    tls_write_records_multiblock, /* Defined in tls_multib.c */
    tls1_allocate_write_buffers,
    tls1_initialise_write_packets,
    NULL,
    tls_prepare_record_header_default,
    NULL,
    tls_prepare_for_encryption_default,
    tls_post_encryption_processing_default,
    NULL
};
222cf410
MC
662
/*
 * DTLS record method table. Shares the TLSv1 crypto/MAC implementations but
 * uses DTLS-specific record reading and header handling; several slots are
 * NULL where TLS-only processing does not apply.
 */
struct record_functions_st dtls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    dtls_get_more_records,
    NULL,
    NULL,
    NULL,
    tls_write_records_default,
    /*
     * Don't use tls1_allocate_write_buffers since that handles empty fragment
     * records which aren't needed in DTLS. We just use the default allocation
     * instead.
     */
    tls_allocate_write_buffers_default,
    /* Don't use tls1_initialise_write_packets for same reason as above */
    tls_initialise_write_packets_default,
    NULL,
    dtls_prepare_record_header,
    NULL,
    tls_prepare_for_encryption_default,
    dtls_post_encryption_processing,
    NULL
};