/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/evp.h>
#include <openssl/core_names.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>
#include "../../ssl_local.h"
#include "../record_local.h"
#include "recmethod_local.h"

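/*
 * Set up the crypto state (cipher context, MAC key and optional compression
 * context) for this record layer. Only the application data protection level
 * is supported here. Returns OSSL_RECORD_RETURN_SUCCESS or
 * OSSL_RECORD_RETURN_FATAL.
 */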
static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 int mactype,
                                 const EVP_MD *md,
                                 COMP_METHOD *comp)
{
    EVP_CIPHER_CTX *ciph_ctx;
    EVP_PKEY *mac_key;
    int enc = (rl->direction == OSSL_RECORD_DIRECTION_WRITE) ? 1 : 0;

    if (level != OSSL_RECORD_PROTECTION_LEVEL_APPLICATION)
        return OSSL_RECORD_RETURN_FATAL;

    if ((rl->enc_ctx = EVP_CIPHER_CTX_new()) == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_EVP_LIB);
        return OSSL_RECORD_RETURN_FATAL;
    }

    ciph_ctx = rl->enc_ctx;

    rl->md_ctx = EVP_MD_CTX_new();
    if (rl->md_ctx == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
#ifndef OPENSSL_NO_COMP
    if (comp != NULL) {
        rl->compctx = COMP_CTX_new(comp);
        if (rl->compctx == NULL) {
            ERR_raise(ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
#endif

    /*
     * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
     * setting up the MAC key.
     */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) == 0) {
        if (mactype == EVP_PKEY_HMAC) {
            mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
                                                      rl->propq, mackey,
                                                      mackeylen);
        } else {
            /*
             * If it's not HMAC then the only other types of MAC we support
             * are the GOST MACs, so we need to use the old-style way of
             * creating a MAC key.
             */
            mac_key = EVP_PKEY_new_mac_key(mactype, NULL, mackey,
                                           (int)mackeylen);
        }
        if (mac_key == NULL
            || EVP_DigestSignInit_ex(rl->md_ctx, NULL, EVP_MD_get0_name(md),
                                     rl->libctx, rl->propq, mac_key,
                                     NULL) <= 0) {
            EVP_PKEY_free(mac_key);
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
        EVP_PKEY_free(mac_key);
    }

    if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_GCM_MODE) {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, NULL, enc)
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_GCM_SET_IV_FIXED,
                                   (int)ivlen, iv) <= 0) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_CCM_MODE) {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, NULL, NULL, enc)
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, 12,
                                   NULL) <= 0
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG,
                                   (int)taglen, NULL) <= 0
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_CCM_SET_IV_FIXED,
                                   (int)ivlen, iv) <= 0
            || !EVP_CipherInit_ex(ciph_ctx, NULL, NULL, key, NULL, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, iv, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
    /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) != 0
        && mackeylen != 0
        && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_MAC_KEY,
                               (int)mackeylen, mackey) <= 0) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
    if (EVP_CIPHER_get0_provider(ciph) != NULL
        && !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md))
        return OSSL_RECORD_RETURN_FATAL;

    /* Calculate the explicit IV length */
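    /*
     * For reference: in TLSv1.1+ CBC suites the explicit IV is one cipher
     * block, while for GCM and CCM the explicit nonce part carried in each
     * record is 8 bytes (EVP_GCM_TLS_EXPLICIT_IV_LEN and
     * EVP_CCM_TLS_EXPLICIT_IV_LEN).
     */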
    if (RLAYER_USE_EXPLICIT_IV(rl)) {
        int mode = EVP_CIPHER_CTX_get_mode(ciph_ctx);
        int eivlen = 0;

        if (mode == EVP_CIPH_CBC_MODE) {
            eivlen = EVP_CIPHER_CTX_get_iv_length(ciph_ctx);
            if (eivlen < 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_LIBRARY_BUG);
                return OSSL_RECORD_RETURN_FATAL;
            }
            if (eivlen <= 1)
                eivlen = 0;
        } else if (mode == EVP_CIPH_GCM_MODE) {
            /* Need explicit part of IV for GCM mode */
            eivlen = EVP_GCM_TLS_EXPLICIT_IV_LEN;
        } else if (mode == EVP_CIPH_CCM_MODE) {
            eivlen = EVP_CCM_TLS_EXPLICIT_IV_LEN;
        }
        rl->eivlen = (size_t)eivlen;
    }

    return OSSL_RECORD_RETURN_SUCCESS;
}

#define MAX_PADDING 256
/*-
 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
 * internal error, but not otherwise. It is the responsibility of the caller to
 * report a bad_record_mac - if appropriate (DTLS just drops the record).
 *
 * Returns:
 *    0: if the record is publicly invalid, or an internal error, or AEAD
 *       decryption failed, or Encrypt-then-mac decryption failed.
 *    1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
 */
static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
                       int sending, SSL_MAC_BUF *macs, size_t macsize)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    int pad = 0, tmpr, provided;
    size_t bs, ctr, padnum, loop;
    unsigned char padval;
    const EVP_CIPHER *enc;

    if (n_recs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    if (EVP_MD_CTX_get0_md(rl->md_ctx)) {
        int n = EVP_MD_CTX_get_size(rl->md_ctx);

        if (!ossl_assert(n >= 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }
    ds = rl->enc_ctx;
    if (!ossl_assert(rl->enc_ctx != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    enc = EVP_CIPHER_CTX_get0_cipher(rl->enc_ctx);

    if (sending) {
        int ivlen;

        /* For TLSv1.1 and later explicit IV */
        if (RLAYER_USE_EXPLICIT_IV(rl)
            && EVP_CIPHER_get_mode(enc) == EVP_CIPH_CBC_MODE)
            ivlen = EVP_CIPHER_get_iv_length(enc);
        else
            ivlen = 0;
        if (ivlen > 1) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                if (recs[ctr].data != recs[ctr].input) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                } else if (RAND_bytes_ex(rl->libctx, recs[ctr].input,
                                         ivlen, 0) <= 0) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    }
    if (!ossl_assert(enc != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    provided = (EVP_CIPHER_get0_provider(enc) != NULL);
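
    /*
     * With a provider-backed cipher the TLS record transform (explicit IV
     * handling, CBC padding and, where applicable, the MAC) is applied
     * inside the provider; on the legacy path that work is done here (see
     * the AEAD/padding code below).
     */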
    bs = EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds));

    if (n_recs > 1) {
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_PIPELINE) == 0) {
            /*
             * We shouldn't have been called with pipeline data if the
             * cipher doesn't support pipelining
             */
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
    for (ctr = 0; ctr < n_recs; ctr++) {
        reclen[ctr] = recs[ctr].length;

        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
            unsigned char *seq;

            seq = rl->sequence;

            if (rl->isdtls) {
                unsigned char dtlsseq[8], *p = dtlsseq;

                s2n(rl->epoch, p);
                memcpy(p, &seq[2], 6);
                memcpy(buf[ctr], dtlsseq, 8);
            } else {
                memcpy(buf[ctr], seq, 8);
                if (!tls_increment_sequence_ctr(rl)) {
                    /* RLAYERfatal already called */
                    return 0;
                }
            }

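            /*
             * Fill in the 13 bytes of TLS AAD handed to the cipher via
             * EVP_CTRL_AEAD_TLS1_AAD (cf. RFC 5246, section 6.2.3.3):
             *
             *   buf[0..7]   sequence number (for DTLS: epoch || sequence)
             *   buf[8]      record type
             *   buf[9..10]  protocol version
             *   buf[11..12] plaintext length
             *
             * When sending, the ctrl returns the number of bytes ("pad")
             * that encryption will add to the record.
             */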
            buf[ctr][8] = recs[ctr].type;
            buf[ctr][9] = (unsigned char)(rl->version >> 8);
            buf[ctr][10] = (unsigned char)(rl->version);
            buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
            buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
            pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                      EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
            if (pad <= 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }

            if (sending) {
                reclen[ctr] += pad;
                recs[ctr].length += pad;
            }
        } else if ((bs != 1) && sending && !provided) {
            /*
             * We only do this for legacy ciphers. Provided ciphers add the
             * padding on the provider side.
             */
            padnum = bs - (reclen[ctr] % bs);

            /* Add weird padding of up to 256 bytes */

            if (padnum > MAX_PADDING) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
            /* we need to add 'padnum' padding bytes of value padval */
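            /*
             * Worked example (illustrative only): with bs == 16 and
             * reclen[ctr] % 16 == 11, padnum is 5, so five bytes of value
             * 0x04 are appended - the standard TLS CBC padding format, where
             * every padding byte (including the final padding_length byte)
             * holds padnum - 1.
             */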
            padval = (unsigned char)(padnum - 1);
            for (loop = reclen[ctr]; loop < reclen[ctr] + padnum; loop++)
                recs[ctr].input[loop] = padval;
            reclen[ctr] += padnum;
            recs[ctr].length += padnum;
        }

        if (!sending) {
            if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) {
                /* Publicly invalid */
                return 0;
            }
        }
    }
    if (n_recs > 1) {
        unsigned char *data[SSL_MAX_PIPELINES];

        /* Set the output buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].data;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                (int)n_recs, data) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
        /* Set the input buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].input;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                (int)n_recs, data) <= 0
            || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                   (int)n_recs, reclen) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }

    if (!rl->isdtls && rl->tlstree) {
        int decrement_seq = 0;

        /*
         * When sending, seq is incremented after MAC calculation, so if we
         * are in ETM mode we can use seq 'as is' in the ctrl call. Otherwise
         * it has to be decremented inside the implementation first.
         */
        if (sending && !rl->use_etm)
            decrement_seq = 1;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_TLSTREE, decrement_seq,
                                rl->sequence) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    if (provided) {
        int outlen;

        /* Provided cipher - we do not support pipelining on this path */
        if (n_recs > 1) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
                              (unsigned int)reclen[0]))
            return 0;
        recs[0].length = outlen;

        /*
         * The length returned from EVP_CipherUpdate above is the actual
         * payload length. We need to adjust the data/input ptr to skip over
         * any explicit IV
         */
        if (!sending) {
            if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
            } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
            } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                recs[0].data += bs;
                recs[0].input += bs;
                recs[0].orig_len -= bs;
            }

            /* Now get a pointer to the MAC (if applicable) */
            if (macs != NULL) {
                OSSL_PARAM params[2], *p = params;

                /* Get the MAC */
                macs[0].alloced = 0;

                *p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
                                                      (void **)&macs[0].mac,
                                                      macsize);
                *p = OSSL_PARAM_construct_end();

                if (!EVP_CIPHER_CTX_get_params(ds, params)) {
                    /* Shouldn't normally happen */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR,
                                ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        /* Legacy cipher */

        tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
                          (unsigned int)reclen[0]);
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
            ? (tmpr < 0)
            : (tmpr == 0)) {
            /* AEAD can fail to verify MAC */
            return 0;
        }

        if (!sending) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                /* Adjust the record to remove the explicit IV/MAC/Tag */
                if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                    if (recs[ctr].length < bs)
                        return 0;
                    recs[ctr].data += bs;
                    recs[ctr].input += bs;
                    recs[ctr].length -= bs;
                    recs[ctr].orig_len -= bs;
                }

                /*
                 * If using Mac-then-encrypt, then this will succeed but
                 * with a random MAC if padding is invalid
                 */
                if (!tls1_cbc_remove_padding_and_mac(&recs[ctr].length,
                                         recs[ctr].orig_len,
                                         recs[ctr].data,
                                         (macs != NULL) ? &macs[ctr].mac : NULL,
                                         (macs != NULL) ? &macs[ctr].alloced
                                                        : NULL,
                                         bs,
                                         pad ? (size_t)pad : macsize,
                                         (EVP_CIPHER_get_flags(enc)
                                          & EVP_CIPH_FLAG_AEAD_CIPHER) != 0,
                                         rl->libctx))
                    return 0;
            }
        }
    }
    return 1;
}

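/*
 * Computes the MAC for |rec| and writes it to |md|. The sequence number is
 * incremented afterwards (TLS only; DTLS increments it elsewhere). Returns 1
 * on success, 0 on failure.
 */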
static int tls1_mac(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec, unsigned char *md,
                    int sending)
{
    unsigned char *seq = rl->sequence;
    EVP_MD_CTX *hash;
    size_t md_size;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    unsigned char header[13];
    int t;
    int ret = 0;

    hash = rl->md_ctx;

    t = EVP_MD_CTX_get_size(hash);
    if (!ossl_assert(t >= 0))
        return 0;
    md_size = t;

    if (rl->stream_mac) {
        mac_ctx = hash;
    } else {
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash)) {
            goto end;
        }
        mac_ctx = hmac;
    }

    if (!rl->isdtls
            && rl->tlstree
            && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0)
        goto end;

    if (rl->isdtls) {
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(rl->epoch, p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else {
        memcpy(header, seq, 8);
    }

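    /*
     * The 13-byte MAC header mirrors the AEAD AAD built in tls1_cipher():
     * sequence number (or DTLS epoch || sequence), then record type,
     * protocol version and plaintext length, as specified for the TLS MAC
     * input in RFC 5246, section 6.2.3.1.
     */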
    header[8] = rec->type;
    header[9] = (unsigned char)(rl->version >> 8);
    header[10] = (unsigned char)(rl->version);
    header[11] = (unsigned char)(rec->length >> 8);
    header[12] = (unsigned char)(rec->length & 0xff);

    if (!sending && !rl->use_etm
        && EVP_CIPHER_CTX_get_mode(rl->enc_ctx) == EVP_CIPH_CBC_MODE
        && ssl3_cbc_record_digest_supported(mac_ctx)) {
        OSSL_PARAM tls_hmac_params[2], *p = tls_hmac_params;

        *p++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE,
                                           &rec->orig_len);
        *p++ = OSSL_PARAM_construct_end();

        if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
                                     tls_hmac_params))
            goto end;
    }

    if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
        || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
        || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0)
        goto end;

    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "seq:\n");
        BIO_dump_indent(trc_out, seq, 8, 4);
        BIO_printf(trc_out, "rec:\n");
        BIO_dump_indent(trc_out, rec->data, rec->length, 4);
    } OSSL_TRACE_END(TLS);

    if (!rl->isdtls && !tls_increment_sequence_ctr(rl)) {
        /* RLAYERfatal already called */
        goto end;
    }

    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "md:\n");
        BIO_dump_indent(trc_out, md, md_size, 4);
    } OSSL_TRACE_END(TLS);
    ret = 1;
 end:
    EVP_MD_CTX_free(hmac);
    return ret;
}

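/*
 * Upper bound on the room needed for an empty-fragment prefix record:
 * alignment slack (if any) plus the maximum encryption overhead, the record
 * header and, where enabled, the compression overhead. Used by
 * tls1_allocate_write_buffers() below.
 */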
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#else
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#endif

/* This function is also used by the SSLv3 implementation */
int tls1_allocate_write_buffers(OSSL_RECORD_LAYER *rl,
                                OSSL_RECORD_TEMPLATE *templates,
                                size_t numtempl, size_t *prefix)
{
    /* Do we need to add an empty record prefix? */
    *prefix = rl->need_empty_fragments
              && templates[0].type == SSL3_RT_APPLICATION_DATA;

    /*
     * In the prefix case we can allocate a much smaller buffer. Otherwise we
     * just allocate the default buffer size
     */
    if (!tls_setup_write_buffer(rl, numtempl + *prefix,
                                *prefix ? MAX_PREFIX_LEN : 0, 0)) {
        /* RLAYERfatal() already called */
        return 0;
    }

    return 1;
}

/* This function is also used by the SSLv3 implementation */
int tls1_initialise_write_packets(OSSL_RECORD_LAYER *rl,
                                  OSSL_RECORD_TEMPLATE *templates,
                                  size_t numtempl,
                                  OSSL_RECORD_TEMPLATE *prefixtempl,
                                  WPACKET *pkt,
                                  SSL3_BUFFER *bufs,
                                  size_t *wpinited)
{
    size_t align = 0;
    SSL3_BUFFER *wb;
    size_t prefix;

    /* Do we need to add an empty record prefix? */
    prefix = rl->need_empty_fragments
             && templates[0].type == SSL3_RT_APPLICATION_DATA;

    if (prefix) {
        /*
         * countermeasure against known-IV weakness in CBC ciphersuites (see
         * http://www.openssl.org/~bodo/tls-cbc.txt)
         */
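        /*
         * The prefix is a zero-length application data record. Encrypting it
         * consumes the predictable chained IV, so the IV actually used for
         * the caller's data is no longer predictable at the point the
         * plaintext is chosen.
         */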
        prefixtempl->buf = NULL;
        prefixtempl->version = templates[0].version;
        prefixtempl->buflen = 0;
        prefixtempl->type = SSL3_RT_APPLICATION_DATA;

        wb = &bufs[0];

#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
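        /*
         * Pick the offset so that the record payload, which starts
         * SSL3_RT_HEADER_LENGTH bytes in, ends up aligned:
         * buf + align + SSL3_RT_HEADER_LENGTH is a multiple of
         * SSL3_ALIGN_PAYLOAD.
         */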
        align = (size_t)SSL3_BUFFER_get_buf(wb) + SSL3_RT_HEADER_LENGTH;
        align = SSL3_ALIGN_PAYLOAD - 1 - ((align - 1) % SSL3_ALIGN_PAYLOAD);
#endif
        SSL3_BUFFER_set_offset(wb, align);

        if (!WPACKET_init_static_len(&pkt[0], SSL3_BUFFER_get_buf(wb),
                                     SSL3_BUFFER_get_len(wb), 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
        *wpinited = 1;
        if (!WPACKET_allocate_bytes(&pkt[0], align, NULL)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    return tls_initialise_write_packets_default(rl, templates, numtempl,
                                                NULL,
                                                pkt + prefix, bufs + prefix,
                                                wpinited);
}

/* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
struct record_functions_st tls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    tls_get_more_records,
    tls_default_validate_record_header,
    tls_default_post_process_record,
    tls_get_max_records_multiblock,
    tls_write_records_multiblock, /* Defined in tls_multib.c */
    tls1_allocate_write_buffers,
    tls1_initialise_write_packets,
    NULL,
    tls_prepare_record_header_default,
    NULL,
    tls_prepare_for_encryption_default,
    tls_post_encryption_processing_default,
    NULL
};

struct record_functions_st dtls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    dtls_get_more_records,
    NULL,
    NULL,
    NULL,
    dtls_write_records,
    /*
     * Don't use tls1_allocate_write_buffers since that handles empty fragment
     * records which aren't needed in DTLS. We just use the default allocation
     * instead.
     */
    tls_allocate_write_buffers_default,
    /* Don't use tls1_initialise_write_packets for the same reason as above */
    tls_initialise_write_packets_default,
    NULL,
    dtls_prepare_record_header,
    NULL,
    tls_prepare_for_encryption_default,
    tls_post_encryption_processing_default,
    NULL
};