/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/evp.h>
#include <openssl/core_names.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>
#include "../../ssl_local.h"
#include "../record_local.h"
#include "recmethod_local.h"

static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 int mactype,
                                 const EVP_MD *md,
                                 COMP_METHOD *comp)
{
    EVP_CIPHER_CTX *ciph_ctx;
    EVP_PKEY *mac_key;
    int enc = (rl->direction == OSSL_RECORD_DIRECTION_WRITE) ? 1 : 0;

    if (level != OSSL_RECORD_PROTECTION_LEVEL_APPLICATION)
        return OSSL_RECORD_RETURN_FATAL;

    if ((rl->enc_ctx = EVP_CIPHER_CTX_new()) == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_EVP_LIB);
        return OSSL_RECORD_RETURN_FATAL;
    }

    ciph_ctx = rl->enc_ctx;

    rl->md_ctx = EVP_MD_CTX_new();
    if (rl->md_ctx == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
#ifndef OPENSSL_NO_COMP
    if (comp != NULL) {
        rl->compctx = COMP_CTX_new(comp);
        if (rl->compctx == NULL) {
            ERR_raise(ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
#endif

    /*
     * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
     * setting up the MAC key.
     */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) == 0) {
        if (mactype == EVP_PKEY_HMAC) {
            mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
                                                      rl->propq, mackey,
                                                      mackeylen);
        } else {
            /*
             * If it's not HMAC then the only other types of MAC we support
             * are the GOST MACs, so we need to use the old style way of
             * creating a MAC key.
             */
            mac_key = EVP_PKEY_new_mac_key(mactype, NULL, mackey,
                                           (int)mackeylen);
        }
        if (mac_key == NULL
            || EVP_DigestSignInit_ex(rl->md_ctx, NULL, EVP_MD_get0_name(md),
                                     rl->libctx, rl->propq, mac_key,
                                     NULL) <= 0) {
            EVP_PKEY_free(mac_key);
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
        EVP_PKEY_free(mac_key);
    }

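    /*
     * Initialise the cipher context. Note the ordering constraints: for GCM
     * the key can be set first and the fixed part of the IV supplied
     * afterwards via ctrl, whereas for CCM the IV length and tag length must
     * be set before the key.
     */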
    if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_GCM_MODE) {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, NULL, enc)
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_GCM_SET_IV_FIXED,
                                       (int)ivlen, iv) <= 0) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_CCM_MODE) {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, NULL, NULL, enc)
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, 12,
                                       NULL) <= 0
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG,
                                       (int)taglen, NULL) <= 0
                || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_CCM_SET_IV_FIXED,
                                       (int)ivlen, iv) <= 0
                || !EVP_CipherInit_ex(ciph_ctx, NULL, NULL, key, NULL, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else {
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, iv, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
    /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) != 0
        && mackeylen != 0
        && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_MAC_KEY,
                               (int)mackeylen, mackey) <= 0) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
    if (EVP_CIPHER_get0_provider(ciph) != NULL
            && !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md))
        return OSSL_RECORD_RETURN_FATAL;

    /* Calculate the explicit IV length */
    if (RLAYER_USE_EXPLICIT_IV(rl)) {
        int mode = EVP_CIPHER_CTX_get_mode(ciph_ctx);
        int eivlen = 0;

        if (mode == EVP_CIPH_CBC_MODE) {
            eivlen = EVP_CIPHER_CTX_get_iv_length(ciph_ctx);
            if (eivlen < 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_LIBRARY_BUG);
                return OSSL_RECORD_RETURN_FATAL;
            }
            if (eivlen <= 1)
                eivlen = 0;
        } else if (mode == EVP_CIPH_GCM_MODE) {
            /* Need explicit part of IV for GCM mode */
            eivlen = EVP_GCM_TLS_EXPLICIT_IV_LEN;
        } else if (mode == EVP_CIPH_CCM_MODE) {
            eivlen = EVP_CCM_TLS_EXPLICIT_IV_LEN;
        }
        rl->eivlen = (size_t)eivlen;
    }

    return OSSL_RECORD_RETURN_SUCCESS;
}

#define MAX_PADDING 256
/*-
 * tls1_cipher encrypts/decrypts the |n_recs| records in |recs|. Calls
 * RLAYERfatal on an internal error, but not otherwise. It is the
 * responsibility of the caller to report a bad_record_mac - if appropriate
 * (DTLS just drops the record).
 *
 * Returns:
 *    0: if the record is publicly invalid, or an internal error, or AEAD
 *       decryption failed, or Encrypt-then-MAC decryption failed.
 *    1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
 */
static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
                       int sending, SSL_MAC_BUF *macs, size_t macsize)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    int i, pad = 0, tmpr, provided;
    size_t bs, ctr, padnum, loop;
    unsigned char padval;
    const EVP_CIPHER *enc;

    if (n_recs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    if (EVP_MD_CTX_get0_md(rl->md_ctx)) {
        int n = EVP_MD_CTX_get_size(rl->md_ctx);

        if (!ossl_assert(n >= 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }
    ds = rl->enc_ctx;
    if (!ossl_assert(rl->enc_ctx != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    enc = EVP_CIPHER_CTX_get0_cipher(rl->enc_ctx);

    if (sending) {
        int ivlen;

        /* For TLSv1.1 and later explicit IV */
        if (RLAYER_USE_EXPLICIT_IV(rl)
            && EVP_CIPHER_get_mode(enc) == EVP_CIPH_CBC_MODE)
            ivlen = EVP_CIPHER_get_iv_length(enc);
        else
            ivlen = 0;
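        /*
         * Generate a fresh random explicit IV for each record. The IV is
         * written to the front of the record, which requires the operation
         * to be in place (data == input).
         */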
        if (ivlen > 1) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                if (recs[ctr].data != recs[ctr].input) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                } else if (RAND_bytes_ex(rl->libctx, recs[ctr].input,
                                         ivlen, 0) <= 0) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    }
    if (!ossl_assert(enc != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    provided = (EVP_CIPHER_get0_provider(enc) != NULL);

    bs = EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds));

    if (n_recs > 1) {
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_PIPELINE) == 0) {
            /*
             * We shouldn't have been called with pipeline data if the
             * cipher doesn't support pipelining
             */
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
    for (ctr = 0; ctr < n_recs; ctr++) {
        reclen[ctr] = recs[ctr].length;

        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
            unsigned char *seq;

            seq = rl->sequence;

            if (rl->isdtls) {
                unsigned char dtlsseq[8], *p = dtlsseq;

                s2n(rl->epoch, p);
                memcpy(p, &seq[2], 6);
                memcpy(buf[ctr], dtlsseq, 8);
            } else {
                memcpy(buf[ctr], seq, 8);
                for (i = 7; i >= 0; i--) { /* increment */
                    ++seq[i];
                    if (seq[i] != 0)
                        break;
                }
            }

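            /*
             * Complete the 13 byte (EVP_AEAD_TLS1_AAD_LEN) pseudo-header
             * used as additional authenticated data:
             *   bytes  0..7  sequence number (DTLS: epoch || sequence)
             *   byte   8     record type
             *   bytes  9..10 protocol version
             *   bytes 11..12 payload length
             */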
            buf[ctr][8] = recs[ctr].type;
            buf[ctr][9] = (unsigned char)(rl->version >> 8);
            buf[ctr][10] = (unsigned char)(rl->version);
            buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
            buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
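            /*
             * Pass the AAD to the cipher. On success the ctrl returns the
             * number of bytes (e.g. the AEAD tag) that encryption will add
             * to the record, which we account for in reclen below.
             */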
            pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                      EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
            if (pad <= 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }

            if (sending) {
                reclen[ctr] += pad;
                recs[ctr].length += pad;
            }
        } else if ((bs != 1) && sending && !provided) {
            /*
             * We only do this for legacy ciphers. Provided ciphers add the
             * padding on the provider side.
             */
            padnum = bs - (reclen[ctr] % bs);

            /* Add weird padding of up to 256 bytes */

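            /*
             * TLS CBC padding consists of padnum bytes each of value
             * padnum - 1; the final byte doubles as the padding length.
             * For example, with padnum == 3 the record ends 0x02 0x02 0x02.
             */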
            if (padnum > MAX_PADDING) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
            /* we need to add 'padnum' padding bytes of value padval */
            padval = (unsigned char)(padnum - 1);
            for (loop = reclen[ctr]; loop < reclen[ctr] + padnum; loop++)
                recs[ctr].input[loop] = padval;
            reclen[ctr] += padnum;
            recs[ctr].length += padnum;
        }

        if (!sending) {
            if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) {
                /* Publicly invalid */
                return 0;
            }
        }
    }
    if (n_recs > 1) {
        unsigned char *data[SSL_MAX_PIPELINES];

        /* Set the output buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].data;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                (int)n_recs, data) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
        /* Set the input buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].input;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                (int)n_recs, data) <= 0
            || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                   (int)n_recs, reclen) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }

    if (!rl->isdtls && rl->tlstree) {
        unsigned char *seq;
        int decrement_seq = 0;

        /*
         * When sending, seq is incremented after MAC calculation.
         * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
         * Otherwise we have to decrease it in the implementation
         */
        if (sending && !rl->use_etm)
            decrement_seq = 1;

        seq = rl->sequence;
        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_TLSTREE, decrement_seq, seq) <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    if (provided) {
        int outlen;

        /* Provided cipher - we do not support pipelining on this path */
        if (n_recs > 1) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

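        /*
         * A single EVP_CipherUpdate call processes the whole record: the
         * provider applies or checks the MAC/tag and any padding itself
         * (see the TLS parameters set via ossl_set_tls_provider_parameters()
         * in tls1_set_crypto_state() above).
         */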
        if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
                              (unsigned int)reclen[0]))
            return 0;
        recs[0].length = outlen;

        /*
         * The length returned from EVP_CipherUpdate above is the actual
         * payload length. We need to adjust the data/input ptr to skip over
         * any explicit IV
         */
        if (!sending) {
            if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
            } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
            } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                recs[0].data += bs;
                recs[0].input += bs;
                recs[0].orig_len -= bs;
            }

            /* Now get a pointer to the MAC (if applicable) */
            if (macs != NULL) {
                OSSL_PARAM params[2], *p = params;

                /* Get the MAC */
                macs[0].alloced = 0;

                *p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
                                                      (void **)&macs[0].mac,
                                                      macsize);
                *p = OSSL_PARAM_construct_end();

                if (!EVP_CIPHER_CTX_get_params(ds, params)) {
                    /* Shouldn't normally happen */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR,
                                ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        /* Legacy cipher */

        tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
                          (unsigned int)reclen[0]);
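        /*
         * Custom ciphers (EVP_CIPH_FLAG_CUSTOM_CIPHER) signal failure with a
         * negative return value; all other ciphers return 0 on failure.
         */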
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
            ? (tmpr < 0)
            : (tmpr == 0)) {
            /* AEAD can fail to verify MAC */
            return 0;
        }

        if (!sending) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                /* Adjust the record to remove the explicit IV/MAC/Tag */
                if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                    if (recs[ctr].length < bs)
                        return 0;
                    recs[ctr].data += bs;
                    recs[ctr].input += bs;
                    recs[ctr].length -= bs;
                    recs[ctr].orig_len -= bs;
                }

                /*
                 * If using Mac-then-encrypt, then this will succeed but
                 * with a random MAC if padding is invalid
                 */
                if (!tls1_cbc_remove_padding_and_mac(&recs[ctr].length,
                                                     recs[ctr].orig_len,
                                                     recs[ctr].data,
                                                     (macs != NULL) ? &macs[ctr].mac : NULL,
                                                     (macs != NULL) ? &macs[ctr].alloced
                                                                    : NULL,
                                                     bs,
                                                     pad ? (size_t)pad : macsize,
                                                     (EVP_CIPHER_get_flags(enc)
                                                      & EVP_CIPH_FLAG_AEAD_CIPHER) != 0,
                                                     rl->libctx))
                    return 0;
            }
        }
    }
    return 1;
}

static int tls1_mac(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec, unsigned char *md,
                    int sending)
{
    unsigned char *seq = rl->sequence;
    EVP_MD_CTX *hash;
    size_t md_size;
    int i;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    unsigned char header[13];
    int t;
    int ret = 0;

    hash = rl->md_ctx;

    t = EVP_MD_CTX_get_size(hash);
    if (!ossl_assert(t >= 0))
        return 0;
    md_size = t;

    if (rl->stream_mac) {
        mac_ctx = hash;
    } else {
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash)) {
            goto end;
        }
        mac_ctx = hmac;
    }

    if (!rl->isdtls
            && rl->tlstree
            && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0)
        goto end;

    if (rl->isdtls) {
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(rl->epoch, p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else {
        memcpy(header, seq, 8);
    }

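    /*
     * The remainder of the 13 byte MAC header: record type (1 byte),
     * protocol version (2 bytes) and payload length (2 bytes) - the same
     * layout as the AEAD additional data in tls1_cipher().
     */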
    header[8] = rec->type;
    header[9] = (unsigned char)(rl->version >> 8);
    header[10] = (unsigned char)(rl->version);
    header[11] = (unsigned char)(rec->length >> 8);
    header[12] = (unsigned char)(rec->length & 0xff);

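    /*
     * For MAC-then-encrypt CBC records, tell the MAC the true amount of
     * data so that the digest can be computed in constant time regardless
     * of the padding length (a mitigation for timing attacks such as
     * Lucky 13).
     */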
    if (!sending && !rl->use_etm
        && EVP_CIPHER_CTX_get_mode(rl->enc_ctx) == EVP_CIPH_CBC_MODE
        && ssl3_cbc_record_digest_supported(mac_ctx)) {
        OSSL_PARAM tls_hmac_params[2], *p = tls_hmac_params;

        *p++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE,
                                           &rec->orig_len);
        *p++ = OSSL_PARAM_construct_end();

        if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
                                     tls_hmac_params))
            goto end;
    }

    if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
        || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
        || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0)
        goto end;

    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "seq:\n");
        BIO_dump_indent(trc_out, seq, 8, 4);
        BIO_printf(trc_out, "rec:\n");
        BIO_dump_indent(trc_out, rec->data, rec->length, 4);
    } OSSL_TRACE_END(TLS);

    if (!rl->isdtls) {
        for (i = 7; i >= 0; i--) {
            ++seq[i];
            if (seq[i] != 0)
                break;
        }
    }
    OSSL_TRACE_BEGIN(TLS) {
        BIO_printf(trc_out, "md:\n");
        BIO_dump_indent(trc_out, md, md_size, 4);
    } OSSL_TRACE_END(TLS);
    ret = 1;
 end:
    EVP_MD_CTX_free(hmac);
    return ret;
}

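/*
 * Maximum size needed for an empty fragment prefix record: alignment slack
 * (when payload alignment is enabled), the maximum encryption overhead, the
 * record header and, when compression is enabled, the maximum compression
 * overhead.
 */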
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#else
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#endif

/* This function is also used by the SSLv3 implementation */
int tls1_allocate_write_buffers(OSSL_RECORD_LAYER *rl,
                                OSSL_RECORD_TEMPLATE *templates,
                                size_t numtempl, size_t *prefix)
{
    /* Do we need to add an empty record prefix? */
    *prefix = rl->need_empty_fragments
              && templates[0].type == SSL3_RT_APPLICATION_DATA;

    /*
     * In the prefix case we can allocate a much smaller buffer. Otherwise we
     * just allocate the default buffer size
     */
    if (!tls_setup_write_buffer(rl, numtempl + *prefix,
                                *prefix ? MAX_PREFIX_LEN : 0, 0)) {
        /* RLAYERfatal() already called */
        return 0;
    }

    return 1;
}

/* This function is also used by the SSLv3 implementation */
int tls1_initialise_write_packets(OSSL_RECORD_LAYER *rl,
                                  OSSL_RECORD_TEMPLATE *templates,
                                  size_t numtempl,
                                  OSSL_RECORD_TEMPLATE *prefixtempl,
                                  WPACKET *pkt,
                                  SSL3_BUFFER *bufs,
                                  size_t *wpinited)
{
    size_t align = 0;
    SSL3_BUFFER *wb;
    size_t prefix;

    /* Do we need to add an empty record prefix? */
    prefix = rl->need_empty_fragments
             && templates[0].type == SSL3_RT_APPLICATION_DATA;

    if (prefix) {
        /*
         * countermeasure against known-IV weakness in CBC ciphersuites (see
         * http://www.openssl.org/~bodo/tls-cbc.txt)
         */
        prefixtempl->buf = NULL;
        prefixtempl->version = templates[0].version;
        prefixtempl->buflen = 0;
        prefixtempl->type = SSL3_RT_APPLICATION_DATA;

        wb = &bufs[0];

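        /*
         * Offset the record so that the payload following the record header
         * starts on an SSL3_ALIGN_PAYLOAD byte boundary.
         */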
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
        align = (size_t)SSL3_BUFFER_get_buf(wb) + SSL3_RT_HEADER_LENGTH;
        align = SSL3_ALIGN_PAYLOAD - 1
                - ((align - 1) % SSL3_ALIGN_PAYLOAD);
#endif
        SSL3_BUFFER_set_offset(wb, align);

        if (!WPACKET_init_static_len(&pkt[0], SSL3_BUFFER_get_buf(wb),
                                     SSL3_BUFFER_get_len(wb), 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
        *wpinited = 1;
        if (!WPACKET_allocate_bytes(&pkt[0], align, NULL)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    return tls_initialise_write_packets_default(rl, templates, numtempl,
                                                NULL,
                                                pkt + prefix, bufs + prefix,
                                                wpinited);
}

/* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
struct record_functions_st tls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    tls_get_more_records,
    tls_default_validate_record_header,
    tls_default_post_process_record,
    tls_get_max_records_multiblock,
    tls_write_records_multiblock, /* Defined in tls_multib.c */
    tls1_allocate_write_buffers,
    tls1_initialise_write_packets,
    NULL,
    tls_prepare_record_header_default,
    NULL,
    tls_prepare_for_encryption_default,
    tls_post_encryption_processing_default,
    NULL
};

struct record_functions_st dtls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    dtls_get_more_records,
    NULL,
    NULL,
    NULL,
    dtls_write_records,
    /*
     * Don't use tls1_allocate_write_buffers since that handles empty fragment
     * records which aren't needed in DTLS. We just use the default allocation
     * instead.
     */
    tls_allocate_write_buffers_default,
    /* Don't use tls1_initialise_write_packets for same reason as above */
    tls_initialise_write_packets_default,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL
};