/*
 * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
10 #include <openssl/evp.h>
11 #include <openssl/core_names.h>
12 #include <openssl/rand.h>
13 #include <openssl/ssl.h>
14 #include "../../ssl_local.h"
15 #include "../record_local.h"
16 #include "recmethod_local.h"
18 static int tls1_set_crypto_state(OSSL_RECORD_LAYER
*rl
, int level
,
19 unsigned char *key
, size_t keylen
,
20 unsigned char *iv
, size_t ivlen
,
21 unsigned char *mackey
, size_t mackeylen
,
22 const EVP_CIPHER
*ciph
,
28 EVP_CIPHER_CTX
*ciph_ctx
;
30 int enc
= (rl
->direction
== OSSL_RECORD_DIRECTION_WRITE
) ? 1 : 0;
32 if (level
!= OSSL_RECORD_PROTECTION_LEVEL_APPLICATION
)
33 return OSSL_RECORD_RETURN_FATAL
;
35 if ((rl
->enc_ctx
= EVP_CIPHER_CTX_new()) == NULL
) {
36 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_EVP_LIB
);
37 return OSSL_RECORD_RETURN_FATAL
;
40 ciph_ctx
= rl
->enc_ctx
;
42 rl
->md_ctx
= EVP_MD_CTX_new();
43 if (rl
->md_ctx
== NULL
) {
44 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
45 return OSSL_RECORD_RETURN_FATAL
;
47 #ifndef OPENSSL_NO_COMP
49 rl
->compctx
= COMP_CTX_new(comp
);
50 if (rl
->compctx
== NULL
) {
51 ERR_raise(ERR_LIB_SSL
, SSL_R_COMPRESSION_LIBRARY_ERROR
);
52 return OSSL_RECORD_RETURN_FATAL
;
58 * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
59 * setting up the MAC key.
61 if ((EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
) == 0) {
62 if (mactype
== EVP_PKEY_HMAC
) {
63 mac_key
= EVP_PKEY_new_raw_private_key_ex(rl
->libctx
, "HMAC",
68 * If its not HMAC then the only other types of MAC we support are
69 * the GOST MACs, so we need to use the old style way of creating
72 mac_key
= EVP_PKEY_new_mac_key(mactype
, NULL
, mackey
,
76 || EVP_DigestSignInit_ex(rl
->md_ctx
, NULL
, EVP_MD_get0_name(md
),
77 rl
->libctx
, rl
->propq
, mac_key
,
79 EVP_PKEY_free(mac_key
);
80 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
81 return OSSL_RECORD_RETURN_FATAL
;
83 EVP_PKEY_free(mac_key
);
86 if (EVP_CIPHER_get_mode(ciph
) == EVP_CIPH_GCM_MODE
) {
87 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, key
, NULL
, enc
)
88 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_GCM_SET_IV_FIXED
,
89 (int)ivlen
, iv
) <= 0) {
90 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
91 return OSSL_RECORD_RETURN_FATAL
;
93 } else if (EVP_CIPHER_get_mode(ciph
) == EVP_CIPH_CCM_MODE
) {
94 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, NULL
, NULL
, enc
)
95 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_IVLEN
, 12,
97 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_TAG
,
98 (int)taglen
, NULL
) <= 0
99 || EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_CCM_SET_IV_FIXED
,
101 || !EVP_CipherInit_ex(ciph_ctx
, NULL
, NULL
, key
, NULL
, enc
)) {
102 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
103 return OSSL_RECORD_RETURN_FATAL
;
106 if (!EVP_CipherInit_ex(ciph_ctx
, ciph
, NULL
, key
, iv
, enc
)) {
107 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
108 return OSSL_RECORD_RETURN_FATAL
;
111 /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
112 if ((EVP_CIPHER_get_flags(ciph
) & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0
114 && EVP_CIPHER_CTX_ctrl(ciph_ctx
, EVP_CTRL_AEAD_SET_MAC_KEY
,
115 (int)mackeylen
, mackey
) <= 0) {
116 ERR_raise(ERR_LIB_SSL
, ERR_R_INTERNAL_ERROR
);
117 return OSSL_RECORD_RETURN_FATAL
;
119 if (EVP_CIPHER_get0_provider(ciph
) != NULL
120 && !ossl_set_tls_provider_parameters(rl
, ciph_ctx
, ciph
, md
))
121 return OSSL_RECORD_RETURN_FATAL
;
123 /* Calculate the explict IV length */
124 if (RLAYER_USE_EXPLICIT_IV(rl
)) {
125 int mode
= EVP_CIPHER_CTX_get_mode(ciph_ctx
);
128 if (mode
== EVP_CIPH_CBC_MODE
) {
129 eivlen
= EVP_CIPHER_CTX_get_iv_length(ciph_ctx
);
131 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_LIBRARY_BUG
);
132 return OSSL_RECORD_RETURN_FATAL
;
136 } else if (mode
== EVP_CIPH_GCM_MODE
) {
137 /* Need explicit part of IV for GCM mode */
138 eivlen
= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
139 } else if (mode
== EVP_CIPH_CCM_MODE
) {
140 eivlen
= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
142 rl
->eivlen
= (size_t)eivlen
;
145 return OSSL_RECORD_RETURN_SUCCESS
;
148 #define MAX_PADDING 256
150 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
151 * internal error, but not otherwise. It is the responsibility of the caller to
152 * report a bad_record_mac - if appropriate (DTLS just drops the record).
155 * 0: if the record is publicly invalid, or an internal error, or AEAD
156 * decryption failed, or Encrypt-then-mac decryption failed.
157 * 1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
159 static int tls1_cipher(OSSL_RECORD_LAYER
*rl
, SSL3_RECORD
*recs
, size_t n_recs
,
160 int sending
, SSL_MAC_BUF
*macs
, size_t macsize
)
163 size_t reclen
[SSL_MAX_PIPELINES
];
164 unsigned char buf
[SSL_MAX_PIPELINES
][EVP_AEAD_TLS1_AAD_LEN
];
165 int i
, pad
= 0, tmpr
, provided
;
166 size_t bs
, ctr
, padnum
, loop
;
167 unsigned char padval
;
168 const EVP_CIPHER
*enc
;
171 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
175 if (EVP_MD_CTX_get0_md(rl
->md_ctx
)) {
176 int n
= EVP_MD_CTX_get_size(rl
->md_ctx
);
178 if (!ossl_assert(n
>= 0)) {
179 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
184 if (!ossl_assert(rl
->enc_ctx
!= NULL
)) {
185 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
189 enc
= EVP_CIPHER_CTX_get0_cipher(rl
->enc_ctx
);
194 /* For TLSv1.1 and later explicit IV */
195 if (RLAYER_USE_EXPLICIT_IV(rl
)
196 && EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CBC_MODE
)
197 ivlen
= EVP_CIPHER_get_iv_length(enc
);
201 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
202 if (recs
[ctr
].data
!= recs
[ctr
].input
) {
203 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
205 } else if (RAND_bytes_ex(rl
->libctx
, recs
[ctr
].input
,
207 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
213 if (!ossl_assert(enc
!= NULL
)) {
214 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
218 provided
= (EVP_CIPHER_get0_provider(enc
) != NULL
);
220 bs
= EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds
));
223 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
224 & EVP_CIPH_FLAG_PIPELINE
) == 0) {
226 * We shouldn't have been called with pipeline data if the
227 * cipher doesn't support pipelining
229 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
233 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
234 reclen
[ctr
] = recs
[ctr
].length
;
236 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
237 & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0) {
243 unsigned char dtlsseq
[8], *p
= dtlsseq
;
246 memcpy(p
, &seq
[2], 6);
247 memcpy(buf
[ctr
], dtlsseq
, 8);
249 memcpy(buf
[ctr
], seq
, 8);
250 for (i
= 7; i
>= 0; i
--) { /* increment */
257 buf
[ctr
][8] = recs
[ctr
].type
;
258 buf
[ctr
][9] = (unsigned char)(rl
->version
>> 8);
259 buf
[ctr
][10] = (unsigned char)(rl
->version
);
260 buf
[ctr
][11] = (unsigned char)(recs
[ctr
].length
>> 8);
261 buf
[ctr
][12] = (unsigned char)(recs
[ctr
].length
& 0xff);
262 pad
= EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_AEAD_TLS1_AAD
,
263 EVP_AEAD_TLS1_AAD_LEN
, buf
[ctr
]);
265 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
271 recs
[ctr
].length
+= pad
;
273 } else if ((bs
!= 1) && sending
&& !provided
) {
275 * We only do this for legacy ciphers. Provided ciphers add the
276 * padding on the provider side.
278 padnum
= bs
- (reclen
[ctr
] % bs
);
280 /* Add weird padding of up to 256 bytes */
282 if (padnum
> MAX_PADDING
) {
283 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
286 /* we need to add 'padnum' padding bytes of value padval */
287 padval
= (unsigned char)(padnum
- 1);
288 for (loop
= reclen
[ctr
]; loop
< reclen
[ctr
] + padnum
; loop
++)
289 recs
[ctr
].input
[loop
] = padval
;
290 reclen
[ctr
] += padnum
;
291 recs
[ctr
].length
+= padnum
;
295 if (reclen
[ctr
] == 0 || reclen
[ctr
] % bs
!= 0) {
296 /* Publicly invalid */
302 unsigned char *data
[SSL_MAX_PIPELINES
];
304 /* Set the output buffers */
305 for (ctr
= 0; ctr
< n_recs
; ctr
++)
306 data
[ctr
] = recs
[ctr
].data
;
308 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS
,
309 (int)n_recs
, data
) <= 0) {
310 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
313 /* Set the input buffers */
314 for (ctr
= 0; ctr
< n_recs
; ctr
++)
315 data
[ctr
] = recs
[ctr
].input
;
317 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_INPUT_BUFS
,
318 (int)n_recs
, data
) <= 0
319 || EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_SET_PIPELINE_INPUT_LENS
,
320 (int)n_recs
, reclen
) <= 0) {
321 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, SSL_R_PIPELINE_FAILURE
);
326 if (!rl
->isdtls
&& rl
->tlstree
) {
328 int decrement_seq
= 0;
331 * When sending, seq is incremented after MAC calculation.
332 * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
333 * Otherwise we have to decrease it in the implementation
335 if (sending
&& !rl
->use_etm
)
339 if (EVP_CIPHER_CTX_ctrl(ds
, EVP_CTRL_TLSTREE
, decrement_seq
, seq
) <= 0) {
340 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
348 /* Provided cipher - we do not support pipelining on this path */
350 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
354 if (!EVP_CipherUpdate(ds
, recs
[0].data
, &outlen
, recs
[0].input
,
355 (unsigned int)reclen
[0]))
357 recs
[0].length
= outlen
;
360 * The length returned from EVP_CipherUpdate above is the actual
361 * payload length. We need to adjust the data/input ptr to skip over
365 if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_GCM_MODE
) {
366 recs
[0].data
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
367 recs
[0].input
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
368 } else if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CCM_MODE
) {
369 recs
[0].data
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
370 recs
[0].input
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
371 } else if (bs
!= 1 && RLAYER_USE_EXPLICIT_IV(rl
)) {
374 recs
[0].orig_len
-= bs
;
377 /* Now get a pointer to the MAC (if applicable) */
379 OSSL_PARAM params
[2], *p
= params
;
384 *p
++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC
,
385 (void **)&macs
[0].mac
,
387 *p
= OSSL_PARAM_construct_end();
389 if (!EVP_CIPHER_CTX_get_params(ds
, params
)) {
390 /* Shouldn't normally happen */
391 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
,
392 ERR_R_INTERNAL_ERROR
);
400 tmpr
= EVP_Cipher(ds
, recs
[0].data
, recs
[0].input
,
401 (unsigned int)reclen
[0]);
402 if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds
))
403 & EVP_CIPH_FLAG_CUSTOM_CIPHER
) != 0
406 /* AEAD can fail to verify MAC */
411 for (ctr
= 0; ctr
< n_recs
; ctr
++) {
412 /* Adjust the record to remove the explicit IV/MAC/Tag */
413 if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_GCM_MODE
) {
414 recs
[ctr
].data
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
415 recs
[ctr
].input
+= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
416 recs
[ctr
].length
-= EVP_GCM_TLS_EXPLICIT_IV_LEN
;
417 } else if (EVP_CIPHER_get_mode(enc
) == EVP_CIPH_CCM_MODE
) {
418 recs
[ctr
].data
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
419 recs
[ctr
].input
+= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
420 recs
[ctr
].length
-= EVP_CCM_TLS_EXPLICIT_IV_LEN
;
421 } else if (bs
!= 1 && RLAYER_USE_EXPLICIT_IV(rl
)) {
422 if (recs
[ctr
].length
< bs
)
424 recs
[ctr
].data
+= bs
;
425 recs
[ctr
].input
+= bs
;
426 recs
[ctr
].length
-= bs
;
427 recs
[ctr
].orig_len
-= bs
;
431 * If using Mac-then-encrypt, then this will succeed but
432 * with a random MAC if padding is invalid
434 if (!tls1_cbc_remove_padding_and_mac(&recs
[ctr
].length
,
437 (macs
!= NULL
) ? &macs
[ctr
].mac
: NULL
,
438 (macs
!= NULL
) ? &macs
[ctr
].alloced
441 pad
? (size_t)pad
: macsize
,
442 (EVP_CIPHER_get_flags(enc
)
443 & EVP_CIPH_FLAG_AEAD_CIPHER
) != 0,
452 static int tls1_mac(OSSL_RECORD_LAYER
*rl
, SSL3_RECORD
*rec
, unsigned char *md
,
455 unsigned char *seq
= rl
->sequence
;
459 EVP_MD_CTX
*hmac
= NULL
, *mac_ctx
;
460 unsigned char header
[13];
466 t
= EVP_MD_CTX_get_size(hash
);
467 if (!ossl_assert(t
>= 0))
471 if (rl
->stream_mac
) {
474 hmac
= EVP_MD_CTX_new();
475 if (hmac
== NULL
|| !EVP_MD_CTX_copy(hmac
, hash
)) {
483 && EVP_MD_CTX_ctrl(mac_ctx
, EVP_MD_CTRL_TLSTREE
, 0, seq
) <= 0)
487 unsigned char dtlsseq
[8], *p
= dtlsseq
;
490 memcpy(p
, &seq
[2], 6);
492 memcpy(header
, dtlsseq
, 8);
494 memcpy(header
, seq
, 8);
497 header
[8] = rec
->type
;
498 header
[9] = (unsigned char)(rl
->version
>> 8);
499 header
[10] = (unsigned char)(rl
->version
);
500 header
[11] = (unsigned char)(rec
->length
>> 8);
501 header
[12] = (unsigned char)(rec
->length
& 0xff);
503 if (!sending
&& !rl
->use_etm
504 && EVP_CIPHER_CTX_get_mode(rl
->enc_ctx
) == EVP_CIPH_CBC_MODE
505 && ssl3_cbc_record_digest_supported(mac_ctx
)) {
506 OSSL_PARAM tls_hmac_params
[2], *p
= tls_hmac_params
;
508 *p
++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE
,
510 *p
++ = OSSL_PARAM_construct_end();
512 if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx
),
517 if (EVP_DigestSignUpdate(mac_ctx
, header
, sizeof(header
)) <= 0
518 || EVP_DigestSignUpdate(mac_ctx
, rec
->input
, rec
->length
) <= 0
519 || EVP_DigestSignFinal(mac_ctx
, md
, &md_size
) <= 0)
522 OSSL_TRACE_BEGIN(TLS
) {
523 BIO_printf(trc_out
, "seq:\n");
524 BIO_dump_indent(trc_out
, seq
, 8, 4);
525 BIO_printf(trc_out
, "rec:\n");
526 BIO_dump_indent(trc_out
, rec
->data
, rec
->length
, 4);
527 } OSSL_TRACE_END(TLS
);
530 for (i
= 7; i
>= 0; i
--) {
536 OSSL_TRACE_BEGIN(TLS
) {
537 BIO_printf(trc_out
, "md:\n");
538 BIO_dump_indent(trc_out
, md
, md_size
, 4);
539 } OSSL_TRACE_END(TLS
);
542 EVP_MD_CTX_free(hmac
);
/*
 * Maximum size needed for the empty-fragment prefix record buffer: payload
 * alignment slack (if any) plus record header and worst-case encryption
 * (and, when compression is enabled, compression) overhead.
 */
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                          + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#else
# ifndef OPENSSL_NO_COMP
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH \
                          + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
# else
#  define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                          + SSL3_RT_HEADER_LENGTH)
# endif /* OPENSSL_NO_COMP */
#endif
568 /* This function is also used by the SSLv3 implementation */
569 int tls1_allocate_write_buffers(OSSL_RECORD_LAYER
*rl
,
570 OSSL_RECORD_TEMPLATE
*templates
,
571 size_t numtempl
, size_t *prefix
)
573 /* Do we need to add an empty record prefix? */
574 *prefix
= rl
->need_empty_fragments
575 && templates
[0].type
== SSL3_RT_APPLICATION_DATA
;
578 * In the prefix case we can allocate a much smaller buffer. Otherwise we
579 * just allocate the default buffer size
581 if (!tls_setup_write_buffer(rl
, numtempl
+ *prefix
,
582 *prefix
? MAX_PREFIX_LEN
: 0, 0)) {
583 /* RLAYERfatal() already called */
590 /* This function is also used by the SSLv3 implementation */
591 int tls1_initialise_write_packets(OSSL_RECORD_LAYER
*rl
,
592 OSSL_RECORD_TEMPLATE
*templates
,
594 OSSL_RECORD_TEMPLATE
*prefixtempl
,
603 /* Do we need to add an empty record prefix? */
604 prefix
= rl
->need_empty_fragments
605 && templates
[0].type
== SSL3_RT_APPLICATION_DATA
;
609 * countermeasure against known-IV weakness in CBC ciphersuites (see
610 * http://www.openssl.org/~bodo/tls-cbc.txt)
612 prefixtempl
->buf
= NULL
;
613 prefixtempl
->version
= templates
[0].version
;
614 prefixtempl
->buflen
= 0;
615 prefixtempl
->type
= SSL3_RT_APPLICATION_DATA
;
619 #if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
620 align
= (size_t)SSL3_BUFFER_get_buf(wb
) + SSL3_RT_HEADER_LENGTH
;
621 align
= SSL3_ALIGN_PAYLOAD
- 1
622 - ((align
- 1) % SSL3_ALIGN_PAYLOAD
);
624 SSL3_BUFFER_set_offset(wb
, align
);
626 if (!WPACKET_init_static_len(&pkt
[0], SSL3_BUFFER_get_buf(wb
),
627 SSL3_BUFFER_get_len(wb
), 0)) {
628 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
632 if (!WPACKET_allocate_bytes(&pkt
[0], align
, NULL
)) {
633 RLAYERfatal(rl
, SSL_AD_INTERNAL_ERROR
, ERR_R_INTERNAL_ERROR
);
638 return tls_initialise_write_packets_default(rl
, templates
, numtempl
,
640 pkt
+ prefix
, bufs
+ prefix
,
644 /* TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs */
645 struct record_functions_st tls_1_funcs
= {
646 tls1_set_crypto_state
,
649 tls_default_set_protocol_version
,
651 tls_get_more_records
,
652 tls_default_validate_record_header
,
653 tls_default_post_process_record
,
654 tls_get_max_records_multiblock
,
655 tls_write_records_multiblock
, /* Defined in tls_multib.c */
656 tls1_allocate_write_buffers
,
657 tls1_initialise_write_packets
,
659 tls_prepare_record_header_default
,
661 tls_prepare_for_encryption_default
,
662 tls_post_encryption_processing_default
,
666 struct record_functions_st dtls_1_funcs
= {
667 tls1_set_crypto_state
,
670 tls_default_set_protocol_version
,
672 dtls_get_more_records
,