/* ====================================================================
 * Copyright (c) 2011-2013 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 */

#include <openssl/opensslconf.h>

#include <stdio.h>
#include <string.h>

#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/aes.h>
#include <openssl/sha.h>
#include <openssl/rand.h>
#include "modes_lcl.h"
#include "internal/evp_int.h"

#ifndef EVP_CIPH_FLAG_AEAD_CIPHER
# define EVP_CIPH_FLAG_AEAD_CIPHER       0x200000
# define EVP_CTRL_AEAD_TLS1_AAD          0x16
# define EVP_CTRL_AEAD_SET_MAC_KEY       0x17
#endif

#if !defined(EVP_CIPH_FLAG_DEFAULT_ASN1)
# define EVP_CIPH_FLAG_DEFAULT_ASN1 0
#endif

#if !defined(EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK)
# define EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK 0
#endif

#define TLS1_1_VERSION 0x0302

typedef struct {
    AES_KEY ks;
    SHA_CTX head, tail, md;
    size_t payload_length;      /* AAD length in decrypt case */
    union {
        unsigned int tls_ver;
        unsigned char tls_aad[16]; /* 13 used */
    } aux;
} EVP_AES_HMAC_SHA1;

#define NO_PAYLOAD_LENGTH       ((size_t)-1)
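
/*
 * "head" and "tail" hold SHA-1 states pre-keyed with the HMAC ipad and opad
 * blocks (installed by the EVP_CTRL_AEAD_SET_MAC_KEY ctrl below), while "md"
 * is the running hash of the record currently being processed.
 */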

#if defined(AES_ASM) && ( \
        defined(__x86_64)       || defined(__x86_64__)  || \
        defined(_M_AMD64)       || defined(_M_X64)      )

extern unsigned int OPENSSL_ia32cap_P[];
# define AESNI_CAPABLE   (1<<(57-32))

int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);
int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);

void aesni_cbc_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key, unsigned char *ivec, int enc);

void aesni_cbc_sha1_enc(const void *inp, void *out, size_t blocks,
                        const AES_KEY *key, unsigned char iv[16],
                        SHA_CTX *ctx, const void *in0);

void aesni256_cbc_sha1_dec(const void *inp, void *out, size_t blocks,
                           const AES_KEY *key, unsigned char iv[16],
                           SHA_CTX *ctx, const void *in0);
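
/*
 * aesni_cbc_sha1_enc() and aesni256_cbc_sha1_dec() are the "stitched"
 * assembly routines: they interleave AES-CBC rounds with SHA-1 block
 * processing so both passes run over the data while it is still hot in
 * registers and L1 cache, instead of making two separate sweeps.
 */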

# define data(ctx) ((EVP_AES_HMAC_SHA1 *)EVP_CIPHER_CTX_get_cipher_data(ctx))

static int aesni_cbc_hmac_sha1_init_key(EVP_CIPHER_CTX *ctx,
                                        const unsigned char *inkey,
                                        const unsigned char *iv, int enc)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);
    int ret;

    if (enc)
        ret = aesni_set_encrypt_key(inkey,
                                    EVP_CIPHER_CTX_key_length(ctx) * 8,
                                    &key->ks);
    else
        ret = aesni_set_decrypt_key(inkey,
                                    EVP_CIPHER_CTX_key_length(ctx) * 8,
                                    &key->ks);

    SHA1_Init(&key->head);      /* handy when benchmarking */
    key->tail = key->head;
    key->md = key->head;

    key->payload_length = NO_PAYLOAD_LENGTH;

    return ret < 0 ? 0 : 1;
}
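
/*
 * Key setup only expands the AES key schedule and resets the three SHA-1
 * states; the actual HMAC key is installed separately through the
 * EVP_CTRL_AEAD_SET_MAC_KEY ctrl further down.
 */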

# define STITCHED_CALL
# undef  STITCHED_DECRYPT_CALL

# if !defined(STITCHED_CALL)
#  define aes_off 0
# endif

void sha1_block_data_order(void *c, const void *p, size_t len);

static void sha1_update(SHA_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA_CBLOCK - res;
        if (len < res)
            res = len;
        SHA1_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA_CBLOCK;
    len -= res;

    if (len) {
        sha1_block_data_order(c, ptr, len / SHA_CBLOCK);

        ptr += len;

        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA1_Update(c, ptr, res);
}

# ifdef SHA1_Update
#  undef SHA1_Update
# endif
# define SHA1_Update sha1_update
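
/*
 * sha1_update() feeds whole blocks straight to sha1_block_data_order(),
 * bypassing SHA1_Update()'s internal buffering, so the bulk-hashing path
 * avoids redundant copies; remaining partial blocks still go through the
 * regular SHA1_Update().
 */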

# if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8];
} SHA1_MB_CTX;
typedef struct {
    const unsigned char *ptr;
    size_t blocks;
} HASH_DESC;

void sha1_multi_block(SHA1_MB_CTX *, const HASH_DESC *, int);

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    size_t blocks;
    u64 iv[2];
} CIPH_DESC;

void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);

static size_t tls1_1_multi_block_encrypt(EVP_AES_HMAC_SHA1 *key,
                                         unsigned char *out,
                                         const unsigned char *inp,
                                         size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
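    /*
     * Encrypts n4x * 4 TLS 1.1+ records in one call: the payload is split
     * into 4 (or 8) fragments, each gets its own explicit IV from
     * RAND_bytes(), its own sequence number and its own HMAC, and the SHA-1
     * and AES-CBC work for all fragments is carried out in parallel by the
     * multi-block assembly routines.
     */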
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA1_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA1_MB_CTX *ctx;
    unsigned int frag, last, packlen, i, x4 = 4 * n4x, minblocks, processed =
        0;
    size_t ret = 0;
    u8 *IVs;
#  if defined(BSWAP8)
    u64 seqnum;
#  endif

    /* ask for IVs in bulk */
    if (RAND_bytes((IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    ctx = (SHA1_MB_CTX *) (storage + 32 - ((size_t)storage % 32)); /* align */

    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 20 + 16) & -16);

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

#  if defined(BSWAP8)
    memcpy(blocks[0].c, key->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#  endif
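
    /*
     * Build one TLS record per lane: blocks[i].c starts with the 13-byte MAC
     * header (8-byte sequence number bumped per fragment, then type, version
     * and per-fragment length taken from key->md.data), followed by the
     * first 64-13 payload bytes, so every lane begins on a full SHA-1 block.
     */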
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#  if !defined(BSWAP8)
        unsigned int carry, j;
#  endif

        ctx->A[i] = key->md.h0;
        ctx->B[i] = key->md.h1;
        ctx->C[i] = key->md.h2;
        ctx->D[i] = key->md.h3;
        ctx->E[i] = key->md.h4;

        /* fix seqnum */
#  if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#  else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)key->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#  endif
        blocks[i].c[8] = ((u8 *)key->md.data)[8];
        blocks[i].c[9] = ((u8 *)key->md.data)[9];
        blocks[i].c[10] = ((u8 *)key->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha1_multi_block(ctx, edges, n4x);
    /* hash bulk inputs */
#  define MAXCHUNKSIZE    2048
#  if     MAXCHUNKSIZE%64
#   error  "MAXCHUNKSIZE is not divisible by 64"
#  elif   MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha1_multi_block(ctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#  endif
#  undef  MAXCHUNKSIZE
    sha1_multi_block(ctx, hash_d, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
            off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#  ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 60, len);
#  endif
            edges[i].blocks = 1;
        } else {
#  ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 124, len);
#  endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }

    /* hash input tails and finalize */
    sha1_multi_block(ctx, edges, n4x);
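
    /*
     * HMAC finalisation: the 20-byte inner digests just produced become the
     * message for the outer hash, whose initial state is the opad-keyed
     * "tail" context, again processed for all lanes at once.
     */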
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#  ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(ctx->A[i]);
        ctx->A[i] = key->tail.h0;
        blocks[i].d[1] = BSWAP4(ctx->B[i]);
        ctx->B[i] = key->tail.h1;
        blocks[i].d[2] = BSWAP4(ctx->C[i]);
        ctx->C[i] = key->tail.h2;
        blocks[i].d[3] = BSWAP4(ctx->D[i]);
        ctx->D[i] = key->tail.h3;
        blocks[i].d[4] = BSWAP4(ctx->E[i]);
        ctx->E[i] = key->tail.h4;
        blocks[i].c[20] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 20) * 8);
#  else
        PUTU32(blocks[i].c + 0, ctx->A[i]);
        ctx->A[i] = key->tail.h0;
        PUTU32(blocks[i].c + 4, ctx->B[i]);
        ctx->B[i] = key->tail.h1;
        PUTU32(blocks[i].c + 8, ctx->C[i]);
        ctx->C[i] = key->tail.h2;
        PUTU32(blocks[i].c + 12, ctx->D[i]);
        ctx->D[i] = key->tail.h3;
        PUTU32(blocks[i].c + 16, ctx->E[i]);
        ctx->E[i] = key->tail.h4;
        blocks[i].c[20] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 20) * 8);
#  endif
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* finalize MACs */
    sha1_multi_block(ctx, edges, n4x);

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, ctx->A[i]);
        PUTU32(out + 4, ctx->B[i]);
        PUTU32(out + 8, ctx->C[i]);
        PUTU32(out + 12, ctx->D[i]);
        PUTU32(out + 16, ctx->E[i]);
        out += 20;
        len += 20;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)key->md.data)[8];
        out0[1] = ((u8 *)key->md.data)[9];
        out0[2] = ((u8 *)key->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(ctx, sizeof(*ctx));

    return ret;
}
# endif

static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                      const unsigned char *in, size_t len)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);
    unsigned int l;
    size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and
                                                * later */
        sha_off = 0;
# if defined(STITCHED_CALL)
    size_t aes_off = 0, blocks;

    sha_off = SHA_CBLOCK - key->md.num;
# endif

    key->payload_length = NO_PAYLOAD_LENGTH;
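
    /*
     * Two modes of operation: in "TLS" mode payload_length was primed by the
     * EVP_CTRL_AEAD_TLS1_AAD ctrl and the buffer already includes room for
     * MAC and padding; otherwise the whole input is hashed into key->md and
     * CBC-processed as-is, without a MAC being appended.
     */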
    if (len % AES_BLOCK_SIZE)
        return 0;

    if (EVP_CIPHER_CTX_encrypting(ctx)) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (key->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;

# if defined(STITCHED_CALL)
        if (plen > (sha_off + iv)
            && (blocks = (plen - (sha_off + iv)) / SHA_CBLOCK)) {
            SHA1_Update(&key->md, in + iv, sha_off);

            aesni_cbc_sha1_enc(in, out, blocks, &key->ks,
                               EVP_CIPHER_CTX_iv_noconst(ctx),
                               &key->md, in + iv + sha_off);
            blocks *= SHA_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            key->md.Nh += blocks >> 29;
            key->md.Nl += blocks <<= 3;
            if (key->md.Nl < (unsigned int)blocks)
                key->md.Nh++;
        } else {
            sha_off = 0;
        }
# endif
        sha_off += iv;
        SHA1_Update(&key->md, in + sha_off, plen - sha_off);

        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA1_Final(out + plen, &key->md);
            key->md = key->tail;
            SHA1_Update(&key->md, out + plen, SHA_DIGEST_LENGTH);
            SHA1_Final(out + plen, &key->md);

            /* pad the payload|hmac */
            plen += SHA_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &key->ks, EVP_CIPHER_CTX_iv_noconst(ctx), 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &key->ks, EVP_CIPHER_CTX_iv_noconst(ctx), 1);
        }
    } else {
        union {
            unsigned int u[SHA_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[32 + SHA_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 31) & ((size_t)0 - 32));

        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA_CBLOCK];
            } *data = (void *)key->md.data;
# if defined(STITCHED_DECRYPT_CALL)
            unsigned char tail_iv[AES_BLOCK_SIZE];
            int stitch = 0;
# endif

            if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION) {
                if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1))
                    return 0;

                /* omit explicit iv */
                memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), in, AES_BLOCK_SIZE);

                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
                len -= AES_BLOCK_SIZE;
            } else if (len < (SHA_DIGEST_LENGTH + 1))
                return 0;

# if defined(STITCHED_DECRYPT_CALL)
            if (len >= 1024 && ctx->key_len == 32) {
                /* decrypt last block */
                memcpy(tail_iv, in + len - 2 * AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE);
                aesni_cbc_encrypt(in + len - AES_BLOCK_SIZE,
                                  out + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE,
                                  &key->ks, tail_iv, 0);
                stitch = 1;
            } else
# endif
                /* decrypt HMAC|padding at once */
                aesni_cbc_encrypt(in, out, len, &key->ks,
                                  EVP_CIPHER_CTX_iv_noconst(ctx), 0);

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);
            mask = (0 - ((inp_len - len) >> (sizeof(inp_len) * 8 - 1)));
            inp_len &= mask;
            ret &= (int)mask;

            key->aux.tls_aad[plen - 2] = inp_len >> 8;
            key->aux.tls_aad[plen - 1] = inp_len;

            /* calculate HMAC */
            key->md = key->head;
            SHA1_Update(&key->md, key->aux.tls_aad, plen);

# if defined(STITCHED_DECRYPT_CALL)
            if (stitch) {
                blocks = (len - (256 + 32 + SHA_CBLOCK)) / SHA_CBLOCK;
                aes_off = len - AES_BLOCK_SIZE - blocks * SHA_CBLOCK;
                sha_off = SHA_CBLOCK - plen;

                aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);

                SHA1_Update(&key->md, out, sha_off);
                aesni256_cbc_sha1_dec(in + aes_off,
                                      out + aes_off, blocks, &key->ks,
                                      ctx->iv, &key->md, out + sha_off);

                sha_off += blocks *= SHA_CBLOCK;
                out += sha_off;
                plen += sha_off;
                len -= sha_off;

                key->md.Nl += (blocks << 3); /* at most 18 bits */
                memcpy(ctx->iv, tail_iv, AES_BLOCK_SIZE);
            }
# endif

# if 1
            len -= SHA_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA_CBLOCK)) {
                j = (len - (256 + SHA_CBLOCK)) & (0 - SHA_CBLOCK);
                j += SHA_CBLOCK - key->md.num;
                SHA1_Update(&key->md, out, j);
                out += j;
                len -= j;
            }

            /* but pretend as if we hashed padded payload */
            bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */
#  ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
#  else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
#  endif

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;
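
            /*
             * The loop below is the Lucky-13 countermeasure: every byte of
             * the decrypted record is fed to SHA-1 regardless of where the
             * real payload ends, and the candidate digest is harvested with
             * masks so that timing does not reveal the padding length.
             */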
            for (res = key->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h0 & mask;
                pmac->u[1] |= key->md.h1 & mask;
                pmac->u[2] |= key->md.h2 & mask;
                pmac->u[3] |= key->md.h3 & mask;
                pmac->u[4] |= key->md.h4 & mask;

                res = 0;
            }

            for (i = res; i < SHA_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h0 & mask;
                pmac->u[1] |= key->md.h1 & mask;
                pmac->u[2] |= key->md.h2 & mask;
                pmac->u[3] |= key->md.h3 & mask;
                pmac->u[4] |= key->md.h4 & mask;

                memset(data, 0, SHA_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha1_block_data_order(&key->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= key->md.h0 & mask;
            pmac->u[1] |= key->md.h1 & mask;
            pmac->u[2] |= key->md.h2 & mask;
            pmac->u[3] |= key->md.h3 & mask;
            pmac->u[4] |= key->md.h4 & mask;

#  ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
#  else
            for (i = 0; i < 5; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
#  endif
            len += SHA_DIGEST_LENGTH;
# else
            SHA1_Update(&key->md, out, inp_len);
            res = key->md.num;
            SHA1_Final(pmac->c, &key->md);

            {
                unsigned int inp_blocks, pad_blocks;

                /* but pretend as if we hashed padded payload */
                inp_blocks =
                    1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                res += (unsigned int)(len - inp_len);
                pad_blocks = res / SHA_CBLOCK;
                res %= SHA_CBLOCK;
                pad_blocks +=
                    1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                for (; inp_blocks < pad_blocks; inp_blocks++)
                    sha1_block_data_order(&key->md, data, 1);
            }
# endif
            key->md = key->tail;
            SHA1_Update(&key->md, pmac->c, SHA_DIGEST_LENGTH);
            SHA1_Final(pmac->c, &key->md);
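
            /*
             * The MAC check below scans the entire region that could hold
             * MAC and padding and folds any differences into "res", so the
             * comparison takes the same time whether or not it matches.
             */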
            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
# if 1
            {
                unsigned char *p = out + len - 1 - maxpad - SHA_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                maxpad += SHA_DIGEST_LENGTH;
                for (res = 0, i = 0, j = 0; j < maxpad; j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA_DIGEST_LENGTH)) >> (sizeof(int) *
                                                                 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }
                maxpad -= SHA_DIGEST_LENGTH;

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
# else
            for (res = 0, i = 0; i < SHA_DIGEST_LENGTH; i++)
                res |= out[i] ^ pmac->c[i];
            res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
            ret &= (int)~res;

            /* verify padding */
            pad = (pad & ~res) | (maxpad & res);
            out = out + len - 1 - pad;
            for (res = 0, i = 0; i < pad; i++)
                res |= out[i] ^ pad;

            res = (0 - res) >> (sizeof(res) * 8 - 1);
            ret &= (int)~res;
# endif
            return ret;
        } else {
# if defined(STITCHED_DECRYPT_CALL)
            if (len >= 1024 && ctx->key_len == 32) {
                if (sha_off %= SHA_CBLOCK)
                    blocks = (len - 3 * SHA_CBLOCK) / SHA_CBLOCK;
                else
                    blocks = (len - 2 * SHA_CBLOCK) / SHA_CBLOCK;
                aes_off = len - blocks * SHA_CBLOCK;

                aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);
                SHA1_Update(&key->md, out, sha_off);
                aesni256_cbc_sha1_dec(in + aes_off,
                                      out + aes_off, blocks, &key->ks,
                                      ctx->iv, &key->md, out + sha_off);

                sha_off += blocks *= SHA_CBLOCK;
                out += sha_off;
                len -= sha_off;

                key->md.Nh += blocks >> 29;
                key->md.Nl += blocks <<= 3;
                if (key->md.Nl < (unsigned int)blocks)
                    key->md.Nh++;
            } else
# endif
                /* decrypt HMAC|padding at once */
                aesni_cbc_encrypt(in, out, len, &key->ks,
                                  EVP_CIPHER_CTX_iv_noconst(ctx), 0);

            SHA1_Update(&key->md, out, len);
        }
    }

    return 1;
}

static int aesni_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
                                    void *ptr)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);

    switch (type) {
    case EVP_CTRL_AEAD_SET_MAC_KEY:
        {
            unsigned int i;
            unsigned char hmac_key[64];
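
            /*
             * Standard HMAC key schedule: a key longer than one SHA-1 block
             * is first hashed down to a digest, the (zero-padded) key is then
             * XORed with ipad/opad and absorbed into key->head/key->tail, so
             * per-record MACs only pay for the inner/outer finalisation.
             */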
            memset(hmac_key, 0, sizeof(hmac_key));

            if (arg > (int)sizeof(hmac_key)) {
                SHA1_Init(&key->head);
                SHA1_Update(&key->head, ptr, arg);
                SHA1_Final(hmac_key, &key->head);
            } else {
                memcpy(hmac_key, ptr, arg);
            }

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36; /* ipad */
            SHA1_Init(&key->head);
            SHA1_Update(&key->head, hmac_key, sizeof(hmac_key));

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
            SHA1_Init(&key->tail);
            SHA1_Update(&key->tail, hmac_key, sizeof(hmac_key));

            OPENSSL_cleanse(hmac_key, sizeof(hmac_key));

            return 1;
        }
    case EVP_CTRL_AEAD_TLS1_AAD:
        {
            unsigned char *p = ptr;
            unsigned int len;

            if (arg != EVP_AEAD_TLS1_AAD_LEN)
                return -1;

            len = p[arg - 2] << 8 | p[arg - 1];

            if (EVP_CIPHER_CTX_encrypting(ctx)) {
                key->payload_length = len;
                if ((key->aux.tls_ver =
                     p[arg - 4] << 8 | p[arg - 3]) >= TLS1_1_VERSION) {
                    len -= AES_BLOCK_SIZE;
                    p[arg - 2] = len >> 8;
                    p[arg - 1] = len;
                }
                key->md = key->head;
                SHA1_Update(&key->md, p, arg);

                return (int)(((len + SHA_DIGEST_LENGTH +
                               AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                             - len);
            } else {
                memcpy(key->aux.tls_aad, ptr, arg);
                key->payload_length = arg;

                return SHA_DIGEST_LENGTH;
            }
        }
883 case EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE
:
884 return (int)(5 + 16 + ((arg
+ 20 + 16) & -16));
885 case EVP_CTRL_TLS1_1_MULTIBLOCK_AAD
:
887 EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM
*param
=
888 (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM
*) ptr
;
889 unsigned int n4x
= 1, x4
;
890 unsigned int frag
, last
, packlen
, inp_len
;
892 if (arg
< (int)sizeof(EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM
))
895 inp_len
= param
->inp
[11] << 8 | param
->inp
[12];
897 if (EVP_CIPHER_CTX_encrypting(ctx
)) {
898 if ((param
->inp
[9] << 8 | param
->inp
[10]) < TLS1_1_VERSION
)
903 return 0; /* too short */
905 if (inp_len
>= 8192 && OPENSSL_ia32cap_P
[2] & (1 << 5))
907 } else if ((n4x
= param
->interleave
/ 4) && n4x
<= 2)
908 inp_len
= param
->len
;
913 SHA1_Update(&key
->md
, param
->inp
, 13);
918 frag
= inp_len
>> n4x
;
919 last
= inp_len
+ frag
- (frag
<< n4x
);
920 if (last
> frag
&& ((last
+ 13 + 9) % 64 < (x4
- 1))) {
925 packlen
= 5 + 16 + ((frag
+ 20 + 16) & -16);
926 packlen
= (packlen
<< n4x
) - packlen
;
927 packlen
+= 5 + 16 + ((last
+ 20 + 16) & -16);
929 param
->interleave
= x4
;
933 return -1; /* not yet */
935 case EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT
:
937 EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM
*param
=
938 (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM
*) ptr
;
940 return (int)tls1_1_multi_block_encrypt(key
, param
->out
,
941 param
->inp
, param
->len
,
942 param
->interleave
/ 4);
944 case EVP_CTRL_TLS1_1_MULTIBLOCK_DECRYPT
:

static EVP_CIPHER aesni_128_cbc_hmac_sha1_cipher = {
# ifdef NID_aes_128_cbc_hmac_sha1
    NID_aes_128_cbc_hmac_sha1,
# else
    NID_undef,
# endif
    AES_BLOCK_SIZE, 16, AES_BLOCK_SIZE,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    aesni_cbc_hmac_sha1_init_key,
    aesni_cbc_hmac_sha1_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA1),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha1_ctrl,
    NULL
};

static EVP_CIPHER aesni_256_cbc_hmac_sha1_cipher = {
# ifdef NID_aes_256_cbc_hmac_sha1
    NID_aes_256_cbc_hmac_sha1,
# else
    NID_undef,
# endif
    AES_BLOCK_SIZE, 32, AES_BLOCK_SIZE,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    aesni_cbc_hmac_sha1_init_key,
    aesni_cbc_hmac_sha1_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA1),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha1_ctrl,
    NULL
};

const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void)
{
    return (OPENSSL_ia32cap_P[1] & AESNI_CAPABLE ?
            &aesni_128_cbc_hmac_sha1_cipher : NULL);
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void)
{
    return (OPENSSL_ia32cap_P[1] & AESNI_CAPABLE ?
            &aesni_256_cbc_hmac_sha1_cipher : NULL);
}
#else
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void)
{
    return NULL;
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void)
{
    return NULL;
}
#endif