/*
 * Copyright 2011-2019 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include "cipher_aes_cbc_hmac_sha.h"

#ifndef AES_CBC_HMAC_SHA_CAPABLE
int cipher_capable_aes_cbc_hmac_sha1(void)
{
    return 0;
}
#else

# include "crypto/rand.h"
# include "crypto/evp.h"
# include "internal/constant_time.h"
void sha1_block_data_order(void *c, const void *p, size_t len);
void aesni_cbc_sha1_enc(const void *inp, void *out, size_t blocks,
                        const AES_KEY *key, unsigned char iv[16],
                        SHA_CTX *ctx, const void *in0);
int cipher_capable_aes_cbc_hmac_sha1(void)
{
    return AESNI_CBC_HMAC_SHA_CAPABLE;
}
static int aesni_cbc_hmac_sha1_init_key(PROV_CIPHER_CTX *vctx,
                                        const unsigned char *key, size_t keylen)
{
    int ret;
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;

    if (ctx->base.enc)
        ret = aesni_set_encrypt_key(key, keylen * 8, &ctx->ks);
    else
        ret = aesni_set_decrypt_key(key, keylen * 8, &ctx->ks);

    SHA1_Init(&sctx->head);     /* handy when benchmarking */
    sctx->tail = sctx->head;
    sctx->md = sctx->head;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    return ret < 0 ? 0 : 1;
}
static void sha1_update(SHA_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    /* flush whatever is already buffered in the SHA_CTX */
    if ((res = c->num)) {
        res = SHA_CBLOCK - res;
        if (len < res)
            res = len;
        SHA1_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA_CBLOCK;
    len -= res;

    /* hash whole blocks directly, bypassing SHA1_Update's buffering */
    if (len) {
        sha1_block_data_order(c, ptr, len / SHA_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA1_Update(c, ptr, res);
}
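
/*
 * Editorial note: sha1_update() above is a thin replacement for
 * SHA1_Update().  It feeds as many complete 64-byte blocks as possible
 * straight to sha1_block_data_order() and only falls back to SHA1_Update()
 * for the buffered head and the partial tail, which keeps the hot path of
 * the record MAC calculation free of extra copying.
 */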
# if !defined(OPENSSL_NO_MULTIBLOCK)

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8];
} SHA1_MB_CTX;

typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void sha1_multi_block(SHA1_MB_CTX *, const HASH_DESC *, int);
void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);
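
/*
 * Editorial note: the multi-block path below MACs and encrypts several TLS
 * records in one go.  sha1_multi_block() is the SIMD routine that runs 4
 * (SSE/AVX) or 8 (AVX2) independent SHA-1 streams in parallel, one per
 * HASH_DESC, and aesni_multi_cbc_encrypt() CBC-encrypts the matching
 * CIPH_DESC descriptors with AES-NI.
 */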
static size_t tls1_multi_block_encrypt(void *vctx, unsigned char *out,
                                       const unsigned char *inp,
                                       size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA1_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA1_MB_CTX *mctx;
    unsigned int frag, last, packlen, i;
    unsigned int x4 = 4 * n4x, minblocks, processed = 0;
    size_t ret = 0;
    u8 *IVs;
#  if defined(BSWAP8)
    u64 seqnum;
#  endif
    /* ask for IVs in bulk */
    if (rand_bytes_ex(ctx->base.libctx, (IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    /* align */
    mctx = (SHA1_MB_CTX *) (storage + 32 - ((size_t)storage % 32));

    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 20 + 16) & -16);
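
    /*
     * Editorial note: the payload is cut into x4 fragments of "frag" bytes,
     * with the last fragment ("last") taking the remainder.  packlen is the
     * on-the-wire size of one record carrying a frag-byte payload: 5-byte
     * TLS header + 16-byte explicit IV + ciphertext of (payload + 20-byte
     * SHA-1 MAC + at least one byte of CBC padding) rounded up to the
     * 16-byte AES block size.
     */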
    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

#  if defined(BSWAP8)
    memcpy(blocks[0].c, sctx->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#  endif
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#  if !defined(BSWAP8)
        unsigned int carry, j;
#  endif

        mctx->A[i] = sctx->md.h0;
        mctx->B[i] = sctx->md.h1;
        mctx->C[i] = sctx->md.h2;
        mctx->D[i] = sctx->md.h3;
        mctx->E[i] = sctx->md.h4;

        /* fix seqnum */
#  if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#  else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)sctx->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#  endif
        blocks[i].c[8] = ((u8 *)sctx->md.data)[8];
        blocks[i].c[9] = ((u8 *)sctx->md.data)[9];
        blocks[i].c[10] = ((u8 *)sctx->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }
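
    /*
     * Editorial note: each of the x4 descriptors now starts with its own
     * 13-byte TLS MAC header (8-byte sequence number incremented per
     * record, record type, version, and the per-fragment length) followed
     * by the first 64 - 13 payload bytes, so one 64-byte SHA-1 block per
     * stream can be hashed up front.
     */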
    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha1_multi_block(mctx, edges, n4x);
    /* hash bulk inputs */
#  define MAXCHUNKSIZE    2048
#  if     MAXCHUNKSIZE % 64
#   error  "MAXCHUNKSIZE is not divisible by 64"
#  elif   MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha1_multi_block(mctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#  endif
#  undef  MAXCHUNKSIZE
    sha1_multi_block(mctx, hash_d, n4x);
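
    /*
     * Editorial note: hashing and encryption advance in MAXCHUNKSIZE-byte
     * steps so the data just hashed is still in L1 when it is encrypted.
     * After every chunk the last ciphertext block that was written
     * (ciph_d[i].out - 16) is copied back into ciph_d[i].iv to continue
     * the CBC chain, and the remaining full blocks are hashed by the call
     * above.
     */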
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
            off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#  ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 60, len);
#  endif
            edges[i].blocks = 1;
        } else {
#  ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 124, len);
#  endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }
    /* hash input tails and finalize */
    sha1_multi_block(mctx, edges, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#  ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(mctx->A[i]);
        mctx->A[i] = sctx->tail.h0;
        blocks[i].d[1] = BSWAP4(mctx->B[i]);
        mctx->B[i] = sctx->tail.h1;
        blocks[i].d[2] = BSWAP4(mctx->C[i]);
        mctx->C[i] = sctx->tail.h2;
        blocks[i].d[3] = BSWAP4(mctx->D[i]);
        mctx->D[i] = sctx->tail.h3;
        blocks[i].d[4] = BSWAP4(mctx->E[i]);
        mctx->E[i] = sctx->tail.h4;
        blocks[i].c[20] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 20) * 8);
#  else
        PUTU32(blocks[i].c + 0, mctx->A[i]);
        mctx->A[i] = sctx->tail.h0;
        PUTU32(blocks[i].c + 4, mctx->B[i]);
        mctx->B[i] = sctx->tail.h1;
        PUTU32(blocks[i].c + 8, mctx->C[i]);
        mctx->C[i] = sctx->tail.h2;
        PUTU32(blocks[i].c + 12, mctx->D[i]);
        mctx->D[i] = sctx->tail.h3;
        PUTU32(blocks[i].c + 16, mctx->E[i]);
        mctx->E[i] = sctx->tail.h4;
        blocks[i].c[20] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 20) * 8);
#  endif /* BSWAP4 */
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }
    /* finalize MACs */
    sha1_multi_block(mctx, edges, n4x);

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;
        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, mctx->A[i]);
        PUTU32(out + 4, mctx->B[i]);
        PUTU32(out + 8, mctx->C[i]);
        PUTU32(out + 12, mctx->D[i]);
        PUTU32(out + 16, mctx->E[i]);
        out += 20;
        len += 20;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)sctx->md.data)[8];
        out0[1] = ((u8 *)sctx->md.data)[9];
        out0[2] = ((u8 *)sctx->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);
    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(mctx, sizeof(*mctx));

    ctx->multiblock_encrypt_len = ret;
    return ret;
}
# endif /* OPENSSL_NO_MULTIBLOCK */
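
/*
 * Editorial note: tls1_multi_block_encrypt() returns the total number of
 * output bytes, i.e. the sum over all x4 records of 5-byte header +
 * 16-byte explicit IV + encrypted (payload | 20-byte MAC | padding); the
 * same value is stored in ctx->multiblock_encrypt_len for the caller.
 */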
static int aesni_cbc_hmac_sha1_cipher(PROV_CIPHER_CTX *vctx,
                                      unsigned char *out,
                                      const unsigned char *in, size_t len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int l;
    size_t plen = ctx->payload_length;
    size_t iv = 0; /* explicit IV in TLS 1.1 and later */
    size_t aes_off = 0, blocks;
    size_t sha_off = SHA_CBLOCK - sctx->md.num;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;
    if (ctx->base.enc) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (ctx->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;
        if (plen > (sha_off + iv)
            && (blocks = (plen - (sha_off + iv)) / SHA_CBLOCK)) {
            sha1_update(&sctx->md, in + iv, sha_off);

            aesni_cbc_sha1_enc(in, out, blocks, &ctx->ks, ctx->base.iv,
                               &sctx->md, in + iv + sha_off);
            blocks *= SHA_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            sctx->md.Nh += blocks >> 29;
            sctx->md.Nl += blocks <<= 3;
            if (sctx->md.Nl < (unsigned int)blocks)
                sctx->md.Nh++;
        } else {
            sha_off = 0;
        }
        sha_off += iv;
        sha1_update(&sctx->md, in + sha_off, plen - sha_off);
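
        /*
         * Editorial note: aesni_cbc_sha1_enc() is the "stitched" assembly
         * routine that CBC-encrypts and SHA-1-hashes in an interleaved
         * fashion; because it updates the SHA_CTX state words directly,
         * the bit counters Nh/Nl are adjusted by hand above, and whatever
         * payload did not fit a whole 64-byte block is hashed here.
         */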
        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA1_Final(out + plen, &sctx->md);
            sctx->md = sctx->tail;
            sha1_update(&sctx->md, out + plen, SHA_DIGEST_LENGTH);
            SHA1_Final(out + plen, &sctx->md);

            /* pad the payload|hmac */
            plen += SHA_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        }
    } else {
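        /*
         * Editorial note (decrypt side): the record is decrypted first and
         * the MAC is then recomputed over what claims to be the payload.
         * Everything below is written to take the same amount of time
         * whether or not the padding and MAC verify, to avoid the Lucky 13
         * class of timing oracles.
         */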
        union {
            unsigned int u[SHA_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[32 + SHA_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 31) & ((size_t)0 - 32));
        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA_CBLOCK];
            } *data = (void *)sctx->md.data;
            if ((ctx->aux.tls_aad[plen - 4] << 8 | ctx->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION) {
                if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1))
                    return 0;

                /* omit explicit iv */
                memcpy(ctx->base.iv, in, AES_BLOCK_SIZE);

                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
                len -= AES_BLOCK_SIZE;
            } else if (len < (SHA_DIGEST_LENGTH + 1))
                return 0;
            /* decrypt HMAC|padding at once */
            aesni_cbc_encrypt(in, out, len, &ctx->ks, ctx->base.iv, 0);

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            mask = constant_time_ge(maxpad, pad);
            ret &= mask;
            /*
             * If pad is invalid then we will fail the above test but we must
             * continue anyway because we are in constant time code. However,
             * we'll use the maxpad value instead of the supplied pad to make
             * sure we perform well defined pointer arithmetic.
             */
            pad = constant_time_select(mask, pad, maxpad);

            inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);
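
            /*
             * Editorial note: constant_time_ge() yields an all-ones mask
             * when the claimed pad length is plausible and all-zeroes
             * otherwise, so constant_time_select() silently substitutes
             * maxpad for an invalid pad without branching on secret data.
             */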
            ctx->aux.tls_aad[plen - 2] = inp_len >> 8;
            ctx->aux.tls_aad[plen - 1] = inp_len;

            sctx->md = sctx->head;
            sha1_update(&sctx->md, ctx->aux.tls_aad, plen);
            /* code containing lucky-13 fix */
            len -= SHA_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA_CBLOCK)) {
                j = (len - (256 + SHA_CBLOCK)) & (0 - SHA_CBLOCK);
                j += SHA_CBLOCK - sctx->md.num;
                sha1_update(&sctx->md, out, j);
                out += j;
                len -= j;
            }
            /* but pretend as if we hashed padded payload */
            bitlen = sctx->md.Nl + (inp_len << 3); /* at most 18 bits */
#  ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
#  else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
#  endif

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;
            for (res = sctx->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h0 & mask;
                pmac->u[1] |= sctx->md.h1 & mask;
                pmac->u[2] |= sctx->md.h2 & mask;
                pmac->u[3] |= sctx->md.h3 & mask;
                pmac->u[4] |= sctx->md.h4 & mask;

                res = 0;
            }
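
            /*
             * Editorial note: the loop above hashes every decrypted byte up
             * to the largest possible payload, replacing bytes past the real
             * payload with SHA-1 padding (0x80, zeroes, bit length), and
             * uses data-independent masks to latch the digest words of the
             * block that actually terminates the payload into pmac.
             */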
            for (i = res; i < SHA_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h0 & mask;
                pmac->u[1] |= sctx->md.h1 & mask;
                pmac->u[2] |= sctx->md.h2 & mask;
                pmac->u[3] |= sctx->md.h3 & mask;
                pmac->u[4] |= sctx->md.h4 & mask;

                memset(data, 0, SHA_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha1_block_data_order(&sctx->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= sctx->md.h0 & mask;
            pmac->u[1] |= sctx->md.h1 & mask;
            pmac->u[2] |= sctx->md.h2 & mask;
            pmac->u[3] |= sctx->md.h3 & mask;
            pmac->u[4] |= sctx->md.h4 & mask;
#  ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
#  else
            for (i = 0; i < 5; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
#  endif
            len += SHA_DIGEST_LENGTH;
            sctx->md = sctx->tail;
            sha1_update(&sctx->md, pmac->c, SHA_DIGEST_LENGTH);
            SHA1_Final(pmac->c, &sctx->md);

            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
            /* version of code with lucky-13 fix */
            {
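                /*
                 * Editorial note: the computed MAC in pmac->c is compared
                 * with the transmitted MAC and the padding bytes in a
                 * single pass over a fixed-length window, accumulating any
                 * mismatch into res with masks instead of early exits.
                 */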
                unsigned char *p = out + len - 1 - maxpad - SHA_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                maxpad += SHA_DIGEST_LENGTH;
                for (res = 0, i = 0, j = 0; j < maxpad; j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA_DIGEST_LENGTH)) >> (sizeof(int) *
                                                                 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }
                maxpad -= SHA_DIGEST_LENGTH;

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
            return ret;
        } else {
            /* decrypt HMAC|padding at once */
            aesni_cbc_encrypt(in, out, len, &ctx->ks, ctx->base.iv, 0);
            sha1_update(&sctx->md, out, len);
        }
    }

    return 1;
}
/* EVP_CTRL_AEAD_SET_MAC_KEY */
static void aesni_cbc_hmac_sha1_set_mac_key(void *vctx,
                                            const unsigned char *mac, size_t len)
{
    PROV_AES_HMAC_SHA1_CTX *ctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int i;
    unsigned char hmac_key[64];

    memset(hmac_key, 0, sizeof(hmac_key));

    if (len > sizeof(hmac_key)) {
        SHA1_Init(&ctx->head);
        sha1_update(&ctx->head, mac, len);
        SHA1_Final(hmac_key, &ctx->head);
    } else {
        memcpy(hmac_key, mac, len);
    }
    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36; /* ipad */
    SHA1_Init(&ctx->head);
    sha1_update(&ctx->head, hmac_key, sizeof(hmac_key));

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
    SHA1_Init(&ctx->tail);
    sha1_update(&ctx->tail, hmac_key, sizeof(hmac_key));

    OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
}
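
/*
 * Editorial note: head and tail hold the SHA-1 states after absorbing one
 * block of key ^ ipad and key ^ opad respectively, i.e. the two fixed
 * prefixes of HMAC-SHA1.  The second loop XORs with 0x36 ^ 0x5c so the
 * already ipad-masked key buffer becomes the opad-masked key in place.
 * Each per-record HMAC then starts from a copy of head and finishes by
 * rehashing the inner digest from a copy of tail.
 */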
/* EVP_CTRL_AEAD_TLS1_AAD */
static int aesni_cbc_hmac_sha1_set_tls1_aad(void *vctx,
                                            unsigned char *aad_rec, int aad_len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned char *p = aad_rec;
    unsigned int len;

    if (aad_len != EVP_AEAD_TLS1_AAD_LEN)
        return -1;

    len = p[aad_len - 2] << 8 | p[aad_len - 1];
    if (ctx->base.enc) {
        ctx->payload_length = len;
        if ((ctx->aux.tls_ver =
             p[aad_len - 4] << 8 | p[aad_len - 3]) >= TLS1_1_VERSION) {
            if (len < AES_BLOCK_SIZE)
                return 0;
            len -= AES_BLOCK_SIZE;
            p[aad_len - 2] = len >> 8;
            p[aad_len - 1] = len;
        }
        sctx->md = sctx->head;
        sha1_update(&sctx->md, p, aad_len);
        ctx->tls_aad_pad = (int)(((len + SHA_DIGEST_LENGTH +
                                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                                 - len);
        return 1;
    } else {
        memcpy(ctx->aux.tls_aad, aad_rec, aad_len);
        ctx->payload_length = aad_len;
        ctx->tls_aad_pad = SHA_DIGEST_LENGTH;
        return 1;
    }
}
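
/*
 * Editorial note: for encryption tls_aad_pad is the record's expansion,
 * ((len + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE) - len,
 * i.e. the 20-byte MAC plus at least one byte of CBC padding, rounded up
 * to a whole AES block; for decryption only the AAD is stashed and a flat
 * SHA_DIGEST_LENGTH is reported.
 */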
# if !defined(OPENSSL_NO_MULTIBLOCK)

/* EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE */
static int aesni_cbc_hmac_sha1_tls1_multiblock_max_bufsize(void *vctx)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;

    OPENSSL_assert(ctx->multiblock_max_send_fragment != 0);
    return (int)(5 + 16
                 + (((int)ctx->multiblock_max_send_fragment + 20 + 16) & -16));
}
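
/*
 * Editorial note: the value returned above is the size of one finished
 * record for the configured maximum send fragment: 5 bytes of TLS header +
 * 16 bytes of explicit IV + the fragment with its 20-byte MAC and CBC
 * padding rounded up to a 16-byte AES block.
 */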
/* EVP_CTRL_TLS1_1_MULTIBLOCK_AAD */
static int aesni_cbc_hmac_sha1_tls1_multiblock_aad(
    void *vctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int n4x = 1, x4;
    unsigned int frag, last, packlen, inp_len;

    inp_len = param->inp[11] << 8 | param->inp[12];
    ctx->multiblock_interleave = param->interleave;
    if (ctx->base.enc) {
        if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
            return -1;

        if (inp_len) {
            if (inp_len < 4096)
                return 0; /* too short */

            if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                n4x = 2; /* AVX2 */
        } else if ((n4x = param->interleave / 4) && n4x <= 2)
            inp_len = param->len;
        else
            return -1;

        sctx->md = sctx->head;
        sha1_update(&sctx->md, param->inp, 13);

        x4 = 4 * n4x;
        n4x += 1;
        frag = inp_len >> n4x;
        last = inp_len + frag - (frag << n4x);
        if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
            frag++;
            last -= x4 - 1;
        }

        packlen = 5 + 16 + ((frag + 20 + 16) & -16);
        packlen = (packlen << n4x) - packlen;
        packlen += 5 + 16 + ((last + 20 + 16) & -16);

        param->interleave = x4;
        /* The returned values used by get need to be stored */
        ctx->multiblock_interleave = x4;
        ctx->multiblock_aad_packlen = packlen;
        return 1;
    }
    return -1;          /* not yet */
}
/* EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT */
static int aesni_cbc_hmac_sha1_tls1_multiblock_encrypt(
    void *ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    return (int)tls1_multi_block_encrypt(ctx, param->out,
                                         param->inp, param->len,
                                         param->interleave / 4);
}
# endif /* OPENSSL_NO_MULTIBLOCK */
static const PROV_CIPHER_HW_AES_HMAC_SHA cipher_hw_aes_hmac_sha1 = {
    {
        aesni_cbc_hmac_sha1_init_key,
        aesni_cbc_hmac_sha1_cipher
    },
    aesni_cbc_hmac_sha1_set_mac_key,
    aesni_cbc_hmac_sha1_set_tls1_aad,
# if !defined(OPENSSL_NO_MULTIBLOCK)
    aesni_cbc_hmac_sha1_tls1_multiblock_max_bufsize,
    aesni_cbc_hmac_sha1_tls1_multiblock_aad,
    aesni_cbc_hmac_sha1_tls1_multiblock_encrypt
# endif
};
const PROV_CIPHER_HW_AES_HMAC_SHA *PROV_CIPHER_HW_aes_cbc_hmac_sha1(void)
{
    return &cipher_hw_aes_hmac_sha1;
}

#endif /* AES_CBC_HMAC_SHA_CAPABLE */