/*
 * Copyright 2011-2019 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * AES low level APIs are deprecated for public use, but still ok for internal
 * use where we're using them to implement the higher level EVP interface, as is
 * the case here.
 */
#include "internal/deprecated.h"

#include "cipher_aes_cbc_hmac_sha.h"

#ifndef AES_CBC_HMAC_SHA_CAPABLE
int cipher_capable_aes_cbc_hmac_sha1(void)
{
    return 0;
}
#else

# include "crypto/rand.h"
# include "crypto/evp.h"
# include "internal/constant_time.h"

void sha1_block_data_order(void *c, const void *p, size_t len);
void aesni_cbc_sha1_enc(const void *inp, void *out, size_t blocks,
                        const AES_KEY *key, unsigned char iv[16],
                        SHA_CTX *ctx, const void *in0);

int cipher_capable_aes_cbc_hmac_sha1(void)
{
    return AESNI_CBC_HMAC_SHA_CAPABLE;
}

static int aesni_cbc_hmac_sha1_init_key(PROV_CIPHER_CTX *vctx,
                                        const unsigned char *key,
                                        size_t keylen)
{
    int ret;
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;

    if (ctx->base.enc)
        ret = aesni_set_encrypt_key(key, keylen * 8, &ctx->ks);
    else
        ret = aesni_set_decrypt_key(key, keylen * 8, &ctx->ks);

    SHA1_Init(&sctx->head);     /* handy when benchmarking */
    sctx->tail = sctx->head;
    sctx->md = sctx->head;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    return ret < 0 ? 0 : 1;
}
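
/*
 * Note on the three SHA_CTX copies in the provider context: "head" is meant
 * to hold the SHA-1 state after absorbing the HMAC key XORed with ipad,
 * "tail" the state after the key XORed with opad (both are set up for real
 * in aesni_cbc_hmac_sha1_set_mac_key() below; the SHA1_Init() above merely
 * gives them a sane state when no MAC key is ever set, e.g. when
 * benchmarking), and "md" is the working copy for the record currently
 * being processed.  Resuming from a saved midstate saves one 64-byte block
 * hash per HMAC direction compared with rehashing the padded key each time.
 */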

static void sha1_update(SHA_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA_CBLOCK - res;
        if (len < res)
            res = len;
        SHA1_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA_CBLOCK;
    len -= res;

    if (len) {
        sha1_block_data_order(c, ptr, len / SHA_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA1_Update(c, ptr, res);
}
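
/*
 * A note on the bit-count bookkeeping above: Nh:Nl together form a 64-bit
 * counter of hashed bits.  "c->Nl += len <<= 3" converts the byte count to
 * bits and adds it to the low word, "len >> 29" is the same quantity
 * shifted into the high word (bytes-to-bits costs 3 of the 32 bits), and
 * the final comparison detects wraparound of Nl, in which case the carry
 * is propagated into Nh.  This is needed because sha1_block_data_order()
 * is called directly for the bulk data and, unlike SHA1_Update(), advances
 * only the chaining state, not the length counters.
 */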

# if !defined(OPENSSL_NO_MULTIBLOCK)

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8];
} SHA1_MB_CTX;

typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void sha1_multi_block(SHA1_MB_CTX *, const HASH_DESC *, int);
void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);

static size_t tls1_multi_block_encrypt(void *vctx,
                                       unsigned char *out,
                                       const unsigned char *inp,
                                       size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA1_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA1_MB_CTX *mctx;
    unsigned int frag, last, packlen, i;
    unsigned int x4 = 4 * n4x, minblocks, processed = 0;
    size_t ret = 0;
    u8 *IVs;
#  if defined(BSWAP8)
    u64 seqnum;
#  endif

    /* ask for IVs in bulk */
    if (rand_bytes_ex(ctx->base.libctx, (IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    mctx = (SHA1_MB_CTX *) (storage + 32 - ((size_t)storage % 32)); /* align */

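    /*
     * The input is split into x4 = 4*n4x fragments: the first x4 - 1
     * records carry "frag" bytes each and the last carries "last" bytes.
     * For example, inp_len = 8000 with n4x = 1 (x4 = 4) gives
     * frag = 8000 >> 2 = 2000 and last = 8000 + 2000 - 8000 = 2000.  The
     * adjustment below appears to move one byte from the last fragment to
     * each of the others when the last fragment's trailer (13-byte TLS
     * header plus the 9 bytes of minimal SHA-1 padding) would just spill
     * over a 64-byte hash-block boundary, evening out the lane workloads.
     */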
    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

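    /*
     * Per-record output size: 5-byte TLS header + 16-byte explicit IV +
     * ciphertext, where (frag + 20 + 16) & -16 rounds payload plus 20-byte
     * MAC up to the next AES block with at least one byte of CBC padding.
     * E.g. frag = 2000: (2000 + 20 + 16) & -16 = 2032, so
     * packlen = 5 + 16 + 2032 = 2053.
     */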
    packlen = 5 + 16 + ((frag + 20 + 16) & -16);

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

#  if defined(BSWAP8)
    memcpy(blocks[0].c, sctx->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#  endif
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#  if !defined(BSWAP8)
        unsigned int carry, j;
#  endif

        mctx->A[i] = sctx->md.h0;
        mctx->B[i] = sctx->md.h1;
        mctx->C[i] = sctx->md.h2;
        mctx->D[i] = sctx->md.h3;
        mctx->E[i] = sctx->md.h4;

        /* fix seqnum */
#  if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#  else
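        /*
         * Byte-wise big-endian increment: add i to the 64-bit sequence
         * number one byte at a time, least significant byte first.  After
         * each byte, "c[j] - carry" wraps (top bit set) exactly when the
         * byte addition overflowed past 255, so its sign bit is the carry
         * into the next byte -- branch-free, whatever the byte values.
         */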
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)sctx->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#  endif
        blocks[i].c[8] = ((u8 *)sctx->md.data)[8];
        blocks[i].c[9] = ((u8 *)sctx->md.data)[9];
        blocks[i].c[10] = ((u8 *)sctx->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha1_multi_block(mctx, edges, n4x);
    /* hash bulk inputs */
#  define MAXCHUNKSIZE 2048
#  if MAXCHUNKSIZE%64
#   error "MAXCHUNKSIZE is not divisible by 64"
#  elif MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha1_multi_block(mctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#  endif
#  undef MAXCHUNKSIZE
    sha1_multi_block(mctx, hash_d, n4x);

    memset(blocks, 0, sizeof(blocks));
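    /*
     * Build each fragment's final SHA-1 block(s) by hand: copy the
     * remaining payload tail, append the 0x80 terminator and the
     * big-endian bit count that MD-style padding requires (the high word
     * of the 64-bit count is already zero from the memset, so only the
     * low 32 bits are stored).  If the tail leaves fewer than 8 bytes
     * before the block boundary, the count has to go into a second block.
     * "len += 64 + 13" accounts for the 64-byte HMAC ipad block and the
     * 13-byte TLS header that were hashed ahead of the payload.
     */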
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
                     off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#  ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 60, len);
#  endif
            edges[i].blocks = 1;
        } else {
#  ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 124, len);
#  endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }

    /* hash input tails and finalize */
    sha1_multi_block(mctx, edges, n4x);

    memset(blocks, 0, sizeof(blocks));
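    /*
     * HMAC outer pass: serialize each lane's inner digest (A..E,
     * big-endian) into a fresh block, pad it as a (64 + 20)-byte message
     * -- 0x80 at offset 20, bit count (64 + 20) * 8 at the end -- and
     * reset the lanes to the saved key-XOR-opad midstate ("tail"), so
     * that the next sha1_multi_block() call leaves each lane holding
     * SHA1(key ^ opad || inner digest), i.e. the finished HMAC.
     */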
    for (i = 0; i < x4; i++) {
#  ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(mctx->A[i]);
        mctx->A[i] = sctx->tail.h0;
        blocks[i].d[1] = BSWAP4(mctx->B[i]);
        mctx->B[i] = sctx->tail.h1;
        blocks[i].d[2] = BSWAP4(mctx->C[i]);
        mctx->C[i] = sctx->tail.h2;
        blocks[i].d[3] = BSWAP4(mctx->D[i]);
        mctx->D[i] = sctx->tail.h3;
        blocks[i].d[4] = BSWAP4(mctx->E[i]);
        mctx->E[i] = sctx->tail.h4;
        blocks[i].c[20] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 20) * 8);
#  else
        PUTU32(blocks[i].c + 0, mctx->A[i]);
        mctx->A[i] = sctx->tail.h0;
        PUTU32(blocks[i].c + 4, mctx->B[i]);
        mctx->B[i] = sctx->tail.h1;
        PUTU32(blocks[i].c + 8, mctx->C[i]);
        mctx->C[i] = sctx->tail.h2;
        PUTU32(blocks[i].c + 12, mctx->D[i]);
        mctx->D[i] = sctx->tail.h3;
        PUTU32(blocks[i].c + 16, mctx->E[i]);
        mctx->E[i] = sctx->tail.h4;
        blocks[i].c[20] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 20) * 8);
#  endif /* BSWAP */
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* finalize MACs */
    sha1_multi_block(mctx, edges, n4x);

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, mctx->A[i]);
        PUTU32(out + 4, mctx->B[i]);
        PUTU32(out + 8, mctx->C[i]);
        PUTU32(out + 12, mctx->D[i]);
        PUTU32(out + 16, mctx->E[i]);
        out += 20;
        len += 20;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)sctx->md.data)[8];
        out0[1] = ((u8 *)sctx->md.data)[9];
        out0[2] = ((u8 *)sctx->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(mctx, sizeof(*mctx));

    ctx->multiblock_encrypt_len = ret;
    return ret;
}
# endif /* OPENSSL_NO_MULTIBLOCK */

static int aesni_cbc_hmac_sha1_cipher(PROV_CIPHER_CTX *vctx,
                                      unsigned char *out,
                                      const unsigned char *in, size_t len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int l;
    size_t plen = ctx->payload_length;
    size_t iv = 0;              /* explicit IV in TLS 1.1 and later */
    size_t aes_off = 0, blocks;
    size_t sha_off = SHA_CBLOCK - sctx->md.num;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;

    if (ctx->base.enc) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (ctx->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;

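        /*
         * Fast path: aesni_cbc_sha1_enc() is the stitched assembly kernel
         * that interleaves AES-CBC encryption with SHA-1 compression in a
         * single pass.  Its hash input (last argument) runs iv + sha_off
         * bytes ahead of the cipher input, so the kernel always hashes
         * plaintext it has not yet encrypted; the sha1_update() call just
         * before it brings the hash context to a block boundary, and the
         * Nh/Nl fixup afterwards is needed because the kernel advances
         * only the chaining state, not the byte counters.
         */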
        if (plen > (sha_off + iv)
            && (blocks = (plen - (sha_off + iv)) / SHA_CBLOCK)) {
            sha1_update(&sctx->md, in + iv, sha_off);

            aesni_cbc_sha1_enc(in, out, blocks, &ctx->ks, ctx->base.iv,
                               &sctx->md, in + iv + sha_off);
            blocks *= SHA_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            sctx->md.Nh += blocks >> 29;
            sctx->md.Nl += blocks <<= 3;
            if (sctx->md.Nl < (unsigned int)blocks)
                sctx->md.Nh++;
        } else {
            sha_off = 0;
        }
        sha_off += iv;
        sha1_update(&sctx->md, in + sha_off, plen - sha_off);

        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA1_Final(out + plen, &sctx->md);
            sctx->md = sctx->tail;
            sha1_update(&sctx->md, out + plen, SHA_DIGEST_LENGTH);
            SHA1_Final(out + plen, &sctx->md);

            /* pad the payload|hmac */
            plen += SHA_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        }
    } else {
        union {
            unsigned int u[SHA_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[32 + SHA_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 31) & ((size_t)0 - 32));
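        /*
         * The 32 + 20 byte union leaves room to round mac.c up to the next
         * 32-byte boundary, so the 20-byte MAC scratch never straddles a
         * cache line.  The point, presumably, is a data-independent cache
         * footprint: the decrypt path below is written so that its memory
         * access pattern does not leak the secret padding length (the
         * Lucky Thirteen class of timing attacks).
         */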

        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA_CBLOCK];
            } *data = (void *)sctx->md.data;

            if ((ctx->aux.tls_aad[plen - 4] << 8 | ctx->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION) {
                if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1))
                    return 0;

                /* omit explicit iv */
                memcpy(ctx->base.iv, in, AES_BLOCK_SIZE);

                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
                len -= AES_BLOCK_SIZE;
            } else if (len < (SHA_DIGEST_LENGTH + 1))
                return 0;

            /* decrypt HMAC|padding at once */
            aesni_cbc_encrypt(in, out, len, &ctx->ks, ctx->base.iv, 0);

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            mask = constant_time_ge(maxpad, pad);
            ret &= mask;
            /*
             * If pad is invalid then we will fail the above test but we must
             * continue anyway because we are in constant time code. However,
             * we'll use the maxpad value instead of the supplied pad to make
             * sure we perform well defined pointer arithmetic.
             */
            pad = constant_time_select(mask, pad, maxpad);

            inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);

            ctx->aux.tls_aad[plen - 2] = inp_len >> 8;
            ctx->aux.tls_aad[plen - 1] = inp_len;

            /* calculate HMAC */
            sctx->md = sctx->head;
            sha1_update(&sctx->md, ctx->aux.tls_aad, plen);

            /* code containing lucky-13 fix */
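            /*
             * Everything except the last 256 + SHA_CBLOCK bytes is payload
             * for any admissible padding length (pad <= 255, MAC = 20
             * bytes), so that prefix is public and can be hashed at full
             * speed in the block below.  Only the tail, whose
             * payload/MAC/padding split depends on the secret pad value,
             * needs the constant-time treatment that follows.
             */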
            len -= SHA_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA_CBLOCK)) {
                j = (len - (256 + SHA_CBLOCK)) & (0 - SHA_CBLOCK);
                j += SHA_CBLOCK - sctx->md.num;
                sha1_update(&sctx->md, out, j);
                out += j;
                len -= j;
                inp_len -= j;
            }

            /* but pretend as if we hashed padded payload */
            bitlen = sctx->md.Nl + (inp_len << 3); /* at most 18 bits */
# ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
# else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
# endif /* BSWAP */

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;

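            /*
             * Constant-time tail hash: every remaining ciphertext byte is
             * read exactly once.  The first mask keeps c while
             * j < inp_len (j - inp_len wraps, setting the mask's low byte
             * to 0xff); the 0x80 term injects the SHA-1 terminator
             * exactly at j == inp_len; later bytes become zero padding.
             * Each time a block fills up it is compressed, and the
             * candidate digest is folded into pmac under a mask that is
             * all-ones only for the block that really is the final one
             * (the one holding the bit count), so pmac ends up with the
             * digest of the correctly padded payload.
             */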
            for (res = sctx->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h0 & mask;
                pmac->u[1] |= sctx->md.h1 & mask;
                pmac->u[2] |= sctx->md.h2 & mask;
                pmac->u[3] |= sctx->md.h3 & mask;
                pmac->u[4] |= sctx->md.h4 & mask;
                res = 0;
            }

            for (i = res; i < SHA_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h0 & mask;
                pmac->u[1] |= sctx->md.h1 & mask;
                pmac->u[2] |= sctx->md.h2 & mask;
                pmac->u[3] |= sctx->md.h3 & mask;
                pmac->u[4] |= sctx->md.h4 & mask;

                memset(data, 0, SHA_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha1_block_data_order(&sctx->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= sctx->md.h0 & mask;
            pmac->u[1] |= sctx->md.h1 & mask;
            pmac->u[2] |= sctx->md.h2 & mask;
            pmac->u[3] |= sctx->md.h3 & mask;
            pmac->u[4] |= sctx->md.h4 & mask;

# ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
# else
            for (i = 0; i < 5; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
# endif /* BSWAP4 */
            len += SHA_DIGEST_LENGTH;
            sctx->md = sctx->tail;
            sha1_update(&sctx->md, pmac->c, SHA_DIGEST_LENGTH);
            SHA1_Final(pmac->c, &sctx->md);

            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
            /* version of code with lucky-13 fix */
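            /*
             * Constant-time compare: scan the full maxpad + 20 byte window
             * that could hold MAC and padding, regardless of where the MAC
             * actually starts, so the memory access pattern is independent
             * of the secret pad value.  Per byte, cmask selects what to
             * compare against: the computed MAC for offsets
             * [off, off + 20), the expected pad byte beyond that; bytes
             * before off match neither mask and are ignored.  res ends up
             * nonzero iff any byte mismatched, and
             * 0 - ((0 - res) >> (bits - 1)) turns that into an all-ones
             * mask that clears ret.
             */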
            {
                unsigned char *p = out + len - 1 - maxpad - SHA_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                maxpad += SHA_DIGEST_LENGTH;
                for (res = 0, i = 0, j = 0; j < maxpad; j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA_DIGEST_LENGTH)) >> (sizeof(int) *
                                                                 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }
                maxpad -= SHA_DIGEST_LENGTH;

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
            return ret;
        } else {
            /* decrypt HMAC|padding at once */
            aesni_cbc_encrypt(in, out, len, &ctx->ks, ctx->base.iv, 0);
            sha1_update(&sctx->md, out, len);
        }
    }

    return 1;
}

/* EVP_CTRL_AEAD_SET_MAC_KEY */
static void aesni_cbc_hmac_sha1_set_mac_key(void *vctx,
                                            const unsigned char *mac,
                                            size_t len)
{
    PROV_AES_HMAC_SHA1_CTX *ctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int i;
    unsigned char hmac_key[64];

    memset(hmac_key, 0, sizeof(hmac_key));

    if (len > (int)sizeof(hmac_key)) {
        SHA1_Init(&ctx->head);
        sha1_update(&ctx->head, mac, len);
        SHA1_Final(hmac_key, &ctx->head);
    } else {
        memcpy(hmac_key, mac, len);
    }
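
    /*
     * Standard HMAC key schedule, computed once per key instead of once
     * per record: "head" is left holding the SHA-1 state after hashing
     * key ^ ipad and "tail" after key ^ opad.  XORing with 0x36 ^ 0x5c
     * turns the already-ipad-masked key directly into the opad-masked
     * one without recovering the raw key in between.
     */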

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36; /* ipad */
    SHA1_Init(&ctx->head);
    sha1_update(&ctx->head, hmac_key, sizeof(hmac_key));

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
    SHA1_Init(&ctx->tail);
    sha1_update(&ctx->tail, hmac_key, sizeof(hmac_key));

    OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
}

/* EVP_CTRL_AEAD_TLS1_AAD */
static int aesni_cbc_hmac_sha1_set_tls1_aad(void *vctx,
                                            unsigned char *aad_rec,
                                            int aad_len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned char *p = aad_rec;
    unsigned int len;

    if (aad_len != EVP_AEAD_TLS1_AAD_LEN)
        return -1;

    len = p[aad_len - 2] << 8 | p[aad_len - 1];

    if (ctx->base.enc) {
        ctx->payload_length = len;
        if ((ctx->aux.tls_ver =
             p[aad_len - 4] << 8 | p[aad_len - 3]) >= TLS1_1_VERSION) {
            if (len < AES_BLOCK_SIZE)
                return 0;
            len -= AES_BLOCK_SIZE;
            p[aad_len - 2] = len >> 8;
            p[aad_len - 1] = len;
        }
        sctx->md = sctx->head;
        sha1_update(&sctx->md, p, aad_len);
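        /*
         * Expansion the caller must add to the plaintext length: MAC plus
         * CBC padding, i.e. ((len + 20 + 16) & -16) - len.  E.g. a
         * 100-byte payload becomes 100 + 20 = 120 bytes before padding,
         * which rounds up to 128, so tls_aad_pad is 28.
         */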
        ctx->tls_aad_pad = (int)(((len + SHA_DIGEST_LENGTH +
                                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                                 - len);
        return 1;
    } else {
        memcpy(ctx->aux.tls_aad, aad_rec, aad_len);
        ctx->payload_length = aad_len;
        ctx->tls_aad_pad = SHA_DIGEST_LENGTH;
        return 1;
    }
}

# if !defined(OPENSSL_NO_MULTIBLOCK)

/* EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE */
static int aesni_cbc_hmac_sha1_tls1_multiblock_max_bufsize(void *vctx)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;

    OPENSSL_assert(ctx->multiblock_max_send_fragment != 0);
    return (int)(5 + 16
                 + (((int)ctx->multiblock_max_send_fragment + 20 + 16) & -16));
}

/* EVP_CTRL_TLS1_1_MULTIBLOCK_AAD */
static int aesni_cbc_hmac_sha1_tls1_multiblock_aad(
    void *vctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int n4x = 1, x4;
    unsigned int frag, last, packlen, inp_len;

    inp_len = param->inp[11] << 8 | param->inp[12];
    ctx->multiblock_interleave = param->interleave;

    if (ctx->base.enc) {
        if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
            return -1;

        if (inp_len) {
            if (inp_len < 4096)
                return 0;       /* too short */

            if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                n4x = 2;        /* AVX2 */
        } else if ((n4x = param->interleave / 4) && n4x <= 2)
            inp_len = param->len;
        else
            return -1;

        sctx->md = sctx->head;
        sha1_update(&sctx->md, param->inp, 13);

        x4 = 4 * n4x;
        n4x += 1;

        frag = inp_len >> n4x;
        last = inp_len + frag - (frag << n4x);
        if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
            frag++;
            last -= x4 - 1;
        }

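        /*
         * Total output size: x4 - 1 full records of "packlen" bytes
         * ((packlen << n4x) - packlen equals (x4 - 1) * packlen, since
         * n4x was just bumped so that 1 << n4x == x4) plus one record
         * sized for the "last" fragment.
         */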
        packlen = 5 + 16 + ((frag + 20 + 16) & -16);
        packlen = (packlen << n4x) - packlen;
        packlen += 5 + 16 + ((last + 20 + 16) & -16);

        param->interleave = x4;
        /* The returned values used by get need to be stored */
        ctx->multiblock_interleave = x4;
        ctx->multiblock_aad_packlen = packlen;
        return 1;
    }
    return -1;                  /* not yet */
}

/* EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT */
static int aesni_cbc_hmac_sha1_tls1_multiblock_encrypt(
    void *ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    return (int)tls1_multi_block_encrypt(ctx, param->out,
                                         param->inp, param->len,
                                         param->interleave / 4);
}

# endif /* OPENSSL_NO_MULTIBLOCK */

static const PROV_CIPHER_HW_AES_HMAC_SHA cipher_hw_aes_hmac_sha1 = {
    {
        aesni_cbc_hmac_sha1_init_key,
        aesni_cbc_hmac_sha1_cipher
    },
    aesni_cbc_hmac_sha1_set_mac_key,
    aesni_cbc_hmac_sha1_set_tls1_aad,
# if !defined(OPENSSL_NO_MULTIBLOCK)
    aesni_cbc_hmac_sha1_tls1_multiblock_max_bufsize,
    aesni_cbc_hmac_sha1_tls1_multiblock_aad,
    aesni_cbc_hmac_sha1_tls1_multiblock_encrypt
# endif
};

const PROV_CIPHER_HW_AES_HMAC_SHA *PROV_CIPHER_HW_aes_cbc_hmac_sha1(void)
{
    return &cipher_hw_aes_hmac_sha1;
}

#endif /* AES_CBC_HMAC_SHA_CAPABLE */