/*
 * Copyright 2011-2019 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * All low level APIs are deprecated for public use, but still ok for internal
 * use where we're using them to implement the higher level EVP interface, as is
 * the case here.
 */
#include "internal/deprecated.h"

#include "cipher_aes_cbc_hmac_sha.h"

#if !defined(AES_CBC_HMAC_SHA_CAPABLE) || !defined(AESNI_CAPABLE)
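/*
 * Fallback for builds where either AES_CBC_HMAC_SHA_CAPABLE or AESNI_CAPABLE
 * is not defined: report the composite cipher as unavailable so it is not
 * offered by the provider.
 */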
int cipher_capable_aes_cbc_hmac_sha256(void)
{
    return 0;
}

const PROV_CIPHER_HW_AES_HMAC_SHA *PROV_CIPHER_HW_aes_cbc_hmac_sha256(void)
{
    return NULL;
}
#else

# include <openssl/rand.h>
# include "crypto/evp.h"
# include "internal/constant_time.h"

void sha256_block_data_order(void *c, const void *p, size_t len);
int aesni_cbc_sha256_enc(const void *inp, void *out, size_t blocks,
                         const AES_KEY *key, unsigned char iv[16],
                         SHA256_CTX *ctx, const void *in0);

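/*
 * When called with all-NULL arguments aesni_cbc_sha256_enc() acts as a
 * probe: it returns non-zero only if the stitched AES-NI + SHA-256 code
 * path is available on this CPU.
 */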
int cipher_capable_aes_cbc_hmac_sha256(void)
{
    return AESNI_CBC_HMAC_SHA_CAPABLE
           && aesni_cbc_sha256_enc(NULL, NULL, 0, NULL, NULL, NULL, NULL);
}

static int aesni_cbc_hmac_sha256_init_key(PROV_CIPHER_CTX *vctx,
                                          const unsigned char *key,
                                          size_t keylen)
{
    int ret;
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;

    if (ctx->base.enc)
        ret = aesni_set_encrypt_key(key, ctx->base.keylen * 8, &ctx->ks);
    else
        ret = aesni_set_decrypt_key(key, ctx->base.keylen * 8, &ctx->ks);

    SHA256_Init(&sctx->head);   /* handy when benchmarking */
    sctx->tail = sctx->head;
    sctx->md = sctx->head;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    return ret < 0 ? 0 : 1;
}

void sha256_block_data_order(void *c, const void *p, size_t len);

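/*
 * Minimal SHA-256 update: full blocks are fed straight to the low-level
 * block function with the 64-bit bit counter (Nh:Nl) fixed up by hand,
 * while any unaligned head or tail bytes go through SHA256_Update().
 */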
static void sha256_update(SHA256_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA256_CBLOCK - res;
        if (len < res)
            res = len;
        SHA256_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA256_CBLOCK;
    len -= res;

    if (len) {
        sha256_block_data_order(c, ptr, len / SHA256_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA256_Update(c, ptr, res);
}

# if !defined(OPENSSL_NO_MULTIBLOCK)

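/*
 * Descriptors consumed by the n-way SIMD routines below: one HASH_DESC and
 * one CIPH_DESC per TLS record being hashed / CBC-encrypted in parallel.
 */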
typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8], F[8], G[8], H[8];
} SHA256_MB_CTX;

typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void sha256_multi_block(SHA256_MB_CTX *, const HASH_DESC *, int);
void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);

static size_t tls1_multi_block_encrypt(void *vctx,
                                       unsigned char *out,
                                       const unsigned char *inp,
                                       size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA256_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA256_MB_CTX *mctx;
    unsigned int frag, last, packlen, i;
    unsigned int x4 = 4 * n4x, minblocks, processed = 0;
    size_t ret = 0;
    u8 *IVs;
#  if defined(BSWAP8)
    u64 seqnum;
#  endif

    /* ask for IVs in bulk */
    if (RAND_bytes_ex(ctx->base.libctx, (IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    mctx = (SHA256_MB_CTX *) (storage + 32 - ((size_t)storage % 32)); /* align */

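    /*
     * Split the payload into x4 records: x4 - 1 of frag bytes plus a final
     * one of last bytes.  When only a few bytes of the trailing record
     * would spill into an extra SHA-256 block, they are redistributed to
     * the leading records instead (frag++, last -= x4 - 1).
     */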
    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 32 + 16) & -16);

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

#  if defined(BSWAP8)
    memcpy(blocks[0].c, sctx->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#  endif

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#  if !defined(BSWAP8)
        unsigned int carry, j;
#  endif

        mctx->A[i] = sctx->md.h[0];
        mctx->B[i] = sctx->md.h[1];
        mctx->C[i] = sctx->md.h[2];
        mctx->D[i] = sctx->md.h[3];
        mctx->E[i] = sctx->md.h[4];
        mctx->F[i] = sctx->md.h[5];
        mctx->G[i] = sctx->md.h[6];
        mctx->H[i] = sctx->md.h[7];

        /* fix seqnum */
#  if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#  else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)sctx->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#  endif
        blocks[i].c[8] = ((u8 *)sctx->md.data)[8];
        blocks[i].c[9] = ((u8 *)sctx->md.data)[9];
        blocks[i].c[10] = ((u8 *)sctx->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha256_multi_block(mctx, edges, n4x);
    /* hash bulk inputs */
#  define MAXCHUNKSIZE 2048
#  if MAXCHUNKSIZE%64
#   error "MAXCHUNKSIZE is not divisible by 64"
#  elif MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha256_multi_block(mctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#  endif
#  undef MAXCHUNKSIZE
    sha256_multi_block(mctx, hash_d, n4x);

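    /*
     * Build the final SHA-256 block(s) for each record: copy the leftover
     * payload bytes, append the 0x80 terminator and the message bit length,
     * using one or two blocks depending on how much room remains.
     */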
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
                     off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#  ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 60, len);
#  endif
            edges[i].blocks = 1;
        } else {
#  ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 124, len);
#  endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }

    /* hash input tails and finalize */
    sha256_multi_block(mctx, edges, n4x);

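    /*
     * HMAC finalisation: serialise each lane's inner digest into a fresh
     * padded block and reset the lane state to the precomputed opad
     * context (tail), so the next multi-block pass yields the outer hash,
     * i.e. the final MAC values.
     */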
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#  ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(mctx->A[i]);
        mctx->A[i] = sctx->tail.h[0];
        blocks[i].d[1] = BSWAP4(mctx->B[i]);
        mctx->B[i] = sctx->tail.h[1];
        blocks[i].d[2] = BSWAP4(mctx->C[i]);
        mctx->C[i] = sctx->tail.h[2];
        blocks[i].d[3] = BSWAP4(mctx->D[i]);
        mctx->D[i] = sctx->tail.h[3];
        blocks[i].d[4] = BSWAP4(mctx->E[i]);
        mctx->E[i] = sctx->tail.h[4];
        blocks[i].d[5] = BSWAP4(mctx->F[i]);
        mctx->F[i] = sctx->tail.h[5];
        blocks[i].d[6] = BSWAP4(mctx->G[i]);
        mctx->G[i] = sctx->tail.h[6];
        blocks[i].d[7] = BSWAP4(mctx->H[i]);
        mctx->H[i] = sctx->tail.h[7];
        blocks[i].c[32] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 32) * 8);
#  else
        PUTU32(blocks[i].c + 0, mctx->A[i]);
        mctx->A[i] = sctx->tail.h[0];
        PUTU32(blocks[i].c + 4, mctx->B[i]);
        mctx->B[i] = sctx->tail.h[1];
        PUTU32(blocks[i].c + 8, mctx->C[i]);
        mctx->C[i] = sctx->tail.h[2];
        PUTU32(blocks[i].c + 12, mctx->D[i]);
        mctx->D[i] = sctx->tail.h[3];
        PUTU32(blocks[i].c + 16, mctx->E[i]);
        mctx->E[i] = sctx->tail.h[4];
        PUTU32(blocks[i].c + 20, mctx->F[i]);
        mctx->F[i] = sctx->tail.h[5];
        PUTU32(blocks[i].c + 24, mctx->G[i]);
        mctx->G[i] = sctx->tail.h[6];
        PUTU32(blocks[i].c + 28, mctx->H[i]);
        mctx->H[i] = sctx->tail.h[7];
        blocks[i].c[32] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 32) * 8);
#  endif /* BSWAP */
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* finalize MACs */
    sha256_multi_block(mctx, edges, n4x);

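    /*
     * Assemble each record in place: copy the plaintext payload, append
     * the freshly computed MAC, add CBC padding and write the 5-byte TLS
     * header; all records are then CBC-encrypted in a single interleaved
     * call below.
     */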
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, mctx->A[i]);
        PUTU32(out + 4, mctx->B[i]);
        PUTU32(out + 8, mctx->C[i]);
        PUTU32(out + 12, mctx->D[i]);
        PUTU32(out + 16, mctx->E[i]);
        PUTU32(out + 20, mctx->F[i]);
        PUTU32(out + 24, mctx->G[i]);
        PUTU32(out + 28, mctx->H[i]);
        out += 32;
        len += 32;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)sctx->md.data)[8];
        out0[1] = ((u8 *)sctx->md.data)[9];
        out0[2] = ((u8 *)sctx->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(mctx, sizeof(*mctx));

    ctx->multiblock_encrypt_len = ret;
    return ret;
}
# endif /* !OPENSSL_NO_MULTIBLOCK */

static int aesni_cbc_hmac_sha256_cipher(PROV_CIPHER_CTX *vctx,
                                        unsigned char *out,
                                        const unsigned char *in, size_t len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int l;
    size_t plen = ctx->payload_length;
    size_t iv = 0;              /* explicit IV in TLS 1.1 and later */
    size_t aes_off = 0, blocks;
    size_t sha_off = SHA256_CBLOCK - sctx->md.num;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;

    if (ctx->base.enc) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA256_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (ctx->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;

        /*
         * The assembly stitch handles AVX-capable processors, but its
         * performance is ~40% worse on AMD Jaguar for unknown reasons.
         * That processor supports AVX but not the AMD-specific XOP
         * extension, which can therefore be used to identify it and skip
         * the stitch. So, once we know the CPU supports AVX, we also
         * require it to be either XOP-capable (Bulldozer-based) or a
         * GenuineIntel part; SHAEXT-capable processors go ahead regardless.
         */
        if (((OPENSSL_ia32cap_P[2] & (1 << 29)) ||         /* SHAEXT? */
             ((OPENSSL_ia32cap_P[1] & (1 << (60 - 32))) && /* AVX? */
              ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32)))   /* XOP? */
               | (OPENSSL_ia32cap_P[0] & (1 << 30))))) &&  /* "Intel CPU"? */
            plen > (sha_off + iv) &&
            (blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) {
            sha256_update(&sctx->md, in + iv, sha_off);

            (void)aesni_cbc_sha256_enc(in, out, blocks, &ctx->ks,
                                       ctx->base.iv,
                                       &sctx->md, in + iv + sha_off);
            blocks *= SHA256_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            sctx->md.Nh += blocks >> 29;
            sctx->md.Nl += blocks <<= 3;
            if (sctx->md.Nl < (unsigned int)blocks)
                sctx->md.Nh++;
        } else {
            sha_off = 0;
        }
        sha_off += iv;
        sha256_update(&sctx->md, in + sha_off, plen - sha_off);

        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA256_Final(out + plen, &sctx->md);
            sctx->md = sctx->tail;
            sha256_update(&sctx->md, out + plen, SHA256_DIGEST_LENGTH);
            SHA256_Final(out + plen, &sctx->md);

            /* pad the payload|hmac */
            plen += SHA256_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        }
    } else {
        union {
            unsigned int u[SHA256_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[64 + SHA256_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 63) & ((size_t)0 - 64));

        /* decrypt HMAC|padding at once */
        aesni_cbc_encrypt(in, out, len, &ctx->ks,
                          ctx->base.iv, 0);

        if (plen != NO_PAYLOAD_LENGTH) {        /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA256_CBLOCK];
            } *data = (void *)sctx->md.data;

            if ((ctx->aux.tls_aad[plen - 4] << 8 | ctx->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION)
                iv = AES_BLOCK_SIZE;

            if (len < (iv + SHA256_DIGEST_LENGTH + 1))
                return 0;

            /* omit explicit iv */
            out += iv;
            len -= iv;

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA256_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            mask = constant_time_ge(maxpad, pad);
            ret &= mask;
            /*
             * If pad is invalid then we will fail the above test but we must
             * continue anyway because we are in constant time code. However,
             * we'll use the maxpad value instead of the supplied pad to make
             * sure we perform well defined pointer arithmetic.
             */
            pad = constant_time_select(mask, pad, maxpad);

            inp_len = len - (SHA256_DIGEST_LENGTH + pad + 1);

            ctx->aux.tls_aad[plen - 2] = inp_len >> 8;
            ctx->aux.tls_aad[plen - 1] = inp_len;

            /* calculate HMAC */
            sctx->md = sctx->head;
            sha256_update(&sctx->md, ctx->aux.tls_aad, plen);

            /* code with lucky-13 fix */
            len -= SHA256_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA256_CBLOCK)) {
                j = (len - (256 + SHA256_CBLOCK)) & (0 - SHA256_CBLOCK);
                j += SHA256_CBLOCK - sctx->md.num;
                sha256_update(&sctx->md, out, j);
                out += j;
                len -= j;
                inp_len -= j;
            }

            /* but pretend as if we hashed padded payload */
            bitlen = sctx->md.Nl + (inp_len << 3); /* at most 18 bits */
# ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
# else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
# endif /* BSWAP */

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;
            pmac->u[5] = 0;
            pmac->u[6] = 0;
            pmac->u[7] = 0;

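            /*
             * Lucky-13 mitigation: hash every remaining byte regardless of
             * where the payload really ends.  Masks derived from inp_len
             * insert the 0x80 terminator and the bit length in the right
             * place and latch the digest state from whichever block turns
             * out to be the last one, all without data-dependent branches.
             */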
            for (res = sctx->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA256_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h[0] & mask;
                pmac->u[1] |= sctx->md.h[1] & mask;
                pmac->u[2] |= sctx->md.h[2] & mask;
                pmac->u[3] |= sctx->md.h[3] & mask;
                pmac->u[4] |= sctx->md.h[4] & mask;
                pmac->u[5] |= sctx->md.h[5] & mask;
                pmac->u[6] |= sctx->md.h[6] & mask;
                pmac->u[7] |= sctx->md.h[7] & mask;
                res = 0;
            }

            for (i = res; i < SHA256_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA256_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h[0] & mask;
                pmac->u[1] |= sctx->md.h[1] & mask;
                pmac->u[2] |= sctx->md.h[2] & mask;
                pmac->u[3] |= sctx->md.h[3] & mask;
                pmac->u[4] |= sctx->md.h[4] & mask;
                pmac->u[5] |= sctx->md.h[5] & mask;
                pmac->u[6] |= sctx->md.h[6] & mask;
                pmac->u[7] |= sctx->md.h[7] & mask;

                memset(data, 0, SHA256_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha256_block_data_order(&sctx->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= sctx->md.h[0] & mask;
            pmac->u[1] |= sctx->md.h[1] & mask;
            pmac->u[2] |= sctx->md.h[2] & mask;
            pmac->u[3] |= sctx->md.h[3] & mask;
            pmac->u[4] |= sctx->md.h[4] & mask;
            pmac->u[5] |= sctx->md.h[5] & mask;
            pmac->u[6] |= sctx->md.h[6] & mask;
            pmac->u[7] |= sctx->md.h[7] & mask;

# ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
            pmac->u[5] = BSWAP4(pmac->u[5]);
            pmac->u[6] = BSWAP4(pmac->u[6]);
            pmac->u[7] = BSWAP4(pmac->u[7]);
# else
            for (i = 0; i < 8; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
# endif /* BSWAP */
            len += SHA256_DIGEST_LENGTH;
            sctx->md = sctx->tail;
            sha256_update(&sctx->md, pmac->c, SHA256_DIGEST_LENGTH);
            SHA256_Final(pmac->c, &sctx->md);

            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
            /* code containing lucky-13 fix */
            {
                unsigned char *p =
                    out + len - 1 - maxpad - SHA256_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

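                /*
                 * Scan a fixed-size window covering every possible MAC and
                 * padding position; cmask selects per byte whether it is
                 * checked against the computed MAC or against the expected
                 * pad value, so the work done is independent of the secret
                 * padding length.
                 */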
                maxpad += SHA256_DIGEST_LENGTH;
                for (res = 0, i = 0, j = 0; j < maxpad; j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA256_DIGEST_LENGTH)) >>
                        (sizeof(int) * 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }
                maxpad -= SHA256_DIGEST_LENGTH;

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
            return ret;
        } else {
            sha256_update(&sctx->md, out, len);
        }
    }

    return 1;
}

/* EVP_CTRL_AEAD_SET_MAC_KEY */
static void aesni_cbc_hmac_sha256_set_mac_key(void *vctx,
                                              const unsigned char *mackey,
                                              size_t len)
{
    PROV_AES_HMAC_SHA256_CTX *ctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int i;
    unsigned char hmac_key[64];

    memset(hmac_key, 0, sizeof(hmac_key));

    if (len > sizeof(hmac_key)) {
        SHA256_Init(&ctx->head);
        sha256_update(&ctx->head, mackey, len);
        SHA256_Final(hmac_key, &ctx->head);
    } else {
        memcpy(hmac_key, mackey, len);
    }

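    /*
     * Precompute the HMAC ipad and opad states: "head" is SHA-256 primed
     * with (key XOR ipad) and "tail" with (key XOR opad), so per-record
     * HMACs only need the inner and outer finalisations.
     */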
    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36; /* ipad */
    SHA256_Init(&ctx->head);
    sha256_update(&ctx->head, hmac_key, sizeof(hmac_key));

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
    SHA256_Init(&ctx->tail);
    sha256_update(&ctx->tail, hmac_key, sizeof(hmac_key));

    OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
}

/* EVP_CTRL_AEAD_TLS1_AAD */
static int aesni_cbc_hmac_sha256_set_tls1_aad(void *vctx,
                                              unsigned char *aad_rec, int aad_len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned char *p = aad_rec;
    unsigned int len;

    if (aad_len != EVP_AEAD_TLS1_AAD_LEN)
        return -1;

    len = p[aad_len - 2] << 8 | p[aad_len - 1];

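    /*
     * Encrypt side: record the payload length (minus the explicit IV for
     * TLS 1.1+), hash the AAD now, and report via tls_aad_pad how much the
     * record will grow (MAC plus CBC padding).  Decrypt side: stash the
     * AAD; the true payload length is only known once the record has been
     * decrypted and its padding checked.
     */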
    if (ctx->base.enc) {
        ctx->payload_length = len;
        if ((ctx->aux.tls_ver =
             p[aad_len - 4] << 8 | p[aad_len - 3]) >= TLS1_1_VERSION) {
            if (len < AES_BLOCK_SIZE)
                return 0;
            len -= AES_BLOCK_SIZE;
            p[aad_len - 2] = len >> 8;
            p[aad_len - 1] = len;
        }
        sctx->md = sctx->head;
        sha256_update(&sctx->md, p, aad_len);
        ctx->tls_aad_pad = (int)(((len + SHA256_DIGEST_LENGTH +
                                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                                 - len);
        return 1;
    } else {
        memcpy(ctx->aux.tls_aad, p, aad_len);
        ctx->payload_length = aad_len;
        ctx->tls_aad_pad = SHA256_DIGEST_LENGTH;
        return 1;
    }
}

# if !defined(OPENSSL_NO_MULTIBLOCK)
/* EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE */
static int aesni_cbc_hmac_sha256_tls1_multiblock_max_bufsize(
    void *vctx)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;

    OPENSSL_assert(ctx->multiblock_max_send_fragment != 0);
    return (int)(5 + 16
                 + (((int)ctx->multiblock_max_send_fragment + 32 + 16) & -16));
}

/* EVP_CTRL_TLS1_1_MULTIBLOCK_AAD */
static int aesni_cbc_hmac_sha256_tls1_multiblock_aad(
    void *vctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int n4x = 1, x4;
    unsigned int frag, last, packlen, inp_len;

    inp_len = param->inp[11] << 8 | param->inp[12];

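    /*
     * Pick the interleave factor: 4-way for records of at least 4KiB,
     * 8-way when the record is at least 8KiB and the CPU supports AVX2.
     * A zero length in the AAD means the caller fixed the interleave via
     * param->interleave and supplies the length in param->len instead.
     */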
    if (ctx->base.enc) {
        if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
            return -1;

        if (inp_len) {
            if (inp_len < 4096)
                return 0;       /* too short */

            if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                n4x = 2;        /* AVX2 */
        } else if ((n4x = param->interleave / 4) && n4x <= 2)
            inp_len = param->len;
        else
            return -1;

        sctx->md = sctx->head;
        sha256_update(&sctx->md, param->inp, 13);

        x4 = 4 * n4x;
        n4x += 1;

        frag = inp_len >> n4x;
        last = inp_len + frag - (frag << n4x);
        if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
            frag++;
            last -= x4 - 1;
        }

        packlen = 5 + 16 + ((frag + 32 + 16) & -16);
        packlen = (packlen << n4x) - packlen;
        packlen += 5 + 16 + ((last + 32 + 16) & -16);

        param->interleave = x4;
        /* The returned values used by get need to be stored */
        ctx->multiblock_interleave = x4;
        ctx->multiblock_aad_packlen = packlen;
        return 1;
    }
    return -1;                  /* not yet */
}

/* EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT */
static int aesni_cbc_hmac_sha256_tls1_multiblock_encrypt(
    void *ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    return (int)tls1_multi_block_encrypt(ctx, param->out,
                                         param->inp, param->len,
                                         param->interleave / 4);
}
# endif

static const PROV_CIPHER_HW_AES_HMAC_SHA cipher_hw_aes_hmac_sha256 = {
    {
        aesni_cbc_hmac_sha256_init_key,
        aesni_cbc_hmac_sha256_cipher
    },
    aesni_cbc_hmac_sha256_set_mac_key,
    aesni_cbc_hmac_sha256_set_tls1_aad,
# if !defined(OPENSSL_NO_MULTIBLOCK)
    aesni_cbc_hmac_sha256_tls1_multiblock_max_bufsize,
    aesni_cbc_hmac_sha256_tls1_multiblock_aad,
    aesni_cbc_hmac_sha256_tls1_multiblock_encrypt
# endif
};

const PROV_CIPHER_HW_AES_HMAC_SHA *PROV_CIPHER_HW_aes_cbc_hmac_sha256(void)
{
    return &cipher_hw_aes_hmac_sha256;
}

#endif /* !defined(AES_CBC_HMAC_SHA_CAPABLE) || !defined(AESNI_CAPABLE) */