/*
 * Copyright 2001-2019 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "internal/evp_int.h"
#include "internal/cryptlib.h"
#include "internal/modes_int.h"
#include "modes_lcl.h"
#include "evp_locl.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;
    block128_f block;
    union {
        cbc128_f cbc;
        ctr128_f ctr;
    } stream;
} EVP_AES_KEY;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    GCM128_CONTEXT gcm;
    unsigned char *iv;          /* Temporary IV store */
    int ivlen;                  /* IV length */
    int taglen;
    int iv_gen;                 /* It is OK to generate IVs */
    int iv_gen_rand;            /* No IV was specified, so generate a rand IV */
    int tls_aad_len;            /* TLS AAD length */
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
    ctr128_f ctr;
} EVP_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks1, ks2;                 /* AES key schedules to use */
    XTS128_CONTEXT xts;
    void (*stream) (const unsigned char *in,
                    unsigned char *out, size_t length,
                    const AES_KEY *key1, const AES_KEY *key2,
                    const unsigned char iv[16]);
} EVP_AES_XTS_CTX;

#ifdef FIPS_MODE
static const int allow_insecure_decrypt = 0;
#else
static const int allow_insecure_decrypt = 1;
#endif
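
/*
 * In FIPS mode, XTS keys whose two halves are identical are rejected even
 * for decryption; see the CRYPTO_memcmp() checks in the XTS init_key
 * functions below, which are skipped for decryption only when
 * allow_insecure_decrypt is set.
 */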

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    int tag_set;                /* Set if tag is valid */
    int len_set;                /* Set if message length set */
    int L, M;                   /* L and M parameters from RFC3610 */
    int tls_aad_len;            /* TLS AAD length */
    CCM128_CONTEXT ccm;
    ccm128_f str;
} EVP_AES_CCM_CTX;
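
/*
 * In RFC 3610 terms, M is the tag (MAC) length in bytes and L is the size
 * of the length field, so the nonce occupies the remaining 15 - L bytes of
 * the first block; hence the "15 - cctx->L" IV copies below.
 */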

#ifndef OPENSSL_NO_OCB
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksenc;                    /* AES key schedule to use for encryption */
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksdec;                    /* AES key schedule to use for decryption */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    OCB128_CONTEXT ocb;
    unsigned char *iv;          /* Temporary IV store */
    unsigned char tag[16];
    unsigned char data_buf[16]; /* Store partial data blocks */
    unsigned char aad_buf[16];  /* Store partial AAD blocks */
    int data_buf_len;
    int aad_buf_len;
    int ivlen;                  /* IV length */
    int taglen;
} EVP_AES_OCB_CTX;
#endif

#define MAXBITCHUNK     ((size_t)1<<(sizeof(size_t)*8-4))
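
/*
 * For example, with a 64-bit size_t this evaluates to 2^60; it bounds how
 * many bytes the bit-oriented CFB1 code hands to the underlying routines in
 * one call, so that length-in-bits computations cannot overflow.
 */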

#ifdef VPAES_ASM
int vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);
int vpaes_set_decrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);

void vpaes_encrypt(const unsigned char *in, unsigned char *out,
                   const AES_KEY *key);
void vpaes_decrypt(const unsigned char *in, unsigned char *out,
                   const AES_KEY *key);

void vpaes_cbc_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key, unsigned char *ivec, int enc);
#endif
#ifdef BSAES_ASM
void bsaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
                       size_t length, const AES_KEY *key,
                       unsigned char ivec[16], int enc);
void bsaes_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
                                size_t len, const AES_KEY *key,
                                const unsigned char ivec[16]);
void bsaes_xts_encrypt(const unsigned char *inp, unsigned char *out,
                       size_t len, const AES_KEY *key1,
                       const AES_KEY *key2, const unsigned char iv[16]);
void bsaes_xts_decrypt(const unsigned char *inp, unsigned char *out,
                       size_t len, const AES_KEY *key1,
                       const AES_KEY *key2, const unsigned char iv[16]);
#endif
#ifdef AES_CTR_ASM
void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
                       size_t blocks, const AES_KEY *key,
                       const unsigned char ivec[AES_BLOCK_SIZE]);
#endif
#ifdef AES_XTS_ASM
void AES_xts_encrypt(const unsigned char *inp, unsigned char *out, size_t len,
                     const AES_KEY *key1, const AES_KEY *key2,
                     const unsigned char iv[16]);
void AES_xts_decrypt(const unsigned char *inp, unsigned char *out, size_t len,
                     const AES_KEY *key1, const AES_KEY *key2,
                     const unsigned char iv[16]);
#endif

/* increment counter (64-bit int) by 1 */
static void ctr64_inc(unsigned char *counter)
{
    int n = 8;
    unsigned char c;

    do {
        --n;
        c = counter[n];
        ++c;
        counter[n] = c;
        if (c)
            return;
    } while (n);
}
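
/*
 * Example: a counter ending in 00 01 ff ff becomes 00 02 00 00; the loop
 * propagates the carry from the last byte and stops at the first byte that
 * does not wrap to zero, touching at most the trailing 8 bytes.
 */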

#if defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
# include "ppc_arch.h"
# ifdef VPAES_ASM
#  define VPAES_CAPABLE (OPENSSL_ppccap_P & PPC_ALTIVEC)
# endif
# define HWAES_CAPABLE  (OPENSSL_ppccap_P & PPC_CRYPTO207)
# define HWAES_set_encrypt_key aes_p8_set_encrypt_key
# define HWAES_set_decrypt_key aes_p8_set_decrypt_key
# define HWAES_encrypt aes_p8_encrypt
# define HWAES_decrypt aes_p8_decrypt
# define HWAES_cbc_encrypt aes_p8_cbc_encrypt
# define HWAES_ctr32_encrypt_blocks aes_p8_ctr32_encrypt_blocks
# define HWAES_xts_encrypt aes_p8_xts_encrypt
# define HWAES_xts_decrypt aes_p8_xts_decrypt
#endif

#if defined(AES_ASM) && !defined(I386_ONLY) && (  \
        ((defined(__i386) || defined(__i386__) || \
          defined(_M_IX86)) && defined(OPENSSL_IA32_SSE2)) || \
        defined(__x86_64) || defined(__x86_64__) || \
        defined(_M_AMD64) || defined(_M_X64))

extern unsigned int OPENSSL_ia32cap_P[];

# ifdef VPAES_ASM
#  define VPAES_CAPABLE (OPENSSL_ia32cap_P[1]&(1<<(41-32)))
# endif
# ifdef BSAES_ASM
#  define BSAES_CAPABLE (OPENSSL_ia32cap_P[1]&(1<<(41-32)))
# endif
/*
 * AES-NI section
 */
# define AESNI_CAPABLE  (OPENSSL_ia32cap_P[1]&(1<<(57-32)))
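
/*
 * OPENSSL_ia32cap_P[1] holds the CPUID.1:ECX flags: bit 41 - 32 = 9 is
 * SSSE3 (needed by the vector-permute and bit-sliced implementations
 * above), and bit 57 - 32 = 25 is the AES-NI instruction set.
 */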

int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);
int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);

void aesni_encrypt(const unsigned char *in, unsigned char *out,
                   const AES_KEY *key);
void aesni_decrypt(const unsigned char *in, unsigned char *out,
                   const AES_KEY *key);

void aesni_ecb_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length, const AES_KEY *key, int enc);
void aesni_cbc_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key, unsigned char *ivec, int enc);

void aesni_ctr32_encrypt_blocks(const unsigned char *in,
                                unsigned char *out,
                                size_t blocks,
                                const void *key, const unsigned char *ivec);

void aesni_xts_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key1, const AES_KEY *key2,
                       const unsigned char iv[16]);

void aesni_xts_decrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key1, const AES_KEY *key2,
                       const unsigned char iv[16]);

void aesni_ccm64_encrypt_blocks(const unsigned char *in,
                                unsigned char *out,
                                size_t blocks,
                                const void *key,
                                const unsigned char ivec[16],
                                unsigned char cmac[16]);

void aesni_ccm64_decrypt_blocks(const unsigned char *in,
                                unsigned char *out,
                                size_t blocks,
                                const void *key,
                                const unsigned char ivec[16],
                                unsigned char cmac[16]);

# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
size_t aesni_gcm_encrypt(const unsigned char *in,
                         unsigned char *out,
                         size_t len,
                         const void *key, unsigned char ivec[16], u64 *Xi);
#  define AES_gcm_encrypt aesni_gcm_encrypt
size_t aesni_gcm_decrypt(const unsigned char *in,
                         unsigned char *out,
                         size_t len,
                         const void *key, unsigned char ivec[16], u64 *Xi);
#  define AES_gcm_decrypt aesni_gcm_decrypt
void gcm_ghash_avx(u64 Xi[2], const u128 Htable[16], const u8 *in,
                   size_t len);
#  define AES_GCM_ASM(gctx)       (gctx->ctr==aesni_ctr32_encrypt_blocks && \
                                   gctx->gcm.ghash==gcm_ghash_avx)
#  define AES_GCM_ASM2(gctx)      (gctx->gcm.block==(block128_f)aesni_encrypt && \
                                   gctx->gcm.ghash==gcm_ghash_avx)
#  undef AES_GCM_ASM2          /* minor size optimization */
# endif

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                          const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = aesni_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                    &dat->ks.ks);
        dat->block = (block128_f) aesni_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f) aesni_cbc_encrypt : NULL;
    } else {
        ret = aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                    &dat->ks.ks);
        dat->block = (block128_f) aesni_encrypt;
        if (mode == EVP_CIPH_CBC_MODE)
            dat->stream.cbc = (cbc128_f) aesni_cbc_encrypt;
        else if (mode == EVP_CIPH_CTR_MODE)
            dat->stream.ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        else
            dat->stream.cbc = NULL;
    }

    if (ret < 0) {
        EVPerr(EVP_F_AESNI_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    aesni_cbc_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_iv_noconst(ctx),
                      EVP_CIPHER_CTX_encrypting(ctx));

    return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    size_t bl = EVP_CIPHER_CTX_block_size(ctx);

    if (len < bl)
        return 1;

    aesni_ecb_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_encrypting(ctx));

    return 1;
}

# define aesni_ofb_cipher aes_ofb_cipher
static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb_cipher aes_cfb_cipher
static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb8_cipher aes_cfb8_cipher
static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_cfb1_cipher aes_cfb1_cipher
static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_ctr_cipher aes_ctr_cipher
static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                              &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
        gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aesni_gcm_cipher aes_gcm_cipher
static int aesni_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
        const int bits = bytes * 8;

        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            EVPerr(EVP_F_AESNI_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        /* key_len is two AES keys */
        if (enc) {
            aesni_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_encrypt;
            xctx->stream = aesni_xts_encrypt;
        } else {
            aesni_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_decrypt;
            xctx->stream = aesni_xts_decrypt;
        }

        aesni_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aesni_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
    }

    return 1;
}
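
/*
 * Illustrative sketch (not part of this file): at the EVP layer the XTS
 * "key" is the concatenation of the two half keys checked above, e.g. for
 * AES-256-XTS a caller supplies 64 bytes:
 *
 *     EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
 *     unsigned char key[64], iv[16];       // the two 32-byte halves must differ
 *     EVP_EncryptInit_ex(c, EVP_aes_256_xts(), NULL, key, iv);
 *
 * Initialisation fails with EVP_R_XTS_DUPLICATED_KEYS if both halves are
 * identical.
 */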

# define aesni_xts_cipher aes_xts_cipher
static int aesni_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                              &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aesni_encrypt);
        cctx->str = enc ? (ccm128_f) aesni_ccm64_encrypt_blocks :
            (ccm128_f) aesni_ccm64_decrypt_blocks;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aesni_ccm_cipher aes_ccm_cipher
static int aesni_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
void aesni_ocb_encrypt(const unsigned char *in, unsigned char *out,
                       size_t blocks, const void *key,
                       size_t start_block_num,
                       unsigned char offset_i[16],
                       const unsigned char L_[][16],
                       unsigned char checksum[16]);
void aesni_ocb_decrypt(const unsigned char *in, unsigned char *out,
                       size_t blocks, const void *key,
                       size_t start_block_num,
                       unsigned char offset_i[16],
                       const unsigned char L_[][16],
                       unsigned char checksum[16]);

static int aesni_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                  &octx->ksenc.ks);
            aesni_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                  &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aesni_encrypt,
                                    (block128_f) aesni_decrypt,
                                    enc ? aesni_ocb_encrypt
                                        : aesni_ocb_decrypt))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aesni_ocb_cipher aes_ocb_cipher
static int aesni_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE,   \
        aesni_init_key,                 \
        aesni_##mode##_cipher,          \
        NULL,                           \
        sizeof(EVP_AES_KEY),            \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,     \
        keylen/8,ivlen,                 \
        flags|EVP_CIPH_##MODE##_MODE,   \
        aes_init_key,                   \
        aes_##mode##_cipher,            \
        NULL,                           \
        sizeof(EVP_AES_KEY),            \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen,                          \
        flags|EVP_CIPH_##MODE##_MODE,   \
        aesni_##mode##_init_key,        \
        aesni_##mode##_cipher,          \
        aes_##mode##_cleanup,           \
        sizeof(EVP_AES_##MODE##_CTX),   \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen,                          \
        flags|EVP_CIPH_##MODE##_MODE,   \
        aes_##mode##_init_key,          \
        aes_##mode##_cipher,            \
        aes_##mode##_cleanup,           \
        sizeof(EVP_AES_##MODE##_CTX),   \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }
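
/*
 * For instance, an invocation like BLOCK_CIPHER_custom(NID_aes, 256, 1, 12,
 * gcm, GCM, ...) expands to the aesni_256_gcm and aes_256_gcm cipher tables
 * plus an EVP_aes_256_gcm() accessor that selects the AES-NI variant at run
 * time when AESNI_CAPABLE is true. Note the doubled key length for XTS and
 * SIV, which take two keys.
 */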

#elif defined(AES_ASM) && (defined(__sparc) || defined(__sparc__))

# include "sparc_arch.h"

extern unsigned int OPENSSL_sparcv9cap_P[];

/*
 * Initial Fujitsu SPARC64 X support
 */
# define HWAES_CAPABLE           (OPENSSL_sparcv9cap_P[0] & SPARCV9_FJAESX)
# define HWAES_set_encrypt_key aes_fx_set_encrypt_key
# define HWAES_set_decrypt_key aes_fx_set_decrypt_key
# define HWAES_encrypt aes_fx_encrypt
# define HWAES_decrypt aes_fx_decrypt
# define HWAES_cbc_encrypt aes_fx_cbc_encrypt
# define HWAES_ctr32_encrypt_blocks aes_fx_ctr32_encrypt_blocks

# define SPARC_AES_CAPABLE       (OPENSSL_sparcv9cap_P[1] & CFR_AES)

void aes_t4_set_encrypt_key(const unsigned char *key, int bits, AES_KEY *ks);
void aes_t4_set_decrypt_key(const unsigned char *key, int bits, AES_KEY *ks);
void aes_t4_encrypt(const unsigned char *in, unsigned char *out,
                    const AES_KEY *key);
void aes_t4_decrypt(const unsigned char *in, unsigned char *out,
                    const AES_KEY *key);
/*
 * Key-length specific subroutines were chosen for the following reason.
 * Each SPARC T4 core can execute up to 8 threads which share the core's
 * resources. Loading as much key material as possible into registers
 * minimizes references to the shared memory interface, as well as the
 * number of instructions in inner loops [much needed on T4]. But then
 * having non-key-length-specific routines would require conditional
 * branches either in inner loops or on subroutine entry. The former is
 * hardly acceptable, while the latter would grow the code to the size
 * occupied by the multiple key-length specific subroutines anyway, so
 * why fight?
 */
void aes128_t4_cbc_encrypt(const unsigned char *in, unsigned char *out,
                           size_t len, const AES_KEY *key,
                           unsigned char *ivec);
void aes128_t4_cbc_decrypt(const unsigned char *in, unsigned char *out,
                           size_t len, const AES_KEY *key,
                           unsigned char *ivec);
void aes192_t4_cbc_encrypt(const unsigned char *in, unsigned char *out,
                           size_t len, const AES_KEY *key,
                           unsigned char *ivec);
void aes192_t4_cbc_decrypt(const unsigned char *in, unsigned char *out,
                           size_t len, const AES_KEY *key,
                           unsigned char *ivec);
void aes256_t4_cbc_encrypt(const unsigned char *in, unsigned char *out,
                           size_t len, const AES_KEY *key,
                           unsigned char *ivec);
void aes256_t4_cbc_decrypt(const unsigned char *in, unsigned char *out,
                           size_t len, const AES_KEY *key,
                           unsigned char *ivec);
void aes128_t4_ctr32_encrypt(const unsigned char *in, unsigned char *out,
                             size_t blocks, const AES_KEY *key,
                             unsigned char *ivec);
void aes192_t4_ctr32_encrypt(const unsigned char *in, unsigned char *out,
                             size_t blocks, const AES_KEY *key,
                             unsigned char *ivec);
void aes256_t4_ctr32_encrypt(const unsigned char *in, unsigned char *out,
                             size_t blocks, const AES_KEY *key,
                             unsigned char *ivec);
void aes128_t4_xts_encrypt(const unsigned char *in, unsigned char *out,
                           size_t blocks, const AES_KEY *key1,
                           const AES_KEY *key2, const unsigned char *ivec);
void aes128_t4_xts_decrypt(const unsigned char *in, unsigned char *out,
                           size_t blocks, const AES_KEY *key1,
                           const AES_KEY *key2, const unsigned char *ivec);
void aes256_t4_xts_encrypt(const unsigned char *in, unsigned char *out,
                           size_t blocks, const AES_KEY *key1,
                           const AES_KEY *key2, const unsigned char *ivec);
void aes256_t4_xts_decrypt(const unsigned char *in, unsigned char *out,
                           size_t blocks, const AES_KEY *key1,
                           const AES_KEY *key2, const unsigned char *ivec);

static int aes_t4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                           const unsigned char *iv, int enc)
{
    int ret, mode, bits;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_mode(ctx);
    bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = 0;
        aes_t4_set_decrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_decrypt;
        switch (bits) {
        case 128:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes128_t4_cbc_decrypt : NULL;
            break;
        case 192:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes192_t4_cbc_decrypt : NULL;
            break;
        case 256:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes256_t4_cbc_decrypt : NULL;
            break;
        default:
            ret = -1;
        }
    } else {
        ret = 0;
        aes_t4_set_encrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_encrypt;
        switch (bits) {
        case 128:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes128_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 192:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes192_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 256:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes256_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        default:
            ret = -1;
        }
    }

    if (ret < 0) {
        EVPerr(EVP_F_AES_T4_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

# define aes_t4_cbc_cipher aes_cbc_cipher
static int aes_t4_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ecb_cipher aes_ecb_cipher
static int aes_t4_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ofb_cipher aes_ofb_cipher
static int aes_t4_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb_cipher aes_cfb_cipher
static int aes_t4_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb8_cipher aes_cfb8_cipher
static int aes_t4_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_cfb1_cipher aes_cfb1_cipher
static int aes_t4_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_ctr_cipher aes_ctr_cipher
static int aes_t4_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        int bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
        aes_t4_set_encrypt_key(key, bits, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
                           (block128_f) aes_t4_encrypt);
        switch (bits) {
        case 128:
            gctx->ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            break;
        case 192:
            gctx->ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            break;
        case 256:
            gctx->ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            break;
        default:
            return 0;
        }
        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aes_t4_gcm_cipher aes_gcm_cipher
static int aes_t4_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
        const int bits = bytes * 8;

        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            EVPerr(EVP_F_AES_T4_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        xctx->stream = NULL;
        /* key_len is two AES keys */
        if (enc) {
            aes_t4_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_encrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_encrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_encrypt;
                break;
            default:
                return 0;
            }
        } else {
            aes_t4_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_decrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_decrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_decrypt;
                break;
            default:
                return 0;
            }
        }

        aes_t4_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aes_t4_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
    }

    return 1;
}

# define aes_t4_xts_cipher aes_xts_cipher
static int aes_t4_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        int bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
        aes_t4_set_encrypt_key(key, bits, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aes_t4_encrypt);
        cctx->str = NULL;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ccm_cipher aes_ccm_cipher
static int aes_t4_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aes_t4_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aes_t4_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                   &octx->ksenc.ks);
            aes_t4_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                   &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aes_t4_encrypt,
                                    (block128_f) aes_t4_decrypt,
                                    NULL))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ocb_cipher aes_ocb_cipher
static int aes_t4_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# ifndef OPENSSL_NO_SIV
#  define aes_t4_siv_init_key aes_siv_init_key
#  define aes_t4_siv_cipher aes_siv_cipher
# endif                        /* OPENSSL_NO_SIV */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE,   \
        aes_t4_init_key,                \
        aes_t4_##mode##_cipher,         \
        NULL,                           \
        sizeof(EVP_AES_KEY),            \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,     \
        keylen/8,ivlen,                 \
        flags|EVP_CIPH_##MODE##_MODE,   \
        aes_init_key,                   \
        aes_##mode##_cipher,            \
        NULL,                           \
        sizeof(EVP_AES_KEY),            \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen,                          \
        flags|EVP_CIPH_##MODE##_MODE,   \
        aes_t4_##mode##_init_key,       \
        aes_t4_##mode##_cipher,         \
        aes_##mode##_cleanup,           \
        sizeof(EVP_AES_##MODE##_CTX),   \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen,                          \
        flags|EVP_CIPH_##MODE##_MODE,   \
        aes_##mode##_init_key,          \
        aes_##mode##_cipher,            \
        aes_##mode##_cleanup,           \
        sizeof(EVP_AES_##MODE##_CTX),   \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(OPENSSL_CPUID_OBJ) && defined(__s390__)
/*
 * IBM S390X support
 */
# include "s390x_arch.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-06)
         */
        struct {
            unsigned char k[32];
        } param;
        /* KM-AES parameter block - end */
    } km;
    unsigned int fc;
} S390X_AES_ECB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMO-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMO-AES parameter block - end */
    } kmo;
    unsigned int fc;

    int res;
} S390X_AES_OFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMF-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMF-AES parameter block - end */
    } kmf;
    unsigned int fc;

    int res;
} S390X_AES_CFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMA-GCM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-11)
         */
        struct {
            unsigned char reserved[12];
            union {
                unsigned int w;
                unsigned char b[4];
            } cv;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } t;
            unsigned char h[16];
            unsigned long long taadl;
            unsigned long long tpcl;
            union {
                unsigned long long g[2];
                unsigned int w[4];
            } j0;
            unsigned char k[32];
        } param;
        /* KMA-GCM-AES parameter block - end */
    } kma;
    unsigned int fc;
    int key_set;

    unsigned char *iv;
    int ivlen;
    int iv_set;
    int iv_gen;

    int taglen;

    unsigned char ares[16];
    unsigned char mres[16];
    unsigned char kres[16];
    int areslen;
    int mreslen;
    int kreslen;

    int tls_aad_len;
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
} S390X_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
         * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
         * rounds field is used to store the function code and that the key
         * schedule is not stored (if aes hardware support is detected).
         */
        struct {
            unsigned char pad[16];
            AES_KEY k;
        } key;

        struct {
            /*-
             * KMAC-AES parameter block - begin
             * (see z/Architecture Principles of Operation >= SA22-7832-08)
             */
            struct {
                union {
                    unsigned long long g[2];
                    unsigned char b[16];
                } icv;
                unsigned char k[32];
            } kmac_param;
            /* KMAC-AES parameter block - end */

            union {
                unsigned long long g[2];
                unsigned char b[16];
            } nonce;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } buf;

            unsigned long long blocks;
            int l;
            int m;
            int tls_aad_len;
            int iv_set;
            int tag_set;
            int len_set;
            int key_set;

            unsigned char pad[140];
            unsigned int fc;
        } ccm;
    } aes;
} S390X_AES_CCM_CTX;

/* Convert key size to function code: [16,24,32] -> [18,19,20]. */
# define S390X_AES_FC(keylen)  (S390X_AES_128 + ((((keylen) << 3) - 128) >> 6))
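
/*
 * E.g. keylen = 24: ((24 << 3) - 128) >> 6 = (192 - 128) >> 6 = 1, so the
 * function code is S390X_AES_128 + 1 = S390X_AES_192.
 */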

/* Most modes of operation need km for partial block processing. */
# define S390X_aes_128_CAPABLE (OPENSSL_s390xcap_P.km[0] &      \
                                S390X_CAPBIT(S390X_AES_128))
# define S390X_aes_192_CAPABLE (OPENSSL_s390xcap_P.km[0] &      \
                                S390X_CAPBIT(S390X_AES_192))
# define S390X_aes_256_CAPABLE (OPENSSL_s390xcap_P.km[0] &      \
                                S390X_CAPBIT(S390X_AES_256))

# define s390x_aes_init_key aes_init_key
static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc);

# define S390X_aes_128_cbc_CAPABLE      1       /* checked by callee */
# define S390X_aes_192_cbc_CAPABLE      1
# define S390X_aes_256_cbc_CAPABLE      1
# define S390X_AES_CBC_CTX              EVP_AES_KEY

# define s390x_aes_cbc_init_key aes_init_key

# define s390x_aes_cbc_cipher aes_cbc_cipher
static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

# define S390X_aes_128_ecb_CAPABLE      S390X_aes_128_CAPABLE
# define S390X_aes_192_ecb_CAPABLE      S390X_aes_192_CAPABLE
# define S390X_aes_256_ecb_CAPABLE      S390X_aes_256_CAPABLE

static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->km.param.k, key, keylen);
    return 1;
}

static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);

    s390x_km(in, len, out, cctx->fc, &cctx->km.param);
    return 1;
}

# define S390X_aes_128_ofb_CAPABLE (S390X_aes_128_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kmo[0] &        \
                                     S390X_CAPBIT(S390X_AES_128)))
# define S390X_aes_192_ofb_CAPABLE (S390X_aes_192_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kmo[0] &        \
                                     S390X_CAPBIT(S390X_AES_192)))
# define S390X_aes_256_ofb_CAPABLE (S390X_aes_256_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kmo[0] &        \
                                     S390X_CAPBIT(S390X_AES_256)))

static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    memcpy(cctx->kmo.param.k, key, keylen);
    cctx->fc = S390X_AES_FC(keylen);
    cctx->res = 0;
    return 1;
}

static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    int n = cctx->res;
    int rem;

    while (n && len) {
        *out = *in ^ cctx->kmo.param.cv[n];
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
                 cctx->kmo.param.k);

        while (rem--) {
            out[n] = in[n] ^ cctx->kmo.param.cv[n];
            ++n;
        }
    }

    cctx->res = n;
    return 1;
}

# define S390X_aes_128_cfb_CAPABLE (S390X_aes_128_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kmf[0] &        \
                                     S390X_CAPBIT(S390X_AES_128)))
# define S390X_aes_192_cfb_CAPABLE (S390X_aes_192_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kmf[0] &        \
                                     S390X_CAPBIT(S390X_AES_192)))
# define S390X_aes_256_cfb_CAPABLE (S390X_aes_256_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kmf[0] &        \
                                     S390X_CAPBIT(S390X_AES_256)))

static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 16 << 24;   /* 16 bytes cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    cctx->res = 0;
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);
    int n = cctx->res;
    int rem;
    unsigned char tmp;

    while (n && len) {
        tmp = *in;
        *out = cctx->kmf.param.cv[n] ^ tmp;
        cctx->kmf.param.cv[n] = enc ? *out : tmp;
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
                 S390X_AES_FC(keylen), cctx->kmf.param.k);

        while (rem--) {
            tmp = in[n];
            out[n] = cctx->kmf.param.cv[n] ^ tmp;
            cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
            ++n;
        }
    }

    cctx->res = n;
    return 1;
}

# define S390X_aes_128_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] &        \
                                     S390X_CAPBIT(S390X_AES_128))
# define S390X_aes_192_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] &        \
                                     S390X_CAPBIT(S390X_AES_192))
# define S390X_aes_256_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] &        \
                                     S390X_CAPBIT(S390X_AES_256))

static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *key,
                                   const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 1 << 24;   /* 1 byte cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);

    s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
    return 1;
}

# define S390X_aes_128_cfb1_CAPABLE     0
# define S390X_aes_192_cfb1_CAPABLE     0
# define S390X_aes_256_cfb1_CAPABLE     0

# define s390x_aes_cfb1_init_key aes_init_key

# define s390x_aes_cfb1_cipher aes_cfb1_cipher
static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len);

# define S390X_aes_128_ctr_CAPABLE      1       /* checked by callee */
# define S390X_aes_192_ctr_CAPABLE      1
# define S390X_aes_256_ctr_CAPABLE      1
# define S390X_AES_CTR_CTX              EVP_AES_KEY

# define s390x_aes_ctr_init_key aes_init_key

# define s390x_aes_ctr_cipher aes_ctr_cipher
static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

# define S390X_aes_128_gcm_CAPABLE (S390X_aes_128_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kma[0] &        \
                                     S390X_CAPBIT(S390X_AES_128)))
# define S390X_aes_192_gcm_CAPABLE (S390X_aes_192_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kma[0] &        \
                                     S390X_CAPBIT(S390X_AES_192)))
# define S390X_aes_256_gcm_CAPABLE (S390X_aes_256_CAPABLE &&    \
                                    (OPENSSL_s390xcap_P.kma[0] &        \
                                     S390X_CAPBIT(S390X_AES_256)))

/* iv + padding length for iv lengths != 12 */
# define S390X_gcm_ivpadlen(i)  ((((i) + 15) >> 4 << 4) + 16)
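
/*
 * E.g. a 13-byte iv is rounded up to one 16-byte block plus a 16-byte
 * length block: ((13 + 15) >> 4 << 4) + 16 = 32.
 */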

/*-
 * Process additional authenticated data. Returns 0 on success. Code is
 * big-endian.
 */
static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
                             size_t len)
{
    unsigned long long alen;
    int n, rem;

    if (ctx->kma.param.tpcl)
        return -2;

    alen = ctx->kma.param.taadl + len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->kma.param.taadl = alen;

    n = ctx->areslen;
    if (n) {
        while (n && len) {
            ctx->ares[n] = *aad;
            n = (n + 1) & 0xf;
            ++aad;
            --len;
        }
        /* ctx->ares contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
        }
        ctx->areslen = n;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
        aad += len;
        ctx->fc |= S390X_KMA_HS;
    }

    if (rem) {
        ctx->areslen = rem;

        do {
            --rem;
            ctx->ares[rem] = aad[rem];
        } while (rem);
    }
    return 0;
}

/*-
 * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
 * success. Code is big-endian.
 */
static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len)
{
    const unsigned char *inptr;
    unsigned long long mlen;
    union {
        unsigned int w[4];
        unsigned char b[16];
    } buf;
    size_t inlen;
    int n, rem, i;

    mlen = ctx->kma.param.tpcl + len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->kma.param.tpcl = mlen;

    n = ctx->mreslen;
    if (n) {
        inptr = in;
        inlen = len;
        while (n && inlen) {
            ctx->mres[n] = *inptr;
            n = (n + 1) & 0xf;
            ++inptr;
            --inlen;
        }
        /* ctx->mres contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
            ctx->areslen = 0;

            /* previous call already encrypted/decrypted its remainder,
             * see comment below */
            n = ctx->mreslen;
            while (n) {
                *out = buf.b[n];
                n = (n + 1) & 0xf;
                ++out;
                ++in;
                --len;
            }
            ctx->mreslen = 0;
        }
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
        in += len;
        out += len;
        ctx->fc |= S390X_KMA_HS;
        ctx->areslen = 0;
    }

    /*-
     * If there is a remainder, it has to be saved such that it can be
     * processed by kma later. However, we also have to do the for-now
     * unauthenticated encryption/decryption part here and now...
     */
    if (rem) {
        if (!ctx->mreslen) {
            buf.w[0] = ctx->kma.param.j0.w[0];
            buf.w[1] = ctx->kma.param.j0.w[1];
            buf.w[2] = ctx->kma.param.j0.w[2];
            buf.w[3] = ctx->kma.param.cv.w + 1;
            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
        }

        n = ctx->mreslen;
        for (i = 0; i < rem; i++) {
            ctx->mres[n + i] = in[i];
            out[i] = in[i] ^ ctx->kres[n + i];
        }

        ctx->mreslen += rem;
    }
    return 0;
}

/*-
 * Initialize context structure. Code is big-endian.
 */
static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
                                const unsigned char *iv)
{
    ctx->kma.param.t.g[0] = 0;
    ctx->kma.param.t.g[1] = 0;
    ctx->kma.param.tpcl = 0;
    ctx->kma.param.taadl = 0;
    ctx->mreslen = 0;
    ctx->areslen = 0;
    ctx->kreslen = 0;

    if (ctx->ivlen == 12) {
        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
        ctx->kma.param.j0.w[3] = 1;
        ctx->kma.param.cv.w = 1;
    } else {
        /* ctx->iv has the right size and is already padded. */
        memcpy(ctx->iv, iv, ctx->ivlen);
        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
                  ctx->fc, &ctx->kma.param);
        ctx->fc |= S390X_KMA_HS;

        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
        ctx->kma.param.t.g[0] = 0;
        ctx->kma.param.t.g[1] = 0;
    }
}

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    S390X_AES_GCM_CTX *gctx_out;
    EVP_CIPHER_CTX *out;
    unsigned char *buf, *iv;
    int ivlen, enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        ivlen = EVP_CIPHER_CTX_iv_length(c);
        iv = EVP_CIPHER_CTX_iv_noconst(c);
        gctx->key_set = 0;
        gctx->iv_set = 0;
        gctx->ivlen = ivlen;
        gctx->iv = iv;
        gctx->taglen = -1;
        gctx->iv_gen = 0;
        gctx->tls_aad_len = -1;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        if (arg <= 0)
            return 0;

        if (arg != 12) {
            iv = EVP_CIPHER_CTX_iv_noconst(c);
            len = S390X_gcm_ivpadlen(arg);

            /* Allocate memory for iv if needed. */
            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
                if (gctx->iv != iv)
                    OPENSSL_free(gctx->iv);

                if ((gctx->iv = OPENSSL_malloc(len)) == NULL) {
                    EVPerr(EVP_F_S390X_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
                    return 0;
                }
            }
            /* Add padding. */
            memset(gctx->iv + arg, 0, len - arg - 8);
            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
        }
        gctx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (arg <= 0 || arg > 16 || enc)
            return 0;

        memcpy(buf, ptr, arg);
        gctx->taglen = arg;
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
            return 0;

        memcpy(ptr, gctx->kma.param.t.b, arg);
        return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
        /* Special case: -1 length restores whole iv */
        if (arg == -1) {
            memcpy(gctx->iv, ptr, gctx->ivlen);
            gctx->iv_gen = 1;
            return 1;
        }
        /*
         * Fixed field must be at least 4 bytes and invocation field at least
         * 8.
         */
        if ((arg < 4) || (gctx->ivlen - arg) < 8)
            return 0;

        if (arg)
            memcpy(gctx->iv, ptr, arg);

        enc = EVP_CIPHER_CTX_encrypting(c);
        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
            return 0;

        gctx->iv_gen = 1;
        return 1;

    case EVP_CTRL_GCM_IV_GEN:
        if (gctx->iv_gen == 0 || gctx->key_set == 0)
            return 0;

        s390x_aes_gcm_setiv(gctx, gctx->iv);

        if (arg <= 0 || arg > gctx->ivlen)
            arg = gctx->ivlen;

        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * Invocation field will be at least 8 bytes in size and so no need
         * to check wrap around or increment more than last 8 bytes.
         */
        ctr64_inc(gctx->iv + gctx->ivlen - 8);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
            return 0;

        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
        s390x_aes_gcm_setiv(gctx, gctx->iv);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_AEAD_TLS1_AAD:
        /* Save the aad for later use. */
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        gctx->tls_aad_len = arg;
        gctx->tls_enc_records = 0;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        /* Correct length for explicit iv. */
        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
            return 0;
        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;

        /* If decrypting correct for tag too. */
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (!enc) {
            if (len < EVP_GCM_TLS_TAG_LEN)
                return 0;
            len -= EVP_GCM_TLS_TAG_LEN;
        }
        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;
        /* Extra padding: tag appended to record. */
        return EVP_GCM_TLS_TAG_LEN;

    case EVP_CTRL_COPY:
        out = ptr;
        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);
        iv = EVP_CIPHER_CTX_iv_noconst(c);

        if (gctx->iv == iv) {
            gctx_out->iv = EVP_CIPHER_CTX_iv_noconst(out);
        } else {
            len = S390X_gcm_ivpadlen(gctx->ivlen);

            if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL) {
                EVPerr(EVP_F_S390X_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
                return 0;
            }

            memcpy(gctx_out->iv, gctx->iv, len);
        }
        return 1;

    default:
        return -1;
    }
}

/*-
 * Set key and/or iv. Returns 1 on success. Otherwise 0 is returned.
 */
static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_key_length(ctx);
        memcpy(&gctx->kma.param.k, key, keylen);

        gctx->fc = S390X_AES_FC(keylen);
        if (!enc)
            gctx->fc |= S390X_DECRYPT;

        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;

        if (iv != NULL) {
            s390x_aes_gcm_setiv(gctx, iv);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        if (gctx->key_set)
            s390x_aes_gcm_setiv(gctx, iv);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);

        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);
    int rv = -1;

    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
        return -1;

    /*
     * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
     * Requirements from SP 800-38D". The requirement is for one party to the
     * communication to fail after 2^64 - 1 keys. We do this on the encrypting
     * side only.
     */
1848 if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
1849 EVPerr(EVP_F_S390X_AES_GCM_TLS_CIPHER, EVP_R_TOO_MANY_RECORDS);
1850 goto err;
1851 }
1852
1853 if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
1854 : EVP_CTRL_GCM_SET_IV_INV,
1855 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
1856 goto err;
1857
1858 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
1859 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
1860 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
1861
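/*
 * The KMA parameter block takes the total AAD and payload lengths in
 * bits, hence the shifts by 3 below.
 */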
1862 gctx->kma.param.taadl = gctx->tls_aad_len << 3;
1863 gctx->kma.param.tpcl = len << 3;
1864 s390x_kma(buf, gctx->tls_aad_len, in, len, out,
1865 gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
1866
1867 if (enc) {
1868 memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
1869 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
1870 } else {
1871 if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
1872 EVP_GCM_TLS_TAG_LEN)) {
1873 OPENSSL_cleanse(out, len);
1874 goto err;
1875 }
1876 rv = len;
1877 }
1878 err:
1879 gctx->iv_set = 0;
1880 gctx->tls_aad_len = -1;
1881 return rv;
1882 }
1883
1884 /*-
1885 * Called from EVP layer to initialize context, process additional
1886 * authenticated data, en/de-crypt plain/cipher-text and authenticate
1887 * ciphertext or process a TLS packet, depending on context. Returns bytes
1888 * written on success. Otherwise -1 is returned. Code is big-endian.
1889 */
1890 static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
1891 const unsigned char *in, size_t len)
1892 {
1893 S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
1894 unsigned char *buf, tmp[16];
1895 int enc;
1896
1897 if (!gctx->key_set)
1898 return -1;
1899
1900 if (gctx->tls_aad_len >= 0)
1901 return s390x_aes_gcm_tls_cipher(ctx, out, in, len);
1902
1903 if (!gctx->iv_set)
1904 return -1;
1905
1906 if (in != NULL) {
1907 if (out == NULL) {
1908 if (s390x_aes_gcm_aad(gctx, in, len))
1909 return -1;
1910 } else {
1911 if (s390x_aes_gcm(gctx, in, out, len))
1912 return -1;
1913 }
1914 return len;
1915 } else {
1916 gctx->kma.param.taadl <<= 3;
1917 gctx->kma.param.tpcl <<= 3;
1918 s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
1919 gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
1920 /* Recall that gctx->mres was already en-/decrypted
1921 * and returned to the caller. */
1922 OPENSSL_cleanse(tmp, gctx->mreslen);
1923 gctx->iv_set = 0;
1924
1925 enc = EVP_CIPHER_CTX_encrypting(ctx);
1926 if (enc) {
1927 gctx->taglen = 16;
1928 } else {
1929 if (gctx->taglen < 0)
1930 return -1;
1931
1932 buf = EVP_CIPHER_CTX_buf_noconst(ctx);
1933 if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
1934 return -1;
1935 }
1936 return 0;
1937 }
1938 }
1939
1940 static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
1941 {
1942 S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
1943 const unsigned char *iv;
1944
1945 if (gctx == NULL)
1946 return 0;
1947
1948 iv = EVP_CIPHER_CTX_iv(c);
1949 if (iv != gctx->iv)
1950 OPENSSL_free(gctx->iv);
1951
1952 OPENSSL_cleanse(gctx, sizeof(*gctx));
1953 return 1;
1954 }
1955
1956 # define S390X_AES_XTS_CTX EVP_AES_XTS_CTX
1957 # define S390X_aes_128_xts_CAPABLE 1 /* checked by callee */
1958 # define S390X_aes_256_xts_CAPABLE 1
1959
1960 # define s390x_aes_xts_init_key aes_xts_init_key
1961 static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
1962 const unsigned char *key,
1963 const unsigned char *iv, int enc);
1964 # define s390x_aes_xts_cipher aes_xts_cipher
1965 static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
1966 const unsigned char *in, size_t len);
1967 # define s390x_aes_xts_ctrl aes_xts_ctrl
1968 static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
1969 # define s390x_aes_xts_cleanup aes_xts_cleanup
1970
1971 # define S390X_aes_128_ccm_CAPABLE (S390X_aes_128_CAPABLE && \
1972 (OPENSSL_s390xcap_P.kmac[0] & \
1973 S390X_CAPBIT(S390X_AES_128)))
1974 # define S390X_aes_192_ccm_CAPABLE (S390X_aes_192_CAPABLE && \
1975 (OPENSSL_s390xcap_P.kmac[0] & \
1976 S390X_CAPBIT(S390X_AES_192)))
1977 # define S390X_aes_256_ccm_CAPABLE (S390X_aes_256_CAPABLE && \
1978 (OPENSSL_s390xcap_P.kmac[0] & \
1979 S390X_CAPBIT(S390X_AES_256)))
1980
1981 # define S390X_CCM_AAD_FLAG 0x40
1982
1983 /*-
1984 * Set nonce and length fields. Code is big-endian.
1985 */
1986 static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
1987 const unsigned char *nonce,
1988 size_t mlen)
1989 {
1990 ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
1991 ctx->aes.ccm.nonce.g[1] = mlen;
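/*-
 * Writing mlen into g[1] stores the 64-bit length big-endian in bytes
 * 8..15; the nonce copy below then overwrites all but the last
 * ctx->aes.ccm.l of those bytes, leaving exactly the encoded length
 * field required by the CCM nonce block.
 */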
1992 memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
1993 }
1994
1995 /*-
1996 * Process additional authenticated data. Code is big-endian.
1997 */
1998 static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
1999 size_t alen)
2000 {
2001 unsigned char *ptr;
2002 int i, rem;
2003
2004 if (!alen)
2005 return;
2006
2007 ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;
2008
2009 /* Suppress 'type-punned pointer dereference' warning. */
2010 ptr = ctx->aes.ccm.buf.b;
2011
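/*-
 * Encode the AAD length as per RFC 3610 / SP 800-38C:
 *   alen < 0xff00:   2-byte length
 *   alen < 2^32:     0xfffe prefix, 4-byte length
 *   otherwise:       0xffff prefix, 8-byte length
 */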
2012 if (alen < ((1 << 16) - (1 << 8))) {
2013 *(uint16_t *)ptr = alen;
2014 i = 2;
2015 } else if (sizeof(alen) == 8
2016 && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
2017 *(uint16_t *)ptr = 0xffff;
2018 *(uint64_t *)(ptr + 2) = alen;
2019 i = 10;
2020 } else {
2021 *(uint16_t *)ptr = 0xfffe;
2022 *(uint32_t *)(ptr + 2) = alen;
2023 i = 6;
2024 }
2025
2026 while (i < 16 && alen) {
2027 ctx->aes.ccm.buf.b[i] = *aad;
2028 ++aad;
2029 --alen;
2030 ++i;
2031 }
2032 while (i < 16) {
2033 ctx->aes.ccm.buf.b[i] = 0;
2034 ++i;
2035 }
2036
2037 ctx->aes.ccm.kmac_param.icv.g[0] = 0;
2038 ctx->aes.ccm.kmac_param.icv.g[1] = 0;
2039 s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
2040 &ctx->aes.ccm.kmac_param);
2041 ctx->aes.ccm.blocks += 2;
2042
2043 rem = alen & 0xf;
2044 alen &= ~(size_t)0xf;
2045 if (alen) {
2046 s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
2047 ctx->aes.ccm.blocks += alen >> 4;
2048 aad += alen;
2049 }
2050 if (rem) {
2051 for (i = 0; i < rem; i++)
2052 ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];
2053
2054 s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
2055 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
2056 ctx->aes.ccm.kmac_param.k);
2057 ctx->aes.ccm.blocks++;
2058 }
2059 }
2060
2061 /*-
2062 * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
2063 * success.
2064 */
2065 static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
2066 unsigned char *out, size_t len, int enc)
2067 {
2068 size_t n, rem;
2069 unsigned int i, l, num;
2070 unsigned char flags;
2071
2072 flags = ctx->aes.ccm.nonce.b[0];
2073 if (!(flags & S390X_CCM_AAD_FLAG)) {
2074 s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
2075 ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
2076 ctx->aes.ccm.blocks++;
2077 }
2078 l = flags & 0x7;
2079 ctx->aes.ccm.nonce.b[0] = l;
2080
2081 /*-
2082 * Reconstruct the message length from the encoded length field
2083 * and initialize the counter portion of the nonce.
2084 */
2085 n = 0;
2086 for (i = 15 - l; i < 15; i++) {
2087 n |= ctx->aes.ccm.nonce.b[i];
2088 ctx->aes.ccm.nonce.b[i] = 0;
2089 n <<= 8;
2090 }
2091 n |= ctx->aes.ccm.nonce.b[15];
2092 ctx->aes.ccm.nonce.b[15] = 1;
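/*
 * Counter value 1 is used for the first payload block; counter 0 is
 * reserved for encrypting the tag (see the 'encrypt tag' step below).
 */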
2093
2094 if (n != len)
2095 return -1; /* length mismatch */
2096
2097 if (enc) {
2098 /* Two operations per block plus one for tag encryption */
2099 ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
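/*
 * SP 800-38C, Appendix B, caps the total number of block cipher
 * invocations under one key at 2^61.
 */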
2100 if (ctx->aes.ccm.blocks > (1ULL << 61))
2101 return -2; /* too much data */
2102 }
2103
2104 num = 0;
2105 rem = len & 0xf;
2106 len &= ~(size_t)0xf;
2107
2108 if (enc) {
2109 /* mac-then-encrypt */
2110 if (len)
2111 s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
2112 if (rem) {
2113 for (i = 0; i < rem; i++)
2114 ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];
2115
2116 s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
2117 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
2118 ctx->aes.ccm.kmac_param.k);
2119 }
2120
2121 CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
2122 ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
2123 &num, (ctr128_f)AES_ctr32_encrypt);
2124 } else {
2125 /* decrypt-then-mac */
2126 CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
2127 ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
2128 &num, (ctr128_f)AES_ctr32_encrypt);
2129
2130 if (len)
2131 s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
2132 if (rem) {
2133 for (i = 0; i < rem; i++)
2134 ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];
2135
2136 s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
2137 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
2138 ctx->aes.ccm.kmac_param.k);
2139 }
2140 }
2141 /* encrypt tag */
2142 for (i = 15 - l; i < 16; i++)
2143 ctx->aes.ccm.nonce.b[i] = 0;
2144
2145 s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
2146 ctx->aes.ccm.kmac_param.k);
2147 ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
2148 ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];
2149
2150 ctx->aes.ccm.nonce.b[0] = flags; /* restore flags field */
2151 return 0;
2152 }
2153
2154 /*-
2155 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
2156 * if successful. Otherwise -1 is returned.
2157 */
2158 static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2159 const unsigned char *in, size_t len)
2160 {
2161 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2162 unsigned char *ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
2163 unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
2164 const int enc = EVP_CIPHER_CTX_encrypting(ctx);
2165
2166 if (out != in
2167 || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
2168 return -1;
2169
2170 if (enc) {
2171 /* Set explicit iv (sequence number). */
2172 memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
2173 }
2174
2175 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
2176 /*-
2177 * Get explicit iv (sequence number). We already have fixed iv
2178 * (server/client_write_iv) here.
2179 */
2180 memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
2181 s390x_aes_ccm_setiv(cctx, ivec, len);
2182
2183 /* Process aad (sequence number|type|version|length) */
2184 s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);
2185
2186 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
2187 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
2188
2189 if (enc) {
2190 if (s390x_aes_ccm(cctx, in, out, len, enc))
2191 return -1;
2192
2193 memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2194 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
2195 } else {
2196 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2197 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
2198 cctx->aes.ccm.m))
2199 return len;
2200 }
2201
2202 OPENSSL_cleanse(out, len);
2203 return -1;
2204 }
2205 }
2206
2207 /*-
2208 * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
2209 * returned.
2210 */
2211 static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
2212 const unsigned char *key,
2213 const unsigned char *iv, int enc)
2214 {
2215 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2216 unsigned char *ivec;
2217 int keylen;
2218
2219 if (iv == NULL && key == NULL)
2220 return 1;
2221
2222 if (key != NULL) {
2223 keylen = EVP_CIPHER_CTX_key_length(ctx);
2224 cctx->aes.ccm.fc = S390X_AES_FC(keylen);
2225 memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);
2226
2227 /* Store encoded m and l. */
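/*-
 * RFC 3610 flags octet: bits 0-2 hold L-1, bits 3-5 hold (M-2)/2.
 * E.g. the defaults l = 8, m = 12 encode as 0x07 | (0x05 << 3) = 0x2f.
 */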
2228 cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
2229 | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
2230 memset(cctx->aes.ccm.nonce.b + 1, 0,
2231 sizeof(cctx->aes.ccm.nonce.b) - 1); /* zero bytes 1..15 only */
2232 cctx->aes.ccm.blocks = 0;
2233
2234 cctx->aes.ccm.key_set = 1;
2235 }
2236
2237 if (iv != NULL) {
2238 ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
2239 memcpy(ivec, iv, 15 - cctx->aes.ccm.l);
2240
2241 cctx->aes.ccm.iv_set = 1;
2242 }
2243
2244 return 1;
2245 }
2246
2247 /*-
2248 * Called from EVP layer to initialize context, process additional
2249 * authenticated data, en/de-crypt plain/cipher-text and authenticate
2250 * plaintext or process a TLS packet, depending on context. Returns bytes
2251 * written on success. Otherwise -1 is returned.
2252 */
2253 static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2254 const unsigned char *in, size_t len)
2255 {
2256 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2257 const int enc = EVP_CIPHER_CTX_encrypting(ctx);
2258 int rv;
2259 unsigned char *buf, *ivec;
2260
2261 if (!cctx->aes.ccm.key_set)
2262 return -1;
2263
2264 if (cctx->aes.ccm.tls_aad_len >= 0)
2265 return s390x_aes_ccm_tls_cipher(ctx, out, in, len);
2266
2267 /*-
2268 * Final(): Does not return any data. Recall that ccm is mac-then-encrypt
2269 * so integrity must already be checked at Update(), i.e., before
2270 * potentially corrupted data is output.
2271 */
2272 if (in == NULL && out != NULL)
2273 return 0;
2274
2275 if (!cctx->aes.ccm.iv_set)
2276 return -1;
2277
2278 if (out == NULL) {
2279 /* Update(): Pass message length. */
2280 if (in == NULL) {
2281 ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
2282 s390x_aes_ccm_setiv(cctx, ivec, len);
2283
2284 cctx->aes.ccm.len_set = 1;
2285 return len;
2286 }
2287
2288 /* Update(): Process aad. */
2289 if (!cctx->aes.ccm.len_set && len)
2290 return -1;
2291
2292 s390x_aes_ccm_aad(cctx, in, len);
2293 return len;
2294 }
2295
2296 /* The tag must be set before actually decrypting data */
2297 if (!enc && !cctx->aes.ccm.tag_set)
2298 return -1;
2299
2300 /* Update(): Process message. */
2301
2302 if (!cctx->aes.ccm.len_set) {
2303 /*-
2304 * In case message length was not previously set explicitly via
2305 * Update(), set it now.
2306 */
2307 ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
2308 s390x_aes_ccm_setiv(cctx, ivec, len);
2309
2310 cctx->aes.ccm.len_set = 1;
2311 }
2312
2313 if (enc) {
2314 if (s390x_aes_ccm(cctx, in, out, len, enc))
2315 return -1;
2316
2317 cctx->aes.ccm.tag_set = 1;
2318 return len;
2319 } else {
2320 rv = -1;
2321
2322 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2323 buf = EVP_CIPHER_CTX_buf_noconst(ctx);
2324 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
2325 cctx->aes.ccm.m))
2326 rv = len;
2327 }
2328
2329 if (rv == -1)
2330 OPENSSL_cleanse(out, len);
2331
2332 cctx->aes.ccm.iv_set = 0;
2333 cctx->aes.ccm.tag_set = 0;
2334 cctx->aes.ccm.len_set = 0;
2335 return rv;
2336 }
2337 }
2338
2339 /*-
2340 * Performs various operations on the context structure depending on control
2341 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
2342 * Code is big-endian.
2343 */
2344 static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2345 {
2346 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
2347 unsigned char *buf, *iv;
2348 int enc, len;
2349
2350 switch (type) {
2351 case EVP_CTRL_INIT:
2352 cctx->aes.ccm.key_set = 0;
2353 cctx->aes.ccm.iv_set = 0;
2354 cctx->aes.ccm.l = 8;
2355 cctx->aes.ccm.m = 12;
2356 cctx->aes.ccm.tag_set = 0;
2357 cctx->aes.ccm.len_set = 0;
2358 cctx->aes.ccm.tls_aad_len = -1;
2359 return 1;
2360
2361 case EVP_CTRL_AEAD_TLS1_AAD:
2362 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2363 return 0;
2364
2365 /* Save the aad for later use. */
2366 buf = EVP_CIPHER_CTX_buf_noconst(c);
2367 memcpy(buf, ptr, arg);
2368 cctx->aes.ccm.tls_aad_len = arg;
2369
2370 len = buf[arg - 2] << 8 | buf[arg - 1];
2371 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
2372 return 0;
2373
2374 /* Correct length for explicit iv. */
2375 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
2376
2377 enc = EVP_CIPHER_CTX_encrypting(c);
2378 if (!enc) {
2379 if (len < cctx->aes.ccm.m)
2380 return 0;
2381
2382 /* Correct length for tag. */
2383 len -= cctx->aes.ccm.m;
2384 }
2385
2386 buf[arg - 2] = len >> 8;
2387 buf[arg - 1] = len & 0xff;
2388
2389 /* Extra padding: tag appended to record. */
2390 return cctx->aes.ccm.m;
2391
2392 case EVP_CTRL_CCM_SET_IV_FIXED:
2393 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
2394 return 0;
2395
2396 /* Copy to first part of the iv. */
2397 iv = EVP_CIPHER_CTX_iv_noconst(c);
2398 memcpy(iv, ptr, arg);
2399 return 1;
2400
2401 case EVP_CTRL_AEAD_SET_IVLEN:
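/* The nonce (iv) length and L are coupled: ivlen = 15 - L (RFC 3610). */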
2402 arg = 15 - arg;
2403 /* fall-through */
2404
2405 case EVP_CTRL_CCM_SET_L:
2406 if (arg < 2 || arg > 8)
2407 return 0;
2408
2409 cctx->aes.ccm.l = arg;
2410 return 1;
2411
2412 case EVP_CTRL_AEAD_SET_TAG:
2413 if ((arg & 1) || arg < 4 || arg > 16)
2414 return 0;
2415
2416 enc = EVP_CIPHER_CTX_encrypting(c);
2417 if (enc && ptr)
2418 return 0;
2419
2420 if (ptr) {
2421 cctx->aes.ccm.tag_set = 1;
2422 buf = EVP_CIPHER_CTX_buf_noconst(c);
2423 memcpy(buf, ptr, arg);
2424 }
2425
2426 cctx->aes.ccm.m = arg;
2427 return 1;
2428
2429 case EVP_CTRL_AEAD_GET_TAG:
2430 enc = EVP_CIPHER_CTX_encrypting(c);
2431 if (!enc || !cctx->aes.ccm.tag_set)
2432 return 0;
2433
2434 if (arg < cctx->aes.ccm.m)
2435 return 0;
2436
2437 memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2438 cctx->aes.ccm.tag_set = 0;
2439 cctx->aes.ccm.iv_set = 0;
2440 cctx->aes.ccm.len_set = 0;
2441 return 1;
2442
2443 case EVP_CTRL_COPY:
2444 return 1;
2445
2446 default:
2447 return -1;
2448 }
2449 }
2450
2451 # define s390x_aes_ccm_cleanup aes_ccm_cleanup
2452
2453 # ifndef OPENSSL_NO_OCB
2454 # define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
2455 # define S390X_aes_128_ocb_CAPABLE 0
2456 # define S390X_aes_192_ocb_CAPABLE 0
2457 # define S390X_aes_256_ocb_CAPABLE 0
2458
2459 # define s390x_aes_ocb_init_key aes_ocb_init_key
2460 static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2461 const unsigned char *iv, int enc);
2462 # define s390x_aes_ocb_cipher aes_ocb_cipher
2463 static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2464 const unsigned char *in, size_t len);
2465 # define s390x_aes_ocb_cleanup aes_ocb_cleanup
2466 static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
2467 # define s390x_aes_ocb_ctrl aes_ocb_ctrl
2468 static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
2469 # endif
2470
2471 # ifndef OPENSSL_NO_SIV
2472 # define S390X_AES_SIV_CTX EVP_AES_SIV_CTX
2473 # define S390X_aes_128_siv_CAPABLE 0
2474 # define S390X_aes_192_siv_CAPABLE 0
2475 # define S390X_aes_256_siv_CAPABLE 0
2476
2477 # define s390x_aes_siv_init_key aes_siv_init_key
2478 # define s390x_aes_siv_cipher aes_siv_cipher
2479 # define s390x_aes_siv_cleanup aes_siv_cleanup
2480 # define s390x_aes_siv_ctrl aes_siv_ctrl
2481 # endif
2482
2483 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
2484 MODE,flags) \
2485 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2486 nid##_##keylen##_##nmode,blocksize, \
2487 keylen / 8, \
2488 ivlen, \
2489 flags | EVP_CIPH_##MODE##_MODE, \
2490 s390x_aes_##mode##_init_key, \
2491 s390x_aes_##mode##_cipher, \
2492 NULL, \
2493 sizeof(S390X_AES_##MODE##_CTX), \
2494 NULL, \
2495 NULL, \
2496 NULL, \
2497 NULL \
2498 }; \
2499 static const EVP_CIPHER aes_##keylen##_##mode = { \
2500 nid##_##keylen##_##nmode, \
2501 blocksize, \
2502 keylen / 8, \
2503 ivlen, \
2504 flags | EVP_CIPH_##MODE##_MODE, \
2505 aes_init_key, \
2506 aes_##mode##_cipher, \
2507 NULL, \
2508 sizeof(EVP_AES_KEY), \
2509 NULL, \
2510 NULL, \
2511 NULL, \
2512 NULL \
2513 }; \
2514 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2515 { \
2516 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2517 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2518 }
2519
2520 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
2521 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2522 nid##_##keylen##_##mode, \
2523 blocksize, \
2524 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2525 ivlen, \
2526 flags | EVP_CIPH_##MODE##_MODE, \
2527 s390x_aes_##mode##_init_key, \
2528 s390x_aes_##mode##_cipher, \
2529 s390x_aes_##mode##_cleanup, \
2530 sizeof(S390X_AES_##MODE##_CTX), \
2531 NULL, \
2532 NULL, \
2533 s390x_aes_##mode##_ctrl, \
2534 NULL \
2535 }; \
2536 static const EVP_CIPHER aes_##keylen##_##mode = { \
2537 nid##_##keylen##_##mode,blocksize, \
2538 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2539 ivlen, \
2540 flags | EVP_CIPH_##MODE##_MODE, \
2541 aes_##mode##_init_key, \
2542 aes_##mode##_cipher, \
2543 aes_##mode##_cleanup, \
2544 sizeof(EVP_AES_##MODE##_CTX), \
2545 NULL, \
2546 NULL, \
2547 aes_##mode##_ctrl, \
2548 NULL \
2549 }; \
2550 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2551 { \
2552 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2553 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2554 }
2555
2556 #else
2557
2558 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
2559 static const EVP_CIPHER aes_##keylen##_##mode = { \
2560 nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
2561 flags|EVP_CIPH_##MODE##_MODE, \
2562 aes_init_key, \
2563 aes_##mode##_cipher, \
2564 NULL, \
2565 sizeof(EVP_AES_KEY), \
2566 NULL,NULL,NULL,NULL }; \
2567 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2568 { return &aes_##keylen##_##mode; }
2569
2570 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
2571 static const EVP_CIPHER aes_##keylen##_##mode = { \
2572 nid##_##keylen##_##mode,blocksize, \
2573 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
2574 ivlen, \
2575 flags|EVP_CIPH_##MODE##_MODE, \
2576 aes_##mode##_init_key, \
2577 aes_##mode##_cipher, \
2578 aes_##mode##_cleanup, \
2579 sizeof(EVP_AES_##MODE##_CTX), \
2580 NULL,NULL,aes_##mode##_ctrl,NULL }; \
2581 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2582 { return &aes_##keylen##_##mode; }
2583
2584 #endif
2585
2586 #if defined(OPENSSL_CPUID_OBJ) && (defined(__arm__) || defined(__arm) || defined(__aarch64__))
2587 # include "arm_arch.h"
2588 # if __ARM_MAX_ARCH__>=7
2589 # if defined(BSAES_ASM)
2590 # define BSAES_CAPABLE (OPENSSL_armcap_P & ARMV7_NEON)
2591 # endif
2592 # if defined(VPAES_ASM)
2593 # define VPAES_CAPABLE (OPENSSL_armcap_P & ARMV7_NEON)
2594 # endif
2595 # define HWAES_CAPABLE (OPENSSL_armcap_P & ARMV8_AES)
2596 # define HWAES_set_encrypt_key aes_v8_set_encrypt_key
2597 # define HWAES_set_decrypt_key aes_v8_set_decrypt_key
2598 # define HWAES_encrypt aes_v8_encrypt
2599 # define HWAES_decrypt aes_v8_decrypt
2600 # define HWAES_cbc_encrypt aes_v8_cbc_encrypt
2601 # define HWAES_ctr32_encrypt_blocks aes_v8_ctr32_encrypt_blocks
2602 # endif
2603 #endif
2604
2605 #if defined(HWAES_CAPABLE)
2606 int HWAES_set_encrypt_key(const unsigned char *userKey, const int bits,
2607 AES_KEY *key);
2608 int HWAES_set_decrypt_key(const unsigned char *userKey, const int bits,
2609 AES_KEY *key);
2610 void HWAES_encrypt(const unsigned char *in, unsigned char *out,
2611 const AES_KEY *key);
2612 void HWAES_decrypt(const unsigned char *in, unsigned char *out,
2613 const AES_KEY *key);
2614 void HWAES_cbc_encrypt(const unsigned char *in, unsigned char *out,
2615 size_t length, const AES_KEY *key,
2616 unsigned char *ivec, const int enc);
2617 void HWAES_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
2618 size_t len, const AES_KEY *key,
2619 const unsigned char ivec[16]);
2620 void HWAES_xts_encrypt(const unsigned char *inp, unsigned char *out,
2621 size_t len, const AES_KEY *key1,
2622 const AES_KEY *key2, const unsigned char iv[16]);
2623 void HWAES_xts_decrypt(const unsigned char *inp, unsigned char *out,
2624 size_t len, const AES_KEY *key1,
2625 const AES_KEY *key2, const unsigned char iv[16]);
2626 #endif
2627
2628 #define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
2629 BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2630 BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2631 BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2632 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2633 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags) \
2634 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags) \
2635 BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
2636
2637 static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2638 const unsigned char *iv, int enc)
2639 {
2640 int ret, mode;
2641 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2642
2643 mode = EVP_CIPHER_CTX_mode(ctx);
2644 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
2645 && !enc) {
2646 #ifdef HWAES_CAPABLE
2647 if (HWAES_CAPABLE) {
2648 ret = HWAES_set_decrypt_key(key,
2649 EVP_CIPHER_CTX_key_length(ctx) * 8,
2650 &dat->ks.ks);
2651 dat->block = (block128_f) HWAES_decrypt;
2652 dat->stream.cbc = NULL;
2653 # ifdef HWAES_cbc_encrypt
2654 if (mode == EVP_CIPH_CBC_MODE)
2655 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2656 # endif
2657 } else
2658 #endif
2659 #ifdef BSAES_CAPABLE
2660 if (BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE) {
2661 ret = AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2662 &dat->ks.ks);
2663 dat->block = (block128_f) AES_decrypt;
2664 dat->stream.cbc = (cbc128_f) bsaes_cbc_encrypt;
2665 } else
2666 #endif
2667 #ifdef VPAES_CAPABLE
2668 if (VPAES_CAPABLE) {
2669 ret = vpaes_set_decrypt_key(key,
2670 EVP_CIPHER_CTX_key_length(ctx) * 8,
2671 &dat->ks.ks);
2672 dat->block = (block128_f) vpaes_decrypt;
2673 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2674 (cbc128_f) vpaes_cbc_encrypt : NULL;
2675 } else
2676 #endif
2677 {
2678 ret = AES_set_decrypt_key(key,
2679 EVP_CIPHER_CTX_key_length(ctx) * 8,
2680 &dat->ks.ks);
2681 dat->block = (block128_f) AES_decrypt;
2682 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2683 (cbc128_f) AES_cbc_encrypt : NULL;
2684 }
2685 } else
2686 #ifdef HWAES_CAPABLE
2687 if (HWAES_CAPABLE) {
2688 ret = HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2689 &dat->ks.ks);
2690 dat->block = (block128_f) HWAES_encrypt;
2691 dat->stream.cbc = NULL;
2692 # ifdef HWAES_cbc_encrypt
2693 if (mode == EVP_CIPH_CBC_MODE)
2694 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2695 else
2696 # endif
2697 # ifdef HWAES_ctr32_encrypt_blocks
2698 if (mode == EVP_CIPH_CTR_MODE)
2699 dat->stream.ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2700 else
2701 # endif
2702 (void)0; /* terminate potentially open 'else' */
2703 } else
2704 #endif
2705 #ifdef BSAES_CAPABLE
2706 if (BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE) {
2707 ret = AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2708 &dat->ks.ks);
2709 dat->block = (block128_f) AES_encrypt;
2710 dat->stream.ctr = (ctr128_f) bsaes_ctr32_encrypt_blocks;
2711 } else
2712 #endif
2713 #ifdef VPAES_CAPABLE
2714 if (VPAES_CAPABLE) {
2715 ret = vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2716 &dat->ks.ks);
2717 dat->block = (block128_f) vpaes_encrypt;
2718 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2719 (cbc128_f) vpaes_cbc_encrypt : NULL;
2720 } else
2721 #endif
2722 {
2723 ret = AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2724 &dat->ks.ks);
2725 dat->block = (block128_f) AES_encrypt;
2726 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2727 (cbc128_f) AES_cbc_encrypt : NULL;
2728 #ifdef AES_CTR_ASM
2729 if (mode == EVP_CIPH_CTR_MODE)
2730 dat->stream.ctr = (ctr128_f) AES_ctr32_encrypt;
2731 #endif
2732 }
2733
2734 if (ret < 0) {
2735 EVPerr(EVP_F_AES_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
2736 return 0;
2737 }
2738
2739 return 1;
2740 }
2741
2742 static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2743 const unsigned char *in, size_t len)
2744 {
2745 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2746
2747 if (dat->stream.cbc)
2748 (*dat->stream.cbc) (in, out, len, &dat->ks,
2749 EVP_CIPHER_CTX_iv_noconst(ctx),
2750 EVP_CIPHER_CTX_encrypting(ctx));
2751 else if (EVP_CIPHER_CTX_encrypting(ctx))
2752 CRYPTO_cbc128_encrypt(in, out, len, &dat->ks,
2753 EVP_CIPHER_CTX_iv_noconst(ctx), dat->block);
2754 else
2755 CRYPTO_cbc128_decrypt(in, out, len, &dat->ks,
2756 EVP_CIPHER_CTX_iv_noconst(ctx), dat->block);
2757
2758 return 1;
2759 }
2760
2761 static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2762 const unsigned char *in, size_t len)
2763 {
2764 size_t bl = EVP_CIPHER_CTX_block_size(ctx);
2765 size_t i;
2766 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2767
2768 if (len < bl)
2769 return 1;
2770
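/*
 * Process every complete block: after 'len -= bl', the condition
 * 'i <= len' still admits the final full block.
 */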
2771 for (i = 0, len -= bl; i <= len; i += bl)
2772 (*dat->block) (in + i, out + i, &dat->ks);
2773
2774 return 1;
2775 }
2776
2777 static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2778 const unsigned char *in, size_t len)
2779 {
2780 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2781
2782 int num = EVP_CIPHER_CTX_num(ctx);
2783 CRYPTO_ofb128_encrypt(in, out, len, &dat->ks,
2784 EVP_CIPHER_CTX_iv_noconst(ctx), &num, dat->block);
2785 EVP_CIPHER_CTX_set_num(ctx, num);
2786 return 1;
2787 }
2788
2789 static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2790 const unsigned char *in, size_t len)
2791 {
2792 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2793
2794 int num = EVP_CIPHER_CTX_num(ctx);
2795 CRYPTO_cfb128_encrypt(in, out, len, &dat->ks,
2796 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2797 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2798 EVP_CIPHER_CTX_set_num(ctx, num);
2799 return 1;
2800 }
2801
2802 static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2803 const unsigned char *in, size_t len)
2804 {
2805 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2806
2807 int num = EVP_CIPHER_CTX_num(ctx);
2808 CRYPTO_cfb128_8_encrypt(in, out, len, &dat->ks,
2809 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2810 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2811 EVP_CIPHER_CTX_set_num(ctx, num);
2812 return 1;
2813 }
2814
2815 static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2816 const unsigned char *in, size_t len)
2817 {
2818 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2819
2820 if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) {
2821 int num = EVP_CIPHER_CTX_num(ctx);
2822 CRYPTO_cfb128_1_encrypt(in, out, len, &dat->ks,
2823 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2824 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2825 EVP_CIPHER_CTX_set_num(ctx, num);
2826 return 1;
2827 }
2828
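/*
 * Chunk the input so the bit count passed below (length * 8) cannot
 * overflow a size_t; MAXBITCHUNK leaves four bits of headroom.
 */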
2829 while (len >= MAXBITCHUNK) {
2830 int num = EVP_CIPHER_CTX_num(ctx);
2831 CRYPTO_cfb128_1_encrypt(in, out, MAXBITCHUNK * 8, &dat->ks,
2832 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2833 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2834 EVP_CIPHER_CTX_set_num(ctx, num);
2835 len -= MAXBITCHUNK;
2836 out += MAXBITCHUNK;
2837 in += MAXBITCHUNK;
2838 }
2839 if (len) {
2840 int num = EVP_CIPHER_CTX_num(ctx);
2841 CRYPTO_cfb128_1_encrypt(in, out, len * 8, &dat->ks,
2842 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2843 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2844 EVP_CIPHER_CTX_set_num(ctx, num);
2845 }
2846
2847 return 1;
2848 }
2849
2850 static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2851 const unsigned char *in, size_t len)
2852 {
2853 unsigned int num = EVP_CIPHER_CTX_num(ctx);
2854 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2855
2856 if (dat->stream.ctr)
2857 CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks,
2858 EVP_CIPHER_CTX_iv_noconst(ctx),
2859 EVP_CIPHER_CTX_buf_noconst(ctx),
2860 &num, dat->stream.ctr);
2861 else
2862 CRYPTO_ctr128_encrypt(in, out, len, &dat->ks,
2863 EVP_CIPHER_CTX_iv_noconst(ctx),
2864 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
2865 dat->block);
2866 EVP_CIPHER_CTX_set_num(ctx, num);
2867 return 1;
2868 }
2869
2870 BLOCK_CIPHER_generic_pack(NID_aes, 128, 0)
2871 BLOCK_CIPHER_generic_pack(NID_aes, 192, 0)
2872 BLOCK_CIPHER_generic_pack(NID_aes, 256, 0)
2873
2874 static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
2875 {
2876 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2877 if (gctx == NULL)
2878 return 0;
2879 OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
2880 if (gctx->iv != EVP_CIPHER_CTX_iv_noconst(c))
2881 OPENSSL_free(gctx->iv);
2882 return 1;
2883 }
2884
2885 static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2886 {
2887 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2888 switch (type) {
2889 case EVP_CTRL_INIT:
2890 gctx->key_set = 0;
2891 gctx->iv_set = 0;
2892 gctx->ivlen = c->cipher->iv_len;
2893 gctx->iv = c->iv;
2894 gctx->taglen = -1;
2895 gctx->iv_gen = 0;
2896 gctx->tls_aad_len = -1;
2897 return 1;
2898
2899 case EVP_CTRL_AEAD_SET_IVLEN:
2900 if (arg <= 0)
2901 return 0;
2902 /* Allocate memory for IV if needed */
2903 if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
2904 if (gctx->iv != c->iv)
2905 OPENSSL_free(gctx->iv);
2906 if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) {
2907 EVPerr(EVP_F_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
2908 return 0;
2909 }
2910 }
2911 gctx->ivlen = arg;
2912 return 1;
2913
2914 case EVP_CTRL_AEAD_SET_TAG:
2915 if (arg <= 0 || arg > 16 || c->encrypt)
2916 return 0;
2917 memcpy(c->buf, ptr, arg);
2918 gctx->taglen = arg;
2919 return 1;
2920
2921 case EVP_CTRL_AEAD_GET_TAG:
2922 if (arg <= 0 || arg > 16 || !c->encrypt
2923 || gctx->taglen < 0)
2924 return 0;
2925 memcpy(ptr, c->buf, arg);
2926 return 1;
2927
2928 case EVP_CTRL_GET_IV:
2929 if (gctx->iv_gen != 1 && gctx->iv_gen_rand != 1)
2930 return 0;
2931 if (gctx->ivlen != arg)
2932 return 0;
2933 memcpy(ptr, gctx->iv, arg);
2934 return 1;
2935
2936 case EVP_CTRL_GCM_SET_IV_FIXED:
2937 /* Special case: -1 length restores whole IV */
2938 if (arg == -1) {
2939 memcpy(gctx->iv, ptr, gctx->ivlen);
2940 gctx->iv_gen = 1;
2941 return 1;
2942 }
2943 /*
2944 * Fixed field must be at least 4 bytes and invocation field at least
2945 * 8.
2946 */
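/*
 * E.g. the default 12-byte IV splits into a 4-byte fixed field and an
 * 8-byte invocation field (SP 800-38D deterministic construction).
 */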
2947 if ((arg < 4) || (gctx->ivlen - arg) < 8)
2948 return 0;
2949 if (arg)
2950 memcpy(gctx->iv, ptr, arg);
2951 if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
2952 return 0;
2953 gctx->iv_gen = 1;
2954 return 1;
2955
2956 case EVP_CTRL_GCM_IV_GEN:
2957 if (gctx->iv_gen == 0 || gctx->key_set == 0)
2958 return 0;
2959 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2960 if (arg <= 0 || arg > gctx->ivlen)
2961 arg = gctx->ivlen;
2962 memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
2963 /*
2964 * Invocation field will be at least 8 bytes in size and so no need
2965 * to check wrap around or increment more than last 8 bytes.
2966 */
2967 ctr64_inc(gctx->iv + gctx->ivlen - 8);
2968 gctx->iv_set = 1;
2969 return 1;
2970
2971 case EVP_CTRL_GCM_SET_IV_INV:
2972 if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
2973 return 0;
2974 memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
2975 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2976 gctx->iv_set = 1;
2977 return 1;
2978
2979 case EVP_CTRL_AEAD_TLS1_AAD:
2980 /* Save the AAD for later use */
2981 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2982 return 0;
2983 memcpy(c->buf, ptr, arg);
2984 gctx->tls_aad_len = arg;
2985 gctx->tls_enc_records = 0;
2986 {
2987 unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
2988 /* Correct length for explicit IV */
2989 if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
2990 return 0;
2991 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
2992 /* If decrypting correct for tag too */
2993 if (!c->encrypt) {
2994 if (len < EVP_GCM_TLS_TAG_LEN)
2995 return 0;
2996 len -= EVP_GCM_TLS_TAG_LEN;
2997 }
2998 c->buf[arg - 2] = len >> 8;
2999 c->buf[arg - 1] = len & 0xff;
3000 }
3001 /* Extra padding: tag appended to record */
3002 return EVP_GCM_TLS_TAG_LEN;
3003
3004 case EVP_CTRL_COPY:
3005 {
3006 EVP_CIPHER_CTX *out = ptr;
3007 EVP_AES_GCM_CTX *gctx_out = EVP_C_DATA(EVP_AES_GCM_CTX,out);
3008 if (gctx->gcm.key) {
3009 if (gctx->gcm.key != &gctx->ks)
3010 return 0;
3011 gctx_out->gcm.key = &gctx_out->ks;
3012 }
3013 if (gctx->iv == c->iv)
3014 gctx_out->iv = out->iv;
3015 else {
3016 if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) {
3017 EVPerr(EVP_F_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
3018 return 0;
3019 }
3020 memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
3021 }
3022 return 1;
3023 }
3024
3025 default:
3026 return -1;
3027
3028 }
3029 }
3030
3031 static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3032 const unsigned char *iv, int enc)
3033 {
3034 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
3035 if (!iv && !key)
3036 return 1;
3037 if (key) {
3038 do {
3039 #ifdef HWAES_CAPABLE
3040 if (HWAES_CAPABLE) {
3041 HWAES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
3042 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
3043 (block128_f) HWAES_encrypt);
3044 # ifdef HWAES_ctr32_encrypt_blocks
3045 gctx->ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
3046 # else
3047 gctx->ctr = NULL;
3048 # endif
3049 break;
3050 } else
3051 #endif
3052 #ifdef BSAES_CAPABLE
3053 if (BSAES_CAPABLE) {
3054 AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
3055 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
3056 (block128_f) AES_encrypt);
3057 gctx->ctr = (ctr128_f) bsaes_ctr32_encrypt_blocks;
3058 break;
3059 } else
3060 #endif
3061 #ifdef VPAES_CAPABLE
3062 if (VPAES_CAPABLE) {
3063 vpaes_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
3064 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
3065 (block128_f) vpaes_encrypt);
3066 gctx->ctr = NULL;
3067 break;
3068 } else
3069 #endif
3070 (void)0; /* terminate potentially open 'else' */
3071
3072 AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
3073 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
3074 (block128_f) AES_encrypt);
3075 #ifdef AES_CTR_ASM
3076 gctx->ctr = (ctr128_f) AES_ctr32_encrypt;
3077 #else
3078 gctx->ctr = NULL;
3079 #endif
3080 } while (0);
3081
3082 /*
3083 * If we have an IV we can set it directly, otherwise use the saved IV.
3084 */
3085 if (iv == NULL && gctx->iv_set)
3086 iv = gctx->iv;
3087 if (iv) {
3088 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
3089 gctx->iv_set = 1;
3090 }
3091 gctx->key_set = 1;
3092 } else {
3093 /* If the key is set use the IV directly, otherwise save a copy */
3094 if (gctx->key_set)
3095 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
3096 else
3097 memcpy(gctx->iv, iv, gctx->ivlen);
3098 gctx->iv_set = 1;
3099 gctx->iv_gen = 0;
3100 }
3101 return 1;
3102 }
3103
3104 /*
3105 * Handle TLS GCM packet format. This consists of the last portion of the IV
3106 * followed by the payload and finally the tag. On encrypt generate IV,
3107 * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
3108 * and verify tag.
3109 */
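/*-
 * Record layout handled below, with the TLS-fixed sizes
 * EVP_GCM_TLS_EXPLICIT_IV_LEN == 8 and EVP_GCM_TLS_TAG_LEN == 16:
 *
 *   | explicit IV (8 bytes) | payload | tag (16 bytes) |
 */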
3110
3111 static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3112 const unsigned char *in, size_t len)
3113 {
3114 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
3115 int rv = -1;
3116 /* Encrypt/decrypt must be performed in place */
3117 if (out != in
3118 || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
3119 return -1;
3120
3121 /*
3122 * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
3123 * Requirements from SP 800-38D". The requirement is for one party to the
3124 * communication to fail after 2^64 - 1 records. We do this on the encrypting
3125 * side only.
3126 */
3127 if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
3128 EVPerr(EVP_F_AES_GCM_TLS_CIPHER, EVP_R_TOO_MANY_RECORDS);
3129 goto err;
3130 }
3131
3132 /*
3133 * Set IV from start of buffer or generate IV and write to start of
3134 * buffer.
3135 */
3136 if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? EVP_CTRL_GCM_IV_GEN
3137 : EVP_CTRL_GCM_SET_IV_INV,
3138 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
3139 goto err;
3140 /* Use saved AAD */
3141 if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len))
3142 goto err;
3143 /* Fix buffer and length to point to payload */
3144 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
3145 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
3146 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
3147 if (ctx->encrypt) {
3148 /* Encrypt payload */
3149 if (gctx->ctr) {
3150 size_t bulk = 0;
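/*
 * 'bulk' counts the bytes handled by the stitched AES-GCM assembly;
 * any remainder falls through to the generic CTR32 path below.
 */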
3151 #if defined(AES_GCM_ASM)
3152 if (len >= 32 && AES_GCM_ASM(gctx)) {
3153 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
3154 return -1;
3155
3156 bulk = AES_gcm_encrypt(in, out, len,
3157 gctx->gcm.key,
3158 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3159 gctx->gcm.len.u[1] += bulk;
3160 }
3161 #endif
3162 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
3163 in + bulk,
3164 out + bulk,
3165 len - bulk, gctx->ctr))
3166 goto err;
3167 } else {
3168 size_t bulk = 0;
3169 #if defined(AES_GCM_ASM2)
3170 if (len >= 32 && AES_GCM_ASM2(gctx)) {
3171 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
3172 return -1;
3173
3174 bulk = AES_gcm_encrypt(in, out, len,
3175 gctx->gcm.key,
3176 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3177 gctx->gcm.len.u[1] += bulk;
3178 }
3179 #endif
3180 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3181 in + bulk, out + bulk, len - bulk))
3182 goto err;
3183 }
3184 out += len;
3185 /* Finally write tag */
3186 CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN);
3187 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
3188 } else {
3189 /* Decrypt */
3190 if (gctx->ctr) {
3191 size_t bulk = 0;
3192 #if defined(AES_GCM_ASM)
3193 if (len >= 16 && AES_GCM_ASM(gctx)) {
3194 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
3195 return -1;
3196
3197 bulk = AES_gcm_decrypt(in, out, len,
3198 gctx->gcm.key,
3199 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3200 gctx->gcm.len.u[1] += bulk;
3201 }
3202 #endif
3203 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3204 in + bulk,
3205 out + bulk,
3206 len - bulk, gctx->ctr))
3207 goto err;
3208 } else {
3209 size_t bulk = 0;
3210 #if defined(AES_GCM_ASM2)
3211 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3212 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
3213 return -1;
3214
3215 bulk = AES_gcm_decrypt(in, out, len,
3216 gctx->gcm.key,
3217 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3218 gctx->gcm.len.u[1] += bulk;
3219 }
3220 #endif
3221 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3222 in + bulk, out + bulk, len - bulk))
3223 goto err;
3224 }
3225 /* Retrieve tag */
3226 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN);
3227 /* If tag mismatch wipe buffer */
3228 if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) {
3229 OPENSSL_cleanse(out, len);
3230 goto err;
3231 }
3232 rv = len;
3233 }
3234
3235 err:
3236 gctx->iv_set = 0;
3237 gctx->tls_aad_len = -1;
3238 return rv;
3239 }
3240
3241 #ifdef FIPS_MODE
3242 /*
3243 * See SP 800-38D (GCM) Section 8 "Uniqueness requirement on IVs and keys"
3244 *
3245 * See also 8.2.2 RBG-based construction.
3246 * Random construction consists of a free field (which can be NULL) and a
3247 * random field generated by a DRBG providing at least 96 bits of
3248 * security strength. (The DRBG must be seeded by the FIPS module.)
3249 */
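/*-
 * E.g. with the default 12-byte IV and offset 0, the full 96-bit IV is
 * drawn from the DRBG; a caller-supplied fixed field (offset > 0)
 * shrinks the random field accordingly.
 */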
3250 static int aes_gcm_iv_generate(EVP_AES_GCM_CTX *gctx, int offset)
3251 {
3252 int sz = gctx->ivlen - offset;
3253
3254 /* Must be at least 96 bits */
3255 if (sz <= 0 || gctx->ivlen < 12)
3256 return 0;
3257
3258 /* Use DRBG to generate random iv */
3259 if (RAND_bytes(gctx->iv + offset, sz) <= 0)
3260 return 0;
3261 return 1;
3262 }
3263 #endif /* FIPS_MODE */
3264
3265 static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3266 const unsigned char *in, size_t len)
3267 {
3268 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
3269
3270 /* If not set up, return error */
3271 if (!gctx->key_set)
3272 return -1;
3273
3274 if (gctx->tls_aad_len >= 0)
3275 return aes_gcm_tls_cipher(ctx, out, in, len);
3276
3277 #ifdef FIPS_MODE
3278 /*
3279 * FIPS requires generation of AES-GCM IV's inside the FIPS module.
3280 * The IV can still be set externally (the security policy will state that
3281 * this is not FIPS compliant). There are some applications
3282 * where setting the IV externally is the only option available.
3283 */
3284 if (!gctx->iv_set) {
3285 if (!ctx->encrypt || !aes_gcm_iv_generate(gctx, 0))
3286 return -1;
3287 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
3288 gctx->iv_set = 1;
3289 gctx->iv_gen_rand = 1;
3290 }
3291 #else
3292 if (!gctx->iv_set)
3293 return -1;
3294 #endif /* FIPS_MODE */
3295
3296 if (in) {
3297 if (out == NULL) {
3298 if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
3299 return -1;
3300 } else if (ctx->encrypt) {
3301 if (gctx->ctr) {
3302 size_t bulk = 0;
3303 #if defined(AES_GCM_ASM)
3304 if (len >= 32 && AES_GCM_ASM(gctx)) {
3305 size_t res = (16 - gctx->gcm.mres) % 16;
3306
3307 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3308 return -1;
3309
3310 bulk = AES_gcm_encrypt(in + res,
3311 out + res, len - res,
3312 gctx->gcm.key, gctx->gcm.Yi.c,
3313 gctx->gcm.Xi.u);
3314 gctx->gcm.len.u[1] += bulk;
3315 bulk += res;
3316 }
3317 #endif
3318 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
3319 in + bulk,
3320 out + bulk,
3321 len - bulk, gctx->ctr))
3322 return -1;
3323 } else {
3324 size_t bulk = 0;
3325 #if defined(AES_GCM_ASM2)
3326 if (len >= 32 && AES_GCM_ASM2(gctx)) {
3327 size_t res = (16 - gctx->gcm.mres) % 16;
3328
3329 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3330 return -1;
3331
3332 bulk = AES_gcm_encrypt(in + res,
3333 out + res, len - res,
3334 gctx->gcm.key, gctx->gcm.Yi.c,
3335 gctx->gcm.Xi.u);
3336 gctx->gcm.len.u[1] += bulk;
3337 bulk += res;
3338 }
3339 #endif
3340 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3341 in + bulk, out + bulk, len - bulk))
3342 return -1;
3343 }
3344 } else {
3345 if (gctx->ctr) {
3346 size_t bulk = 0;
3347 #if defined(AES_GCM_ASM)
3348 if (len >= 16 && AES_GCM_ASM(gctx)) {
3349 size_t res = (16 - gctx->gcm.mres) % 16;
3350
3351 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3352 return -1;
3353
3354 bulk = AES_gcm_decrypt(in + res,
3355 out + res, len - res,
3356 gctx->gcm.key,
3357 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3358 gctx->gcm.len.u[1] += bulk;
3359 bulk += res;
3360 }
3361 #endif
3362 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3363 in + bulk,
3364 out + bulk,
3365 len - bulk, gctx->ctr))
3366 return -1;
3367 } else {
3368 size_t bulk = 0;
3369 #if defined(AES_GCM_ASM2)
3370 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3371 size_t res = (16 - gctx->gcm.mres) % 16;
3372
3373 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3374 return -1;
3375
3376 bulk = AES_gcm_decrypt(in + res,
3377 out + res, len - res,
3378 gctx->gcm.key,
3379 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3380 gctx->gcm.len.u[1] += bulk;
3381 bulk += res;
3382 }
3383 #endif
3384 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3385 in + bulk, out + bulk, len - bulk))
3386 return -1;
3387 }
3388 }
3389 return len;
3390 } else {
3391 if (!ctx->encrypt) {
3392 if (gctx->taglen < 0)
3393 return -1;
3394 if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0)
3395 return -1;
3396 gctx->iv_set = 0;
3397 return 0;
3398 }
3399 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
3400 gctx->taglen = 16;
3401 /* Don't reuse the IV */
3402 gctx->iv_set = 0;
3403 return 0;
3404 }
3405
3406 }
3407
3408 #define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \
3409 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3410 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3411 | EVP_CIPH_CUSTOM_COPY)
3412
3413 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM,
3414 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3415 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, gcm, GCM,
3416 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3417 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, gcm, GCM,
3418 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3419
3420 static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3421 {
3422 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX, c);
3423
3424 if (type == EVP_CTRL_COPY) {
3425 EVP_CIPHER_CTX *out = ptr;
3426 EVP_AES_XTS_CTX *xctx_out = EVP_C_DATA(EVP_AES_XTS_CTX,out);
3427
3428 if (xctx->xts.key1) {
3429 if (xctx->xts.key1 != &xctx->ks1)
3430 return 0;
3431 xctx_out->xts.key1 = &xctx_out->ks1;
3432 }
3433 if (xctx->xts.key2) {
3434 if (xctx->xts.key2 != &xctx->ks2)
3435 return 0;
3436 xctx_out->xts.key2 = &xctx_out->ks2;
3437 }
3438 return 1;
3439 } else if (type != EVP_CTRL_INIT)
3440 return -1;
3441 /* key1 and key2 are used as indicators that both key and IV are set */
3442 xctx->xts.key1 = NULL;
3443 xctx->xts.key2 = NULL;
3444 return 1;
3445 }
3446
3447 static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3448 const unsigned char *iv, int enc)
3449 {
3450 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3451
3452 if (!iv && !key)
3453 return 1;
3454
3455 if (key) {
3456 do {
3457 /* The key is really two half-length keys concatenated */
3458 const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
3459 const int bits = bytes * 8;
3460
3461 /*
3462 * Verify that the two keys are different.
3463 *
3464 * This addresses the vulnerability described in Rogaway's
3465 * September 2004 paper:
3466 *
3467 * "Efficient Instantiations of Tweakable Blockciphers and
3468 * Refinements to Modes OCB and PMAC".
3469 * (http://web.cs.ucdavis.edu/~rogaway/papers/offsets.pdf)
3470 *
3471 * FIPS 140-2 IG A.9 XTS-AES Key Generation Requirements states
3472 * that:
3473 * "The check for Key_1 != Key_2 shall be done at any place
3474 * BEFORE using the keys in the XTS-AES algorithm to process
3475 * data with them."
3476 */
3477 if ((!allow_insecure_decrypt || enc)
3478 && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
3479 EVPerr(EVP_F_AES_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
3480 return 0;
3481 }
3482
3483 #ifdef AES_XTS_ASM
3484 xctx->stream = enc ? AES_xts_encrypt : AES_xts_decrypt;
3485 #else
3486 xctx->stream = NULL;
3487 #endif
3488 /* key_len is two AES keys */
3489 #ifdef HWAES_CAPABLE
3490 if (HWAES_CAPABLE) {
3491 if (enc) {
3492 HWAES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3493 xctx->xts.block1 = (block128_f) HWAES_encrypt;
3494 # ifdef HWAES_xts_encrypt
3495 xctx->stream = HWAES_xts_encrypt;
3496 # endif
3497 } else {
3498 HWAES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3499 xctx->xts.block1 = (block128_f) HWAES_decrypt;
3500 # ifdef HWAES_xts_decrypt
3501 xctx->stream = HWAES_xts_decrypt;
3502 # endif
3503 }
3504
3505 HWAES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3506 xctx->xts.block2 = (block128_f) HWAES_encrypt;
3507
3508 xctx->xts.key1 = &xctx->ks1;
3509 break;
3510 } else
3511 #endif
3512 #ifdef BSAES_CAPABLE
3513 if (BSAES_CAPABLE)
3514 xctx->stream = enc ? bsaes_xts_encrypt : bsaes_xts_decrypt;
3515 else
3516 #endif
3517 #ifdef VPAES_CAPABLE
3518 if (VPAES_CAPABLE) {
3519 if (enc) {
3520 vpaes_set_encrypt_key(key, bits, &xctx->ks1.ks);
3521 xctx->xts.block1 = (block128_f) vpaes_encrypt;
3522 } else {
3523 vpaes_set_decrypt_key(key, bits, &xctx->ks1.ks);
3524 xctx->xts.block1 = (block128_f) vpaes_decrypt;
3525 }
3526
3527 vpaes_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3528 xctx->xts.block2 = (block128_f) vpaes_encrypt;
3529
3530 xctx->xts.key1 = &xctx->ks1;
3531 break;
3532 } else
3533 #endif
3534 (void)0; /* terminate potentially open 'else' */
3535
3536 if (enc) {
3537 AES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3538 xctx->xts.block1 = (block128_f) AES_encrypt;
3539 } else {
3540 AES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3541 xctx->xts.block1 = (block128_f) AES_decrypt;
3542 }
3543
3544 AES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3545 xctx->xts.block2 = (block128_f) AES_encrypt;
3546
3547 xctx->xts.key1 = &xctx->ks1;
3548 } while (0);
3549 }
3550
3551 if (iv) {
3552 xctx->xts.key2 = &xctx->ks2;
3553 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
3554 }
3555
3556 return 1;
3557 }
3558
3559 static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3560 const unsigned char *in, size_t len)
3561 {
3562 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3563
3564 if (xctx->xts.key1 == NULL
3565 || xctx->xts.key2 == NULL
3566 || out == NULL
3567 || in == NULL
3568 || len < AES_BLOCK_SIZE)
3569 return 0;
3570
3571 /*
3572 * Impose a limit of 2^20 blocks per data unit as specified by
3573 * IEEE Std 1619-2018. The earlier and obsolete IEEE Std 1619-2007
3574 * indicated that this was a SHOULD NOT rather than a MUST NOT.
3575 * NIST SP 800-38E mandates the same limit.
3576 */
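/* 2^20 blocks of 16 bytes each, i.e. a 16 MiB per-data-unit ceiling. */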
3577 if (len > XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE) {
3578 EVPerr(EVP_F_AES_XTS_CIPHER, EVP_R_XTS_DATA_UNIT_IS_TOO_LARGE);
3579 return 0;
3580 }
3581
3582 if (xctx->stream)
3583 (*xctx->stream) (in, out, len,
3584 xctx->xts.key1, xctx->xts.key2,
3585 EVP_CIPHER_CTX_iv_noconst(ctx));
3586 else if (CRYPTO_xts128_encrypt(&xctx->xts, EVP_CIPHER_CTX_iv_noconst(ctx),
3587 in, out, len,
3588 EVP_CIPHER_CTX_encrypting(ctx)))
3589 return 0;
3590 return 1;
3591 }
3592
3593 #define aes_xts_cleanup NULL
3594
3595 #define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \
3596 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3597 | EVP_CIPH_CUSTOM_COPY)
3598
3599 BLOCK_CIPHER_custom(NID_aes, 128, 1, 16, xts, XTS, XTS_FLAGS)
3600 BLOCK_CIPHER_custom(NID_aes, 256, 1, 16, xts, XTS, XTS_FLAGS)
3601
3602 static int aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3603 {
3604 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,c);
3605 switch (type) {
3606 case EVP_CTRL_INIT:
3607 cctx->key_set = 0;
3608 cctx->iv_set = 0;
3609 cctx->L = 8;
3610 cctx->M = 12;
3611 cctx->tag_set = 0;
3612 cctx->len_set = 0;
3613 cctx->tls_aad_len = -1;
3614 return 1;
3615
3616 case EVP_CTRL_AEAD_TLS1_AAD:
3617 /* Save the AAD for later use */
3618 if (arg != EVP_AEAD_TLS1_AAD_LEN)
3619 return 0;
3620 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3621 cctx->tls_aad_len = arg;
3622 {
3623 uint16_t len =
3624 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
3625 | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
3626 /* Correct length for explicit IV */
3627 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
3628 return 0;
3629 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
3630 /* If decrypting correct for tag too */
3631 if (!EVP_CIPHER_CTX_encrypting(c)) {
3632 if (len < cctx->M)
3633 return 0;
3634 len -= cctx->M;
3635 }
3636 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
3637 EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
3638 }
3639 /* Extra padding: tag appended to record */
3640 return cctx->M;
3641
3642 case EVP_CTRL_CCM_SET_IV_FIXED:
3643 /* Sanity check length */
3644 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
3645 return 0;
3646 /* Just copy to first part of IV */
3647 memcpy(EVP_CIPHER_CTX_iv_noconst(c), ptr, arg);
3648 return 1;
3649
3650 case EVP_CTRL_AEAD_SET_IVLEN:
3651 arg = 15 - arg;
3652 /* fall-through */
3653 case EVP_CTRL_CCM_SET_L:
3654 if (arg < 2 || arg > 8)
3655 return 0;
3656 cctx->L = arg;
3657 return 1;
3658
3659 case EVP_CTRL_AEAD_SET_TAG:
3660 if ((arg & 1) || arg < 4 || arg > 16)
3661 return 0;
3662 if (EVP_CIPHER_CTX_encrypting(c) && ptr)
3663 return 0;
3664 if (ptr) {
3665 cctx->tag_set = 1;
3666 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3667 }
3668 cctx->M = arg;
3669 return 1;
3670
3671 case EVP_CTRL_AEAD_GET_TAG:
3672 if (!EVP_CIPHER_CTX_encrypting(c) || !cctx->tag_set)
3673 return 0;
3674 if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg))
3675 return 0;
3676 cctx->tag_set = 0;
3677 cctx->iv_set = 0;
3678 cctx->len_set = 0;
3679 return 1;
3680
3681 case EVP_CTRL_COPY:
3682 {
3683 EVP_CIPHER_CTX *out = ptr;
3684 EVP_AES_CCM_CTX *cctx_out = EVP_C_DATA(EVP_AES_CCM_CTX,out);
3685 if (cctx->ccm.key) {
3686 if (cctx->ccm.key != &cctx->ks)
3687 return 0;
3688 cctx_out->ccm.key = &cctx_out->ks;
3689 }
3690 return 1;
3691 }
3692
3693 default:
3694 return -1;
3695
3696 }
3697 }
3698
3699 static int aes_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3700 const unsigned char *iv, int enc)
3701 {
3702 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3703 if (!iv && !key)
3704 return 1;
3705 if (key)
3706 do {
3707 #ifdef HWAES_CAPABLE
3708 if (HWAES_CAPABLE) {
3709 HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3710 &cctx->ks.ks);
3711
3712 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3713 &cctx->ks, (block128_f) HWAES_encrypt);
3714 cctx->str = NULL;
3715 cctx->key_set = 1;
3716 break;
3717 } else
3718 #endif
3719 #ifdef VPAES_CAPABLE
3720 if (VPAES_CAPABLE) {
3721 vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3722 &cctx->ks.ks);
3723 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3724 &cctx->ks, (block128_f) vpaes_encrypt);
3725 cctx->str = NULL;
3726 cctx->key_set = 1;
3727 break;
3728 }
3729 #endif
3730 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3731 &cctx->ks.ks);
3732 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3733 &cctx->ks, (block128_f) AES_encrypt);
3734 cctx->str = NULL;
3735 cctx->key_set = 1;
3736 } while (0);
3737 if (iv) {
3738 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
3739 cctx->iv_set = 1;
3740 }
3741 return 1;
3742 }
3743
3744 static int aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3745 const unsigned char *in, size_t len)
3746 {
3747 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3748 CCM128_CONTEXT *ccm = &cctx->ccm;
3749 /* Encrypt/decrypt must be performed in place */
3750 if (out != in || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->M))
3751 return -1;
3752 /* If encrypting set explicit IV from sequence number (start of AAD) */
3753 if (EVP_CIPHER_CTX_encrypting(ctx))
3754 memcpy(out, EVP_CIPHER_CTX_buf_noconst(ctx),
3755 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3756 /* Get rest of IV from explicit IV */
3757 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx) + EVP_CCM_TLS_FIXED_IV_LEN, in,
3758 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3759 /* Correct the length value: strip the explicit IV and tag */
3760 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3761 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), 15 - cctx->L,
3762 len))
3763 return -1;
3764 /* Use saved AAD */
3765 CRYPTO_ccm128_aad(ccm, EVP_CIPHER_CTX_buf_noconst(ctx), cctx->tls_aad_len);
3766 /* Fix buffer to point to payload */
3767 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3768 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3769 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3770 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3771 cctx->str) :
3772 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3773 return -1;
3774 if (!CRYPTO_ccm128_tag(ccm, out + len, cctx->M))
3775 return -1;
3776 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3777 } else {
3778 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3779 cctx->str) :
3780 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3781 unsigned char tag[16];
3782 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3783 if (!CRYPTO_memcmp(tag, in + len, cctx->M))
3784 return len;
3785 }
3786 }
3787 OPENSSL_cleanse(out, len);
3788 return -1;
3789 }
3790 }
3791
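/*
 * For reference (a summary of the function above, not new behaviour): a
 * TLS CCM record is processed in place with the layout
 *
 *   [ 8-byte explicit IV | payload of 'len' bytes | M-byte tag ]
 *
 * where the 4-byte fixed IV part comes from EVP_CTRL_CCM_SET_IV_FIXED and
 * the AAD (the TLS record header) was saved by EVP_CTRL_AEAD_TLS1_AAD.
 */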
3792 static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3793 const unsigned char *in, size_t len)
3794 {
3795 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3796 CCM128_CONTEXT *ccm = &cctx->ccm;
3797 /* If not set up, return error */
3798 if (!cctx->key_set)
3799 return -1;
3800
3801 if (cctx->tls_aad_len >= 0)
3802 return aes_ccm_tls_cipher(ctx, out, in, len);
3803
3804 /* EVP_*Final() doesn't return any data */
3805 if (in == NULL && out != NULL)
3806 return 0;
3807
3808 if (!cctx->iv_set)
3809 return -1;
3810
3811 if (!out) {
3812 if (!in) {
3813 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx),
3814 15 - cctx->L, len))
3815 return -1;
3816 cctx->len_set = 1;
3817 return len;
3818 }
3819 /* If we have AAD, the total message length must already be set */
3820 if (!cctx->len_set && len)
3821 return -1;
3822 CRYPTO_ccm128_aad(ccm, in, len);
3823 return len;
3824 }
3825
3826 /* The tag must be set before actually decrypting data */
3827 if (!EVP_CIPHER_CTX_encrypting(ctx) && !cctx->tag_set)
3828 return -1;
3829
3830 /* If the total length has not been set yet, set it now */
3831 if (!cctx->len_set) {
3832 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx),
3833 15 - cctx->L, len))
3834 return -1;
3835 cctx->len_set = 1;
3836 }
3837 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3838 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3839 cctx->str) :
3840 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3841 return -1;
3842 cctx->tag_set = 1;
3843 return len;
3844 } else {
3845 int rv = -1;
3846 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3847 cctx->str) :
3848 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3849 unsigned char tag[16];
3850 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3851 if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx),
3852 cctx->M))
3853 rv = len;
3854 }
3855 }
3856 if (rv == -1)
3857 OPENSSL_cleanse(out, len);
3858 cctx->iv_set = 0;
3859 cctx->tag_set = 0;
3860 cctx->len_set = 0;
3861 return rv;
3862 }
3863 }
3864
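/*
 * Illustrative sketch (not part of this file; the buffer names and sizes
 * are assumptions): one-shot CCM encryption through the interface above.
 * CCM must know the total plaintext length before any AAD is supplied,
 * hence the EVP_EncryptUpdate() call with both data pointers NULL.
 */
static int ccm_encrypt_example(const unsigned char *key, /* 16 bytes */
                               const unsigned char *nonce, /* 12 bytes */
                               const unsigned char *aad, int aadlen,
                               const unsigned char *msg, int msglen,
                               unsigned char *out, unsigned char tag[16])
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int len, ok = 0;

    if (c == NULL)
        return 0;
    if (EVP_EncryptInit_ex(c, EVP_aes_128_ccm(), NULL, NULL, NULL)
            && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL)
            && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 16, NULL)
            && EVP_EncryptInit_ex(c, NULL, NULL, key, nonce)
            && EVP_EncryptUpdate(c, NULL, &len, NULL, msglen) /* length */
            && EVP_EncryptUpdate(c, NULL, &len, aad, aadlen)  /* AAD */
            && EVP_EncryptUpdate(c, out, &len, msg, msglen)   /* payload */
            && EVP_EncryptFinal_ex(c, out + len, &len)
            && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag))
        ok = 1;
    EVP_CIPHER_CTX_free(c);
    return ok;
}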
3865 #define aes_ccm_cleanup NULL
3866
3867 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, ccm, CCM,
3868 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3869 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, ccm, CCM,
3870 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3871 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, ccm, CCM,
3872 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3873
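/*
 * A note on the macro calls above (matching the BLOCK_CIPHER_custom
 * definition earlier in this file): they generate the EVP_CIPHER tables
 * and the EVP_aes_{128,192,256}_ccm() accessors. The '1' is the block
 * size CCM reports to EVP (it behaves as a stream cipher there) and '12'
 * is the default nonce length, i.e. L defaults to 3.
 */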
3874 typedef struct {
3875 union {
3876 OSSL_UNION_ALIGN;
3877 AES_KEY ks;
3878 } ks;
3879 /* Pointer to the IV: non-NULL indicates that an IV has been set */
3880 unsigned char *iv;
3881 } EVP_AES_WRAP_CTX;
3882
3883 static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3884 const unsigned char *iv, int enc)
3885 {
3886 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3887 if (!iv && !key)
3888 return 1;
3889 if (key) {
3890 if (EVP_CIPHER_CTX_encrypting(ctx))
3891 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3892 &wctx->ks.ks);
3893 else
3894 AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3895 &wctx->ks.ks);
3896 if (!iv)
3897 wctx->iv = NULL;
3898 }
3899 if (iv) {
3900 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, EVP_CIPHER_CTX_iv_length(ctx));
3901 wctx->iv = EVP_CIPHER_CTX_iv_noconst(ctx);
3902 }
3903 return 1;
3904 }
3905
3906 static int aes_wrap_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3907 const unsigned char *in, size_t inlen)
3908 {
3909 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3910 size_t rv;
3911 /* AES wrap with padding has IV length of 4, without padding 8 */
3912 int pad = EVP_CIPHER_CTX_iv_length(ctx) == 4;
3913 /* No final operation so always return zero length */
3914 if (!in)
3915 return 0;
3916 /* Input length must always be non-zero */
3917 if (!inlen)
3918 return -1;
3919 /* If decrypting, we need at least 16 bytes and a multiple of 8 */
3920 if (!EVP_CIPHER_CTX_encrypting(ctx) && (inlen < 16 || inlen & 0x7))
3921 return -1;
3922 /* If not padding, the input must be a multiple of 8 */
3923 if (!pad && inlen & 0x7)
3924 return -1;
3925 if (is_partially_overlapping(out, in, inlen)) {
3926 EVPerr(EVP_F_AES_WRAP_CIPHER, EVP_R_PARTIALLY_OVERLAPPING);
3927 return 0;
3928 }
3929 if (!out) {
3930 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3931 /* If padding, round up to a multiple of 8 */
3932 if (pad)
3933 inlen = (inlen + 7) / 8 * 8;
3934 /* 8 byte prefix */
3935 return inlen + 8;
3936 } else {
3937 /*
3938 * If not padding, the output will be exactly 8 bytes smaller than
3939 * the input. If padding, it will be at least 8 bytes smaller, but
3940 * we don't know exactly how much without unwrapping.
3941 */
3942 return inlen - 8;
3943 }
3944 }
3945 if (pad) {
3946 if (EVP_CIPHER_CTX_encrypting(ctx))
3947 rv = CRYPTO_128_wrap_pad(&wctx->ks.ks, wctx->iv,
3948 out, in, inlen,
3949 (block128_f) AES_encrypt);
3950 else
3951 rv = CRYPTO_128_unwrap_pad(&wctx->ks.ks, wctx->iv,
3952 out, in, inlen,
3953 (block128_f) AES_decrypt);
3954 } else {
3955 if (EVP_CIPHER_CTX_encrypting(ctx))
3956 rv = CRYPTO_128_wrap(&wctx->ks.ks, wctx->iv,
3957 out, in, inlen, (block128_f) AES_encrypt);
3958 else
3959 rv = CRYPTO_128_unwrap(&wctx->ks.ks, wctx->iv,
3960 out, in, inlen, (block128_f) AES_decrypt);
3961 }
3962 return rv ? (int)rv : -1;
3963 }
3964
3965 #define WRAP_FLAGS (EVP_CIPH_WRAP_MODE \
3966 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3967 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_FLAG_DEFAULT_ASN1)
3968
3969 static const EVP_CIPHER aes_128_wrap = {
3970 NID_id_aes128_wrap,
3971 8, 16, 8, WRAP_FLAGS,
3972 aes_wrap_init_key, aes_wrap_cipher,
3973 NULL,
3974 sizeof(EVP_AES_WRAP_CTX),
3975 NULL, NULL, NULL, NULL
3976 };
3977
3978 const EVP_CIPHER *EVP_aes_128_wrap(void)
3979 {
3980 return &aes_128_wrap;
3981 }
3982
3983 static const EVP_CIPHER aes_192_wrap = {
3984 NID_id_aes192_wrap,
3985 8, 24, 8, WRAP_FLAGS,
3986 aes_wrap_init_key, aes_wrap_cipher,
3987 NULL,
3988 sizeof(EVP_AES_WRAP_CTX),
3989 NULL, NULL, NULL, NULL
3990 };
3991
3992 const EVP_CIPHER *EVP_aes_192_wrap(void)
3993 {
3994 return &aes_192_wrap;
3995 }
3996
3997 static const EVP_CIPHER aes_256_wrap = {
3998 NID_id_aes256_wrap,
3999 8, 32, 8, WRAP_FLAGS,
4000 aes_wrap_init_key, aes_wrap_cipher,
4001 NULL,
4002 sizeof(EVP_AES_WRAP_CTX),
4003 NULL, NULL, NULL, NULL
4004 };
4005
4006 const EVP_CIPHER *EVP_aes_256_wrap(void)
4007 {
4008 return &aes_256_wrap;
4009 }
4010
4011 static const EVP_CIPHER aes_128_wrap_pad = {
4012 NID_id_aes128_wrap_pad,
4013 8, 16, 4, WRAP_FLAGS,
4014 aes_wrap_init_key, aes_wrap_cipher,
4015 NULL,
4016 sizeof(EVP_AES_WRAP_CTX),
4017 NULL, NULL, NULL, NULL
4018 };
4019
4020 const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
4021 {
4022 return &aes_128_wrap_pad;
4023 }
4024
4025 static const EVP_CIPHER aes_192_wrap_pad = {
4026 NID_id_aes192_wrap_pad,
4027 8, 24, 4, WRAP_FLAGS,
4028 aes_wrap_init_key, aes_wrap_cipher,
4029 NULL,
4030 sizeof(EVP_AES_WRAP_CTX),
4031 NULL, NULL, NULL, NULL
4032 };
4033
4034 const EVP_CIPHER *EVP_aes_192_wrap_pad(void)
4035 {
4036 return &aes_192_wrap_pad;
4037 }
4038
4039 static const EVP_CIPHER aes_256_wrap_pad = {
4040 NID_id_aes256_wrap_pad,
4041 8, 32, 4, WRAP_FLAGS,
4042 aes_wrap_init_key, aes_wrap_cipher,
4043 NULL,
4044 sizeof(EVP_AES_WRAP_CTX),
4045 NULL, NULL, NULL, NULL
4046 };
4047
4048 const EVP_CIPHER *EVP_aes_256_wrap_pad(void)
4049 {
4050 return &aes_256_wrap_pad;
4051 }
4052
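/*
 * Illustrative sketch (not part of this file; buffer names are
 * assumptions): wrapping key material with EVP_aes_256_wrap(). Wrap-mode
 * ciphers are rejected by the generic EVP code unless the caller opts in
 * with EVP_CIPHER_CTX_FLAG_WRAP_ALLOW before the cipher is initialised.
 * A NULL IV selects the RFC 3394 default IV; the output is always
 * inlen + 8 bytes.
 */
static int wrap_example(const unsigned char *kek, /* 32 bytes */
                        const unsigned char *in, int inlen, /* multiple of 8 */
                        unsigned char *out /* inlen + 8 bytes */)
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int len = 0, ok = 0;

    if (c == NULL)
        return 0;
    EVP_CIPHER_CTX_set_flags(c, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW);
    if (EVP_EncryptInit_ex(c, EVP_aes_256_wrap(), NULL, kek, NULL)
            && EVP_EncryptUpdate(c, out, &len, in, inlen))
        ok = 1;
    EVP_CIPHER_CTX_free(c);
    return ok;
}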
4053 #ifndef OPENSSL_NO_OCB
4054 static int aes_ocb_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
4055 {
4056 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
4057 EVP_CIPHER_CTX *newc;
4058 EVP_AES_OCB_CTX *new_octx;
4059
4060 switch (type) {
4061 case EVP_CTRL_INIT:
4062 octx->key_set = 0;
4063 octx->iv_set = 0;
4064 octx->ivlen = EVP_CIPHER_CTX_iv_length(c);
4065 octx->iv = EVP_CIPHER_CTX_iv_noconst(c);
4066 octx->taglen = 16;
4067 octx->data_buf_len = 0;
4068 octx->aad_buf_len = 0;
4069 return 1;
4070
4071 case EVP_CTRL_AEAD_SET_IVLEN:
4072 /* IV len must be 1 to 15 */
4073 if (arg <= 0 || arg > 15)
4074 return 0;
4075
4076 octx->ivlen = arg;
4077 return 1;
4078
4079 case EVP_CTRL_AEAD_SET_TAG:
4080 if (!ptr) {
4081 /* Tag len must be 0 to 16 */
4082 if (arg < 0 || arg > 16)
4083 return 0;
4084
4085 octx->taglen = arg;
4086 return 1;
4087 }
4088 if (arg != octx->taglen || EVP_CIPHER_CTX_encrypting(c))
4089 return 0;
4090 memcpy(octx->tag, ptr, arg);
4091 return 1;
4092
4093 case EVP_CTRL_AEAD_GET_TAG:
4094 if (arg != octx->taglen || !EVP_CIPHER_CTX_encrypting(c))
4095 return 0;
4096
4097 memcpy(ptr, octx->tag, arg);
4098 return 1;
4099
4100 case EVP_CTRL_COPY:
4101 newc = (EVP_CIPHER_CTX *)ptr;
4102 new_octx = EVP_C_DATA(EVP_AES_OCB_CTX,newc);
4103 return CRYPTO_ocb128_copy_ctx(&new_octx->ocb, &octx->ocb,
4104 &new_octx->ksenc.ks,
4105 &new_octx->ksdec.ks);
4106
4107 default:
4108 return -1;
4109
4110 }
4111 }
4112
4113 # ifdef HWAES_CAPABLE
4114 # ifdef HWAES_ocb_encrypt
4115 void HWAES_ocb_encrypt(const unsigned char *in, unsigned char *out,
4116 size_t blocks, const void *key,
4117 size_t start_block_num,
4118 unsigned char offset_i[16],
4119 const unsigned char L_[][16],
4120 unsigned char checksum[16]);
4121 # else
4122 # define HWAES_ocb_encrypt ((ocb128_f)NULL)
4123 # endif
4124 # ifdef HWAES_ocb_decrypt
4125 void HWAES_ocb_decrypt(const unsigned char *in, unsigned char *out,
4126 size_t blocks, const void *key,
4127 size_t start_block_num,
4128 unsigned char offset_i[16],
4129 const unsigned char L_[][16],
4130 unsigned char checksum[16]);
4131 # else
4132 # define HWAES_ocb_decrypt ((ocb128_f)NULL)
4133 # endif
4134 # endif
4135
4136 static int aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
4137 const unsigned char *iv, int enc)
4138 {
4139 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
4140 if (!iv && !key)
4141 return 1;
4142 if (key) {
4143 do {
4144 /*
4145 * We set both the encrypt and decrypt key here because decrypt
4146 * needs both. We could possibly optimise to remove setting the
4147 * decrypt for an encryption operation.
4148 */
4149 # ifdef HWAES_CAPABLE
4150 if (HWAES_CAPABLE) {
4151 HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
4152 &octx->ksenc.ks);
4153 HWAES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
4154 &octx->ksdec.ks);
4155 if (!CRYPTO_ocb128_init(&octx->ocb,
4156 &octx->ksenc.ks, &octx->ksdec.ks,
4157 (block128_f) HWAES_encrypt,
4158 (block128_f) HWAES_decrypt,
4159 enc ? HWAES_ocb_encrypt
4160 : HWAES_ocb_decrypt))
4161 return 0;
4162 break;
4163 }
4164 # endif
4165 # ifdef VPAES_CAPABLE
4166 if (VPAES_CAPABLE) {
4167 vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
4168 &octx->ksenc.ks);
4169 vpaes_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
4170 &octx->ksdec.ks);
4171 if (!CRYPTO_ocb128_init(&octx->ocb,
4172 &octx->ksenc.ks, &octx->ksdec.ks,
4173 (block128_f) vpaes_encrypt,
4174 (block128_f) vpaes_decrypt,
4175 NULL))
4176 return 0;
4177 break;
4178 }
4179 # endif
4180 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
4181 &octx->ksenc.ks);
4182 AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
4183 &octx->ksdec.ks);
4184 if (!CRYPTO_ocb128_init(&octx->ocb,
4185 &octx->ksenc.ks, &octx->ksdec.ks,
4186 (block128_f) AES_encrypt,
4187 (block128_f) AES_decrypt,
4188 NULL))
4189 return 0;
4190 } while (0);
4192
4193 /*
4194 * If we have an IV, set it directly; otherwise use the saved IV.
4195 */
4196 if (iv == NULL && octx->iv_set)
4197 iv = octx->iv;
4198 if (iv) {
4199 if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
4200 != 1)
4201 return 0;
4202 octx->iv_set = 1;
4203 }
4204 octx->key_set = 1;
4205 } else {
4206 /* If the key is set, use the IV directly; otherwise save a copy */
4207 if (octx->key_set)
4208 CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
4209 else
4210 memcpy(octx->iv, iv, octx->ivlen);
4211 octx->iv_set = 1;
4212 }
4213 return 1;
4214 }
4215
4216 static int aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
4217 const unsigned char *in, size_t len)
4218 {
4219 unsigned char *buf;
4220 int *buf_len;
4221 int written_len = 0;
4222 size_t trailing_len;
4223 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
4224
4225 /* If the IV or key is not set, return an error */
4226 if (!octx->iv_set)
4227 return -1;
4228
4229 if (!octx->key_set)
4230 return -1;
4231
4232 if (in != NULL) {
4233 /*
4234 * Need to ensure we are only passing full blocks to low level OCB
4235 * routines. We do it here rather than in EVP_EncryptUpdate/
4236 * EVP_DecryptUpdate because we need to pass full blocks of AAD too
4237 * and those routines don't support that
4238 */
4239
4240 /* Are we dealing with AAD or normal data here? */
4241 if (out == NULL) {
4242 buf = octx->aad_buf;
4243 buf_len = &(octx->aad_buf_len);
4244 } else {
4245 buf = octx->data_buf;
4246 buf_len = &(octx->data_buf_len);
4247
4248 if (is_partially_overlapping(out + *buf_len, in, len)) {
4249 EVPerr(EVP_F_AES_OCB_CIPHER, EVP_R_PARTIALLY_OVERLAPPING);
4250 return 0;
4251 }
4252 }
4253
4254 /*
4255 * If we've got a partially filled buffer from a previous call then
4256 * use that data first
4257 */
4258 if (*buf_len > 0) {
4259 unsigned int remaining;
4260
4261 remaining = AES_BLOCK_SIZE - (*buf_len);
4262 if (remaining > len) {
4263 memcpy(buf + (*buf_len), in, len);
4264 *(buf_len) += len;
4265 return 0;
4266 }
4267 memcpy(buf + (*buf_len), in, remaining);
4268
4269 /*
4270 * If we get here we've filled the buffer, so process it
4271 */
4272 len -= remaining;
4273 in += remaining;
4274 if (out == NULL) {
4275 if (!CRYPTO_ocb128_aad(&octx->ocb, buf, AES_BLOCK_SIZE))
4276 return -1;
4277 } else if (EVP_CIPHER_CTX_encrypting(ctx)) {
4278 if (!CRYPTO_ocb128_encrypt(&octx->ocb, buf, out,
4279 AES_BLOCK_SIZE))
4280 return -1;
4281 } else {
4282 if (!CRYPTO_ocb128_decrypt(&octx->ocb, buf, out,
4283 AES_BLOCK_SIZE))
4284 return -1;
4285 }
4286 written_len = AES_BLOCK_SIZE;
4287 *buf_len = 0;
4288 if (out != NULL)
4289 out += AES_BLOCK_SIZE;
4290 }
4291
4292 /* Do we have a partial block to handle at the end? */
4293 trailing_len = len % AES_BLOCK_SIZE;
4294
4295 /*
4296 * If we've got some full blocks to handle, then process these first
4297 */
4298 if (len != trailing_len) {
4299 if (out == NULL) {
4300 if (!CRYPTO_ocb128_aad(&octx->ocb, in, len - trailing_len))
4301 return -1;
4302 } else if (EVP_CIPHER_CTX_encrypting(ctx)) {
4303 if (!CRYPTO_ocb128_encrypt
4304 (&octx->ocb, in, out, len - trailing_len))
4305 return -1;
4306 } else {
4307 if (!CRYPTO_ocb128_decrypt
4308 (&octx->ocb, in, out, len - trailing_len))
4309 return -1;
4310 }
4311 written_len += len - trailing_len;
4312 in += len - trailing_len;
4313 }
4314
4315 /* Handle any trailing partial block */
4316 if (trailing_len > 0) {
4317 memcpy(buf, in, trailing_len);
4318 *buf_len = trailing_len;
4319 }
4320
4321 return written_len;
4322 } else {
4323 /*
4324 * First of all empty the buffer of any partial block that we might
4325 * have been provided - both for data and AAD
4326 */
4327 if (octx->data_buf_len > 0) {
4328 if (EVP_CIPHER_CTX_encrypting(ctx)) {
4329 if (!CRYPTO_ocb128_encrypt(&octx->ocb, octx->data_buf, out,
4330 octx->data_buf_len))
4331 return -1;
4332 } else {
4333 if (!CRYPTO_ocb128_decrypt(&octx->ocb, octx->data_buf, out,
4334 octx->data_buf_len))
4335 return -1;
4336 }
4337 written_len = octx->data_buf_len;
4338 octx->data_buf_len = 0;
4339 }
4340 if (octx->aad_buf_len > 0) {
4341 if (!CRYPTO_ocb128_aad
4342 (&octx->ocb, octx->aad_buf, octx->aad_buf_len))
4343 return -1;
4344 octx->aad_buf_len = 0;
4345 }
4346 /* If decrypting, verify the tag */
4347 if (!EVP_CIPHER_CTX_encrypting(ctx)) {
4348 if (octx->taglen < 0)
4349 return -1;
4350 if (CRYPTO_ocb128_finish(&octx->ocb,
4351 octx->tag, octx->taglen) != 0)
4352 return -1;
4353 octx->iv_set = 0;
4354 return written_len;
4355 }
4356 /* If encrypting then just get the tag */
4357 if (CRYPTO_ocb128_tag(&octx->ocb, octx->tag, 16) != 1)
4358 return -1;
4359 /* Don't reuse the IV */
4360 octx->iv_set = 0;
4361 return written_len;
4362 }
4363 }
4364
4365 static int aes_ocb_cleanup(EVP_CIPHER_CTX *c)
4366 {
4367 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
4368 CRYPTO_ocb128_cleanup(&octx->ocb);
4369 return 1;
4370 }
4371
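/*
 * Illustrative sketch (not part of this file; buffer names are
 * assumptions): OCB encryption via the routines above. Unlike CCM, no
 * length pre-announcement is needed, and partial blocks are buffered
 * internally, so the final call may still emit data. The default 12-byte
 * IV and 16-byte tag are used here.
 */
static int ocb_encrypt_example(const unsigned char *key, /* 16 bytes */
                               const unsigned char *iv, /* 12 bytes */
                               const unsigned char *aad, int aadlen,
                               const unsigned char *msg, int msglen,
                               unsigned char *out, unsigned char tag[16])
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int len, fin, ok = 0;

    if (c == NULL)
        return 0;
    if (EVP_EncryptInit_ex(c, EVP_aes_128_ocb(), NULL, key, iv)
            && EVP_EncryptUpdate(c, NULL, &len, aad, aadlen)
            && EVP_EncryptUpdate(c, out, &len, msg, msglen)
            && EVP_EncryptFinal_ex(c, out + len, &fin)
            && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag))
        ok = 1;
    EVP_CIPHER_CTX_free(c);
    return ok;
}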
4372 BLOCK_CIPHER_custom(NID_aes, 128, 16, 12, ocb, OCB,
4373 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
4374 BLOCK_CIPHER_custom(NID_aes, 192, 16, 12, ocb, OCB,
4375 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
4376 BLOCK_CIPHER_custom(NID_aes, 256, 16, 12, ocb, OCB,
4377 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
4378 #endif /* OPENSSL_NO_OCB */
4379
4380 /* AES-SIV mode */
4381 #ifndef OPENSSL_NO_SIV
4382
4383 typedef SIV128_CONTEXT EVP_AES_SIV_CTX;
4384
4385 #define aesni_siv_init_key aes_siv_init_key
4386 static int aes_siv_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
4387 const unsigned char *iv, int enc)
4388 {
4389 const EVP_CIPHER *ctr;
4390 const EVP_CIPHER *cbc;
4391 SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, ctx);
4392 int klen = EVP_CIPHER_CTX_key_length(ctx) / 2;
4393
4394 if (key == NULL)
4395 return 1;
4396
4397 switch (klen) {
4398 case 16:
4399 cbc = EVP_aes_128_cbc();
4400 ctr = EVP_aes_128_ctr();
4401 break;
4402 case 24:
4403 cbc = EVP_aes_192_cbc();
4404 ctr = EVP_aes_192_ctr();
4405 break;
4406 case 32:
4407 cbc = EVP_aes_256_cbc();
4408 ctr = EVP_aes_256_ctr();
4409 break;
4410 default:
4411 return 0;
4412 }
4413
4414 /* klen is the key length of the underlying cipher, not of the input
4415 key, which should be twice as long */
4416 return CRYPTO_siv128_init(sctx, key, klen, cbc, ctr);
4417 }
4418
4419 #define aesni_siv_cipher aes_siv_cipher
4420 static int aes_siv_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
4421 const unsigned char *in, size_t len)
4422 {
4423 SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, ctx);
4424
4425 /* EncryptFinal or DecryptFinal */
4426 if (in == NULL)
4427 return CRYPTO_siv128_finish(sctx);
4428
4429 /* Deal with associated data */
4430 if (out == NULL)
4431 return CRYPTO_siv128_aad(sctx, in, len);
4432
4433 if (EVP_CIPHER_CTX_encrypting(ctx))
4434 return CRYPTO_siv128_encrypt(sctx, in, out, len);
4435
4436 return CRYPTO_siv128_decrypt(sctx, in, out, len);
4437 }
4438
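/*
 * Illustrative sketch (not part of this file; buffer names are
 * assumptions): SIV encryption via the routine above. Note that
 * EVP_aes_128_siv() takes a double-length (32-byte) key, that all AAD
 * must be supplied before the plaintext, and that the plaintext must be
 * passed in a single update call. The 16-byte SIV acts as the tag; for
 * decryption it would be supplied via EVP_CTRL_AEAD_SET_TAG instead.
 */
static int siv_encrypt_example(const unsigned char *key, /* 32 bytes */
                               const unsigned char *aad, int aadlen,
                               const unsigned char *msg, int msglen,
                               unsigned char *out, unsigned char tag[16])
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int len, ok = 0;

    if (c == NULL)
        return 0;
    if (EVP_EncryptInit_ex(c, EVP_aes_128_siv(), NULL, key, NULL)
            && EVP_EncryptUpdate(c, NULL, &len, aad, aadlen)
            && EVP_EncryptUpdate(c, out, &len, msg, msglen)
            && EVP_EncryptFinal_ex(c, out + len, &len)
            && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag))
        ok = 1;
    EVP_CIPHER_CTX_free(c);
    return ok;
}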
4439 #define aesni_siv_cleanup aes_siv_cleanup
4440 static int aes_siv_cleanup(EVP_CIPHER_CTX *c)
4441 {
4442 SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, c);
4443
4444 return CRYPTO_siv128_cleanup(sctx);
4445 }
4446
4447
4448 #define aesni_siv_ctrl aes_siv_ctrl
4449 static int aes_siv_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
4450 {
4451 SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, c);
4452 SIV128_CONTEXT *sctx_out;
4453
4454 switch (type) {
4455 case EVP_CTRL_INIT:
4456 return CRYPTO_siv128_cleanup(sctx);
4457
4458 case EVP_CTRL_SET_SPEED:
4459 return CRYPTO_siv128_speed(sctx, arg);
4460
4461 case EVP_CTRL_AEAD_SET_TAG:
4462 if (!EVP_CIPHER_CTX_encrypting(c))
4463 return CRYPTO_siv128_set_tag(sctx, ptr, arg);
4464 return 1;
4465
4466 case EVP_CTRL_AEAD_GET_TAG:
4467 if (!EVP_CIPHER_CTX_encrypting(c))
4468 return 0;
4469 return CRYPTO_siv128_get_tag(sctx, ptr, arg);
4470
4471 case EVP_CTRL_COPY:
4472 sctx_out = EVP_C_DATA(SIV128_CONTEXT, (EVP_CIPHER_CTX*)ptr);
4473 return CRYPTO_siv128_copy_ctx(sctx_out, sctx);
4474
4475 default:
4476 return -1;
4477
4478 }
4479 }
4480
4481 #define SIV_FLAGS (EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_DEFAULT_ASN1 \
4482 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
4483 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CUSTOM_COPY \
4484 | EVP_CIPH_CTRL_INIT)
4485
4486 BLOCK_CIPHER_custom(NID_aes, 128, 1, 0, siv, SIV, SIV_FLAGS)
4487 BLOCK_CIPHER_custom(NID_aes, 192, 1, 0, siv, SIV, SIV_FLAGS)
4488 BLOCK_CIPHER_custom(NID_aes, 256, 1, 0, siv, SIV, SIV_FLAGS)
4489 #endif