/*
 * Copyright 2001-2020 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file uses the low level AES functions (which are deprecated for
 * non-internal use) in order to implement the EVP AES ciphers.
 */
#include "internal/deprecated.h"

#include <string.h>
#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "crypto/evp.h"
#include "internal/cryptlib.h"
#include "crypto/modes.h"
#include "crypto/siv.h"
#include "crypto/aes_platform.h"
#include "evp_local.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;
    block128_f block;
    union {
        cbc128_f cbc;
        ctr128_f ctr;
    } stream;
} EVP_AES_KEY;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    GCM128_CONTEXT gcm;
    unsigned char *iv;          /* Temporary IV store */
    int ivlen;                  /* IV length */
    int taglen;
    int iv_gen;                 /* It is OK to generate IVs */
    int iv_gen_rand;            /* No IV was specified, so generate a rand IV */
    int tls_aad_len;            /* TLS AAD length */
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
    ctr128_f ctr;
} EVP_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks1, ks2;                 /* AES key schedules to use */
    XTS128_CONTEXT xts;
    void (*stream) (const unsigned char *in,
                    unsigned char *out, size_t length,
                    const AES_KEY *key1, const AES_KEY *key2,
                    const unsigned char iv[16]);
} EVP_AES_XTS_CTX;

#ifdef FIPS_MODULE
static const int allow_insecure_decrypt = 0;
#else
static const int allow_insecure_decrypt = 1;
#endif

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    int tag_set;                /* Set if tag is valid */
    int len_set;                /* Set if message length set */
    int L, M;                   /* L and M parameters from RFC3610 */
    int tls_aad_len;            /* TLS AAD length */
    CCM128_CONTEXT ccm;
    ccm128_f str;
} EVP_AES_CCM_CTX;

#ifndef OPENSSL_NO_OCB
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksenc;                    /* AES key schedule to use for encryption */
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksdec;                    /* AES key schedule to use for decryption */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    OCB128_CONTEXT ocb;
    unsigned char *iv;          /* Temporary IV store */
    unsigned char tag[16];
    unsigned char data_buf[16]; /* Store partial data blocks */
    unsigned char aad_buf[16];  /* Store partial AAD blocks */
    int data_buf_len;
    int aad_buf_len;
    int ivlen;                  /* IV length */
    int taglen;
} EVP_AES_OCB_CTX;
#endif

#define MAXBITCHUNK     ((size_t)1<<(sizeof(size_t)*8-4))
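
/*
 * MAXBITCHUNK caps a single processing chunk at 1/16th of the size_t
 * range (e.g. 2^60 bytes on a 64-bit platform) so that a byte count can
 * be converted to a bit count (len * 8) without overflowing size_t in
 * the bit-oriented CFB1 code path later in this file.
 */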

/* increment counter (64-bit int) by 1 */
static void ctr64_inc(unsigned char *counter)
{
    int n = 8;
    unsigned char c;

    do {
        --n;
        c = counter[n];
        ++c;
        counter[n] = c;
        if (c)
            return;
    } while (n);
}
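
/*
 * The counter is treated as a big-endian 64-bit integer; the GCM ctrls
 * below pass (iv + ivlen - 8) so that only the trailing 64-bit
 * invocation field of the IV is incremented and a carry never
 * propagates into the fixed field.
 */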

#if defined(AESNI_CAPABLE)
# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
#  define AES_GCM_ASM2(gctx)      (gctx->gcm.block==(block128_f)aesni_encrypt && \
                                   gctx->gcm.ghash==gcm_ghash_avx)
#  undef AES_GCM_ASM2           /* minor size optimization */
# endif

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                          const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = aesni_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                    &dat->ks.ks);
        dat->block = (block128_f) aesni_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f) aesni_cbc_encrypt : NULL;
    } else {
        ret = aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                    &dat->ks.ks);
        dat->block = (block128_f) aesni_encrypt;
        if (mode == EVP_CIPH_CBC_MODE)
            dat->stream.cbc = (cbc128_f) aesni_cbc_encrypt;
        else if (mode == EVP_CIPH_CTR_MODE)
            dat->stream.ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        else
            dat->stream.cbc = NULL;
    }

    if (ret < 0) {
        EVPerr(EVP_F_AESNI_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    aesni_cbc_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_iv_noconst(ctx),
                      EVP_CIPHER_CTX_encrypting(ctx));

    return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    size_t bl = EVP_CIPHER_CTX_block_size(ctx);

    if (len < bl)
        return 1;

    aesni_ecb_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_encrypting(ctx));

    return 1;
}

# define aesni_ofb_cipher aes_ofb_cipher
static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb_cipher aes_cfb_cipher
static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb8_cipher aes_cfb8_cipher
static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_cfb1_cipher aes_cfb1_cipher
static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_ctr_cipher aes_ctr_cipher
static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                              &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
        gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aesni_gcm_cipher aes_gcm_cipher
static int aesni_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
        const int bits = bytes * 8;

        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            EVPerr(EVP_F_AESNI_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        /* key_len is two AES keys */
        if (enc) {
            aesni_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_encrypt;
            xctx->stream = aesni_xts_encrypt;
        } else {
            aesni_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_decrypt;
            xctx->stream = aesni_xts_decrypt;
        }

        aesni_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aesni_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
    }

    return 1;
}
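
/*
 * A note on the check above: when key1 == key2, the tweak is encrypted
 * with the same key as the data, so the tweak masking is no longer
 * independent of the block cipher; this is the weakness Rogaway
 * identified. Encryption therefore always rejects duplicated half-keys,
 * while decryption rejects them only when allow_insecure_decrypt is 0
 * (i.e. in the FIPS_MODULE build).
 */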

# define aesni_xts_cipher aes_xts_cipher
static int aesni_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                              &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aesni_encrypt);
        cctx->str = enc ? (ccm128_f) aesni_ccm64_encrypt_blocks :
            (ccm128_f) aesni_ccm64_decrypt_blocks;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aesni_ccm_cipher aes_ccm_cipher
static int aesni_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aesni_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                  &octx->ksenc.ks);
            aesni_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                  &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aesni_encrypt,
                                    (block128_f) aesni_decrypt,
                                    enc ? aesni_ocb_encrypt
                                        : aesni_ocb_decrypt))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aesni_ocb_cipher aes_ocb_cipher
static int aesni_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);
# endif                         /* OPENSSL_NO_OCB */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aesni_init_key, \
        aesni_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }
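
/*
 * For illustration: a hypothetical invocation such as
 *
 *     BLOCK_CIPHER_generic(NID_aes, 128, 16, 16, cbc, cbc, CBC, flags)
 *
 * would define the static EVP_CIPHER tables aesni_128_cbc and
 * aes_128_cbc plus the accessor EVP_aes_128_cbc(), which selects the
 * AES-NI variant at run time when AESNI_CAPABLE evaluates to true.
 */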

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aesni_##mode##_init_key, \
        aesni_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(SPARC_AES_CAPABLE)

static int aes_t4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                           const unsigned char *iv, int enc)
{
    int ret, mode, bits;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_mode(ctx);
    bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = 0;
        aes_t4_set_decrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_decrypt;
        switch (bits) {
        case 128:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes128_t4_cbc_decrypt : NULL;
            break;
        case 192:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes192_t4_cbc_decrypt : NULL;
            break;
        case 256:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes256_t4_cbc_decrypt : NULL;
            break;
        default:
            ret = -1;
        }
    } else {
        ret = 0;
        aes_t4_set_encrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_encrypt;
        switch (bits) {
        case 128:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes128_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 192:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes192_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 256:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes256_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        default:
            ret = -1;
        }
    }

    if (ret < 0) {
        EVPerr(EVP_F_AES_T4_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

# define aes_t4_cbc_cipher aes_cbc_cipher
static int aes_t4_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ecb_cipher aes_ecb_cipher
static int aes_t4_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ofb_cipher aes_ofb_cipher
static int aes_t4_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb_cipher aes_cfb_cipher
static int aes_t4_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb8_cipher aes_cfb8_cipher
static int aes_t4_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_cfb1_cipher aes_cfb1_cipher
static int aes_t4_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_ctr_cipher aes_ctr_cipher
static int aes_t4_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        int bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
        aes_t4_set_encrypt_key(key, bits, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
                           (block128_f) aes_t4_encrypt);
        switch (bits) {
        case 128:
            gctx->ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            break;
        case 192:
            gctx->ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            break;
        case 256:
            gctx->ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            break;
        default:
            return 0;
        }
        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aes_t4_gcm_cipher aes_gcm_cipher
static int aes_t4_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
        const int bits = bytes * 8;

        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            EVPerr(EVP_F_AES_T4_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        xctx->stream = NULL;
        /* key_len is two AES keys */
        if (enc) {
            aes_t4_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_encrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_encrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_encrypt;
                break;
            default:
                return 0;
            }
        } else {
            aes_t4_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_decrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_decrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_decrypt;
                break;
            default:
                return 0;
            }
        }

        aes_t4_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aes_t4_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
    }

    return 1;
}

# define aes_t4_xts_cipher aes_xts_cipher
static int aes_t4_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        int bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
        aes_t4_set_encrypt_key(key, bits, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aes_t4_encrypt);
        cctx->str = NULL;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ccm_cipher aes_ccm_cipher
static int aes_t4_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aes_t4_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aes_t4_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                   &octx->ksenc.ks);
            aes_t4_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                   &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aes_t4_encrypt,
                                    (block128_f) aes_t4_decrypt,
                                    NULL))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ocb_cipher aes_ocb_cipher
static int aes_t4_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);
# endif                         /* OPENSSL_NO_OCB */

# ifndef OPENSSL_NO_SIV
#  define aes_t4_siv_init_key aes_siv_init_key
#  define aes_t4_siv_cipher aes_siv_cipher
# endif                         /* OPENSSL_NO_SIV */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_t4_init_key, \
        aes_t4_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_t4_##mode##_init_key, \
        aes_t4_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(S390X_aes_128_CAPABLE)
/* IBM S390X support */
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-06)
         */
        struct {
            unsigned char k[32];
        } param;
        /* KM-AES parameter block - end */
    } km;
    unsigned int fc;
} S390X_AES_ECB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMO-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMO-AES parameter block - end */
    } kmo;
    unsigned int fc;

    int res;
} S390X_AES_OFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMF-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMF-AES parameter block - end */
    } kmf;
    unsigned int fc;

    int res;
} S390X_AES_CFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMA-GCM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-11)
         */
        struct {
            unsigned char reserved[12];
            union {
                unsigned int w;
                unsigned char b[4];
            } cv;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } t;
            unsigned char h[16];
            unsigned long long taadl;
            unsigned long long tpcl;
            union {
                unsigned long long g[2];
                unsigned int w[4];
            } j0;
            unsigned char k[32];
        } param;
        /* KMA-GCM-AES parameter block - end */
    } kma;
    unsigned int fc;
    int key_set;

    unsigned char *iv;
    int ivlen;
    int iv_set;
    int iv_gen;

    int taglen;

    unsigned char ares[16];
    unsigned char mres[16];
    unsigned char kres[16];
    int areslen;
    int mreslen;
    int kreslen;

    int tls_aad_len;
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
} S390X_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
         * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
         * rounds field is used to store the function code and that the key
         * schedule is not stored (if aes hardware support is detected).
         */
        struct {
            unsigned char pad[16];
            AES_KEY k;
        } key;

        struct {
            /*-
             * KMAC-AES parameter block - begin
             * (see z/Architecture Principles of Operation >= SA22-7832-08)
             */
            struct {
                union {
                    unsigned long long g[2];
                    unsigned char b[16];
                } icv;
                unsigned char k[32];
            } kmac_param;
            /* KMAC-AES parameter block - end */

            union {
                unsigned long long g[2];
                unsigned char b[16];
            } nonce;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } buf;

            unsigned long long blocks;
            int l;
            int m;
            int tls_aad_len;
            int iv_set;
            int tag_set;
            int len_set;
            int key_set;

            unsigned char pad[140];
            unsigned int fc;
        } ccm;
    } aes;
} S390X_AES_CCM_CTX;

# define s390x_aes_init_key aes_init_key
static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc);

# define S390X_AES_CBC_CTX EVP_AES_KEY

# define s390x_aes_cbc_init_key aes_init_key

# define s390x_aes_cbc_cipher aes_cbc_cipher
static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->km.param.k, key, keylen);
    return 1;
}

static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);

    s390x_km(in, len, out, cctx->fc, &cctx->km.param);
    return 1;
}

static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    memcpy(cctx->kmo.param.k, key, keylen);
    cctx->fc = S390X_AES_FC(keylen);
    cctx->res = 0;
    return 1;
}

static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    int n = cctx->res;
    int rem;

    while (n && len) {
        *out = *in ^ cctx->kmo.param.cv[n];
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
                 cctx->kmo.param.k);

        while (rem--) {
            out[n] = in[n] ^ cctx->kmo.param.cv[n];
            ++n;
        }
    }

    cctx->res = n;
    return 1;
}

static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 16 << 24;       /* 16 bytes cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    cctx->res = 0;
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);
    int n = cctx->res;
    int rem;
    unsigned char tmp;

    while (n && len) {
        tmp = *in;
        *out = cctx->kmf.param.cv[n] ^ tmp;
        cctx->kmf.param.cv[n] = enc ? *out : tmp;
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
                 S390X_AES_FC(keylen), cctx->kmf.param.k);

        while (rem--) {
            tmp = in[n];
            out[n] = cctx->kmf.param.cv[n] ^ tmp;
            cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
            ++n;
        }
    }

    cctx->res = n;
    return 1;
}

static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *key,
                                   const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 1 << 24;        /* 1 byte cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);

    s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
    return 1;
}

# define s390x_aes_cfb1_init_key aes_init_key

# define s390x_aes_cfb1_cipher aes_cfb1_cipher
static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len);

# define S390X_AES_CTR_CTX EVP_AES_KEY

# define s390x_aes_ctr_init_key aes_init_key

# define s390x_aes_ctr_cipher aes_ctr_cipher
static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

/* iv + padding length for iv lengths != 12 */
# define S390X_gcm_ivpadlen(i)  ((((i) + 15) >> 4 << 4) + 16)
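
/*
 * Worked example: for a 13-byte iv, ((13 + 15) >> 4 << 4) + 16 = 32,
 * i.e. the iv rounded up to one 16-byte block plus one further block
 * holding the 64-bit iv bit-length, as needed for the GHASH-based J0
 * computation.
 */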

/*-
 * Process additional authenticated data. Returns 0 on success. Code is
 * big-endian.
 */
static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
                             size_t len)
{
    unsigned long long alen;
    int n, rem;

    if (ctx->kma.param.tpcl)
        return -2;

    alen = ctx->kma.param.taadl + len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->kma.param.taadl = alen;

    n = ctx->areslen;
    if (n) {
        while (n && len) {
            ctx->ares[n] = *aad;
            n = (n + 1) & 0xf;
            ++aad;
            --len;
        }
        /* ctx->ares contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
        }
        ctx->areslen = n;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
        aad += len;
        ctx->fc |= S390X_KMA_HS;
    }

    if (rem) {
        ctx->areslen = rem;

        do {
            --rem;
            ctx->ares[rem] = aad[rem];
        } while (rem);
    }
    return 0;
}

/*-
 * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
 * success. Code is big-endian.
 */
static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len)
{
    const unsigned char *inptr;
    unsigned long long mlen;
    union {
        unsigned int w[4];
        unsigned char b[16];
    } buf;
    size_t inlen;
    int n, rem, i;

    mlen = ctx->kma.param.tpcl + len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->kma.param.tpcl = mlen;

    n = ctx->mreslen;
    if (n) {
        inptr = in;
        inlen = len;
        while (n && inlen) {
            ctx->mres[n] = *inptr;
            n = (n + 1) & 0xf;
            ++inptr;
            --inlen;
        }
        /* ctx->mres contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
            ctx->areslen = 0;

            /* previous call already encrypted/decrypted its remainder,
             * see comment below */
            n = ctx->mreslen;
            while (n) {
                *out = buf.b[n];
                n = (n + 1) & 0xf;
                ++out;
                ++in;
                --len;
            }
            ctx->mreslen = 0;
        }
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
        in += len;
        out += len;
        ctx->fc |= S390X_KMA_HS;
        ctx->areslen = 0;
    }

    /*-
     * If there is a remainder, it has to be saved such that it can be
     * processed by kma later. However, we also have to do the for-now
     * unauthenticated encryption/decryption part here and now...
     */
    if (rem) {
        if (!ctx->mreslen) {
            buf.w[0] = ctx->kma.param.j0.w[0];
            buf.w[1] = ctx->kma.param.j0.w[1];
            buf.w[2] = ctx->kma.param.j0.w[2];
            buf.w[3] = ctx->kma.param.cv.w + 1;
            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
        }

        n = ctx->mreslen;
        for (i = 0; i < rem; i++) {
            ctx->mres[n + i] = in[i];
            out[i] = in[i] ^ ctx->kres[n + i];
        }

        ctx->mreslen += rem;
    }
    return 0;
}

/*-
 * Initialize context structure. Code is big-endian.
 */
static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
                                const unsigned char *iv)
{
    ctx->kma.param.t.g[0] = 0;
    ctx->kma.param.t.g[1] = 0;
    ctx->kma.param.tpcl = 0;
    ctx->kma.param.taadl = 0;
    ctx->mreslen = 0;
    ctx->areslen = 0;
    ctx->kreslen = 0;

    if (ctx->ivlen == 12) {
        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
        ctx->kma.param.j0.w[3] = 1;
        ctx->kma.param.cv.w = 1;
    } else {
        /* ctx->iv has the right size and is already padded. */
        memcpy(ctx->iv, iv, ctx->ivlen);
        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
                  ctx->fc, &ctx->kma.param);
        ctx->fc |= S390X_KMA_HS;

        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
        ctx->kma.param.t.g[0] = 0;
        ctx->kma.param.t.g[1] = 0;
    }
}
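
/*
 * This follows NIST SP 800-38D: for the common 96-bit iv, J0 is simply
 * iv || 0^31 || 1 and no GHASH pass is needed; for any other length,
 * J0 = GHASH(iv padded to a block boundary || 64 zero bits || 64-bit
 * iv bit-length), which is what the kma pass above computes before the
 * tag accumulator is cleared again.
 */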

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    S390X_AES_GCM_CTX *gctx_out;
    EVP_CIPHER_CTX *out;
    unsigned char *buf, *iv;
    int ivlen, enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        ivlen = EVP_CIPHER_iv_length(c->cipher);
        iv = EVP_CIPHER_CTX_iv_noconst(c);
        gctx->key_set = 0;
        gctx->iv_set = 0;
        gctx->ivlen = ivlen;
        gctx->iv = iv;
        gctx->taglen = -1;
        gctx->iv_gen = 0;
        gctx->tls_aad_len = -1;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = gctx->ivlen;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        if (arg <= 0)
            return 0;

        if (arg != 12) {
            iv = EVP_CIPHER_CTX_iv_noconst(c);
            len = S390X_gcm_ivpadlen(arg);

            /* Allocate memory for iv if needed. */
            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
                if (gctx->iv != iv)
                    OPENSSL_free(gctx->iv);

                if ((gctx->iv = OPENSSL_malloc(len)) == NULL) {
                    EVPerr(EVP_F_S390X_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
                    return 0;
                }
            }
            /* Add padding. */
            memset(gctx->iv + arg, 0, len - arg - 8);
            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
        }
        gctx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (arg <= 0 || arg > 16 || enc)
            return 0;

        memcpy(buf, ptr, arg);
        gctx->taglen = arg;
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
            return 0;

        memcpy(ptr, gctx->kma.param.t.b, arg);
        return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
        /* Special case: -1 length restores whole iv */
        if (arg == -1) {
            memcpy(gctx->iv, ptr, gctx->ivlen);
            gctx->iv_gen = 1;
            return 1;
        }
        /*
         * Fixed field must be at least 4 bytes and invocation field at least
         * 8.
         */
        if ((arg < 4) || (gctx->ivlen - arg) < 8)
            return 0;

        if (arg)
            memcpy(gctx->iv, ptr, arg);

        enc = EVP_CIPHER_CTX_encrypting(c);
        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
            return 0;

        gctx->iv_gen = 1;
        return 1;

    case EVP_CTRL_GCM_IV_GEN:
        if (gctx->iv_gen == 0 || gctx->key_set == 0)
            return 0;

        s390x_aes_gcm_setiv(gctx, gctx->iv);

        if (arg <= 0 || arg > gctx->ivlen)
            arg = gctx->ivlen;

        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * Invocation field will be at least 8 bytes in size and so no need
         * to check wrap around or increment more than last 8 bytes.
         */
        ctr64_inc(gctx->iv + gctx->ivlen - 8);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
            return 0;

        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
        s390x_aes_gcm_setiv(gctx, gctx->iv);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_AEAD_TLS1_AAD:
        /* Save the aad for later use. */
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        gctx->tls_aad_len = arg;
        gctx->tls_enc_records = 0;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        /* Correct length for explicit iv. */
        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
            return 0;
        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;

        /* If decrypting correct for tag too. */
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (!enc) {
            if (len < EVP_GCM_TLS_TAG_LEN)
                return 0;
            len -= EVP_GCM_TLS_TAG_LEN;
        }
        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;
        /* Extra padding: tag appended to record. */
        return EVP_GCM_TLS_TAG_LEN;

    case EVP_CTRL_COPY:
        out = ptr;
        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);
        iv = EVP_CIPHER_CTX_iv_noconst(c);

        if (gctx->iv == iv) {
            gctx_out->iv = EVP_CIPHER_CTX_iv_noconst(out);
        } else {
            len = S390X_gcm_ivpadlen(gctx->ivlen);

            if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL) {
                EVPerr(EVP_F_S390X_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
                return 0;
            }

            memcpy(gctx_out->iv, gctx->iv, len);
        }
        return 1;

    default:
        return -1;
    }
}

/*-
 * Set key and/or iv. Returns 1 on success. Otherwise 0 is returned.
 */
static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_key_length(ctx);
        memcpy(&gctx->kma.param.k, key, keylen);

        gctx->fc = S390X_AES_FC(keylen);
        if (!enc)
            gctx->fc |= S390X_DECRYPT;

        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;

        if (iv != NULL) {
            s390x_aes_gcm_setiv(gctx, iv);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        if (gctx->key_set)
            s390x_aes_gcm_setiv(gctx, iv);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);

        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);
    int rv = -1;

    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
        return -1;

    /*
     * Check for too many records as per FIPS 140-2 IG A.5 "Key/IV Pair
     * Uniqueness Requirements from SP 800-38D". The requirement is for one
     * party to the communication to fail after 2^64 - 1 records encrypted
     * under a single key. We do this on the encrypting side only.
     */
    if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
        EVPerr(EVP_F_S390X_AES_GCM_TLS_CIPHER, EVP_R_TOO_MANY_RECORDS);
        goto err;
    }

    if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
                                     : EVP_CTRL_GCM_SET_IV_INV,
                            EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
        goto err;

    in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;

    gctx->kma.param.taadl = gctx->tls_aad_len << 3;
    gctx->kma.param.tpcl = len << 3;
    s390x_kma(buf, gctx->tls_aad_len, in, len, out,
              gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);

    if (enc) {
        memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
        rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
    } else {
        if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
                          EVP_GCM_TLS_TAG_LEN)) {
            OPENSSL_cleanse(out, len);
            goto err;
        }
        rv = len;
    }
err:
    gctx->iv_set = 0;
    gctx->tls_aad_len = -1;
    return rv;
}
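
/*
 * On the wire the processed TLS record fragment has the layout
 *
 *     explicit_iv (8 bytes) || ciphertext (len) || tag (16 bytes)
 *
 * which is why both pointers are advanced by
 * EVP_GCM_TLS_EXPLICIT_IV_LEN and the tag is written to/read from
 * out/in + len above.
 */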

/*-
 * Called from EVP layer to initialize context, process additional
 * authenticated data, en/de-crypt plain/cipher-text and authenticate
 * ciphertext or process a TLS packet, depending on context. Returns bytes
 * written on success. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    unsigned char *buf, tmp[16];
    int enc;

    if (!gctx->key_set)
        return -1;

    if (gctx->tls_aad_len >= 0)
        return s390x_aes_gcm_tls_cipher(ctx, out, in, len);

    if (!gctx->iv_set)
        return -1;

    if (in != NULL) {
        if (out == NULL) {
            if (s390x_aes_gcm_aad(gctx, in, len))
                return -1;
        } else {
            if (s390x_aes_gcm(gctx, in, out, len))
                return -1;
        }
        return len;
    } else {
        gctx->kma.param.taadl <<= 3;
        gctx->kma.param.tpcl <<= 3;
        s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
                  gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
        /* recall that we already did en-/decrypt gctx->mres
         * and returned it to caller... */
        OPENSSL_cleanse(tmp, gctx->mreslen);
        gctx->iv_set = 0;

        enc = EVP_CIPHER_CTX_encrypting(ctx);
        if (enc) {
            gctx->taglen = 16;
        } else {
            if (gctx->taglen < 0)
                return -1;

            buf = EVP_CIPHER_CTX_buf_noconst(ctx);
            if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
                return -1;
        }
        return 0;
    }
}

static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    const unsigned char *iv;

    if (gctx == NULL)
        return 0;

    iv = EVP_CIPHER_CTX_iv(c);
    if (iv != gctx->iv)
        OPENSSL_free(gctx->iv);

    OPENSSL_cleanse(gctx, sizeof(*gctx));
    return 1;
}

# define S390X_AES_XTS_CTX EVP_AES_XTS_CTX

# define s390x_aes_xts_init_key aes_xts_init_key
static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc);
# define s390x_aes_xts_cipher aes_xts_cipher
static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);
# define s390x_aes_xts_ctrl aes_xts_ctrl
static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
# define s390x_aes_xts_cleanup aes_xts_cleanup

/*-
 * Set nonce and length fields. Code is big-endian.
 */
static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
                                       const unsigned char *nonce,
                                       size_t mlen)
{
    ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
    ctx->aes.ccm.nonce.g[1] = mlen;
    memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
}
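
/*
 * The nonce block assembled here is CCM's B0 as defined in RFC 3610:
 * one flags byte (AAD flag, encoded m and l), the 15 - l byte nonce,
 * and the message length in the trailing l bytes. Storing mlen into
 * nonce.g[1] fills those trailing bytes because s390x is big-endian.
 */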

/*-
 * Process additional authenticated data. Code is big-endian.
 */
static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
                              size_t alen)
{
    unsigned char *ptr;
    int i, rem;

    if (!alen)
        return;

    ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;

    /* Suppress 'type-punned pointer dereference' warning. */
    ptr = ctx->aes.ccm.buf.b;

    if (alen < ((1 << 16) - (1 << 8))) {
        *(uint16_t *)ptr = alen;
        i = 2;
    } else if (sizeof(alen) == 8
               && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
        *(uint16_t *)ptr = 0xffff;
        *(uint64_t *)(ptr + 2) = alen;
        i = 10;
    } else {
        *(uint16_t *)ptr = 0xfffe;
        *(uint32_t *)(ptr + 2) = alen;
        i = 6;
    }

    while (i < 16 && alen) {
        ctx->aes.ccm.buf.b[i] = *aad;
        ++aad;
        --alen;
        ++i;
    }
    while (i < 16) {
        ctx->aes.ccm.buf.b[i] = 0;
        ++i;
    }

    ctx->aes.ccm.kmac_param.icv.g[0] = 0;
    ctx->aes.ccm.kmac_param.icv.g[1] = 0;
    s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
               &ctx->aes.ccm.kmac_param);
    ctx->aes.ccm.blocks += 2;

    rem = alen & 0xf;
    alen &= ~(size_t)0xf;
    if (alen) {
        s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        ctx->aes.ccm.blocks += alen >> 4;
        aad += alen;
    }
    if (rem) {
        for (i = 0; i < rem; i++)
            ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];

        s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                 ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
}
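
/*
 * The length prefix written above is the RFC 3610 / SP 800-38C aad
 * encoding: a plain 16-bit length below 0xff00, the marker 0xfffe
 * followed by a 32-bit length, or the marker 0xffff followed by a
 * 64-bit length. The first aad block is then padded with zeros and fed
 * to kmac together with the preceding B0 block (hence the 32-byte kmac
 * call and blocks += 2).
 */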

/*-
 * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
 * success.
 */
static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len, int enc)
{
    size_t n, rem;
    unsigned int i, l, num;
    unsigned char flags;

    flags = ctx->aes.ccm.nonce.b[0];
    if (!(flags & S390X_CCM_AAD_FLAG)) {
        s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
                 ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
    l = flags & 0x7;
    ctx->aes.ccm.nonce.b[0] = l;

    /*-
     * Reconstruct length from encoded length field
     * and initialize it with counter value.
     */
    n = 0;
    for (i = 15 - l; i < 15; i++) {
        n |= ctx->aes.ccm.nonce.b[i];
        ctx->aes.ccm.nonce.b[i] = 0;
        n <<= 8;
    }
    n |= ctx->aes.ccm.nonce.b[15];
    ctx->aes.ccm.nonce.b[15] = 1;

    if (n != len)
        return -1;              /* length mismatch */

    if (enc) {
        /* Two operations per block plus one for tag encryption */
        ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
        if (ctx->aes.ccm.blocks > (1ULL << 61))
            return -2;          /* too much data */
    }

    num = 0;
    rem = len & 0xf;
    len &= ~(size_t)0xf;

    if (enc) {
        /* mac-then-encrypt */
        if (len)
            s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }

        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);
    } else {
        /* decrypt-then-mac */
        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);

        if (len)
            s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }
    }
    /* encrypt tag */
    for (i = 15 - l; i < 16; i++)
        ctx->aes.ccm.nonce.b[i] = 0;

    s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
             ctx->aes.ccm.kmac_param.k);
    ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
    ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];

    ctx->aes.ccm.nonce.b[0] = flags;    /* restore flags field */
    return 0;
}
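
/*
 * Tag handling above, for reference: the payload is processed with
 * counter blocks starting at 1 (nonce.b[15] is set to 1 before the
 * ctr32 call), while the final s390x_km pass encrypts the CBC-MAC with
 * counter block 0, the A0 keystream block that RFC 3610 reserves for
 * the authentication tag.
 */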

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned.
 */
static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
    unsigned char *ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
    unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);

    if (out != in
            || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
        return -1;

    if (enc) {
        /* Set explicit iv (sequence number). */
        memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
    }

    len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
    /*-
     * Get explicit iv (sequence number). We already have fixed iv
     * (server/client_write_iv) here.
     */
    memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
    s390x_aes_ccm_setiv(cctx, ivec, len);

    /* Process aad (sequence number|type|version|length) */
    s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);

    in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_CCM_TLS_EXPLICIT_IV_LEN;

    if (enc) {
        if (s390x_aes_ccm(cctx, in, out, len, enc))
            return -1;

        memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
        return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
    } else {
        if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
            if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
                               cctx->aes.ccm.m))
                return len;
        }

        OPENSSL_cleanse(out, len);
        return -1;
    }
}

/*-
 * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
 * returned.
 */
static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
    unsigned char *ivec;
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_key_length(ctx);
        cctx->aes.ccm.fc = S390X_AES_FC(keylen);
        memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);

        /* Store encoded m and l. */
        cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
                                 | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
        memset(cctx->aes.ccm.nonce.b + 1, 0,
               sizeof(cctx->aes.ccm.nonce.b));
        cctx->aes.ccm.blocks = 0;

        cctx->aes.ccm.key_set = 1;
    }

    if (iv != NULL) {
        ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
        memcpy(ivec, iv, 15 - cctx->aes.ccm.l);

        cctx->aes.ccm.iv_set = 1;
    }

    return 1;
}

/*-
 * Called from EVP layer to initialize context, process additional
 * authenticated data, en/de-crypt plain/cipher-text and authenticate
 * plaintext or process a TLS packet, depending on context. Returns bytes
 * written on success. Otherwise -1 is returned.
 */
static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);
    int rv;
    unsigned char *buf, *ivec;

    if (!cctx->aes.ccm.key_set)
        return -1;

    if (cctx->aes.ccm.tls_aad_len >= 0)
        return s390x_aes_ccm_tls_cipher(ctx, out, in, len);

    /*-
     * Final(): Does not return any data. Recall that ccm is mac-then-encrypt
     * so integrity must be checked already at Update() i.e., before
     * potentially corrupted data is output.
     */
    if (in == NULL && out != NULL)
        return 0;

    if (!cctx->aes.ccm.iv_set)
        return -1;

    if (out == NULL) {
        /* Update(): Pass message length. */
        if (in == NULL) {
            ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
            s390x_aes_ccm_setiv(cctx, ivec, len);

            cctx->aes.ccm.len_set = 1;
            return len;
        }

        /* Update(): Process aad. */
        if (!cctx->aes.ccm.len_set && len)
            return -1;

        s390x_aes_ccm_aad(cctx, in, len);
        return len;
    }

    /* The tag must be set before actually decrypting data */
    if (!enc && !cctx->aes.ccm.tag_set)
        return -1;

    /* Update(): Process message. */

    if (!cctx->aes.ccm.len_set) {
        /*-
         * In case message length was not previously set explicitly via
         * Update(), set it now.
         */
        ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
        s390x_aes_ccm_setiv(cctx, ivec, len);

        cctx->aes.ccm.len_set = 1;
    }

    if (enc) {
        if (s390x_aes_ccm(cctx, in, out, len, enc))
            return -1;

        cctx->aes.ccm.tag_set = 1;
        return len;
    } else {
        rv = -1;

        if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
            buf = EVP_CIPHER_CTX_buf_noconst(ctx);
            if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
                               cctx->aes.ccm.m))
                rv = len;
        }

        if (rv == -1)
            OPENSSL_cleanse(out, len);

        cctx->aes.ccm.iv_set = 0;
        cctx->aes.ccm.tag_set = 0;
        cctx->aes.ccm.len_set = 0;
        return rv;
    }
}

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
    unsigned char *buf, *iv;
    int enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        cctx->aes.ccm.key_set = 0;
        cctx->aes.ccm.iv_set = 0;
        cctx->aes.ccm.l = 8;
        cctx->aes.ccm.m = 12;
        cctx->aes.ccm.tag_set = 0;
        cctx->aes.ccm.len_set = 0;
        cctx->aes.ccm.tls_aad_len = -1;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = 15 - cctx->aes.ccm.l;
        return 1;

    case EVP_CTRL_AEAD_TLS1_AAD:
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        /* Save the aad for later use. */
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        cctx->aes.ccm.tls_aad_len = arg;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
            return 0;

        /* Correct length for explicit iv. */
        len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;

        enc = EVP_CIPHER_CTX_encrypting(c);
        if (!enc) {
            if (len < cctx->aes.ccm.m)
                return 0;

            /* Correct length for tag. */
            len -= cctx->aes.ccm.m;
        }

        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;

        /* Extra padding: tag appended to record. */
        return cctx->aes.ccm.m;

    case EVP_CTRL_CCM_SET_IV_FIXED:
        if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
            return 0;

        /* Copy to first part of the iv. */
        iv = EVP_CIPHER_CTX_iv_noconst(c);
        memcpy(iv, ptr, arg);
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        arg = 15 - arg;
        /* fall-through */

    case EVP_CTRL_CCM_SET_L:
        if (arg < 2 || arg > 8)
            return 0;

        cctx->aes.ccm.l = arg;
2117 return 1;
2118
2119 case EVP_CTRL_AEAD_SET_TAG:
2120 if ((arg & 1) || arg < 4 || arg > 16)
2121 return 0;
2122
2123 enc = EVP_CIPHER_CTX_encrypting(c);
2124 if (enc && ptr)
2125 return 0;
2126
2127 if (ptr) {
2128 cctx->aes.ccm.tag_set = 1;
2129 buf = EVP_CIPHER_CTX_buf_noconst(c);
2130 memcpy(buf, ptr, arg);
2131 }
2132
2133 cctx->aes.ccm.m = arg;
2134 return 1;
2135
2136 case EVP_CTRL_AEAD_GET_TAG:
2137 enc = EVP_CIPHER_CTX_encrypting(c);
2138 if (!enc || !cctx->aes.ccm.tag_set)
2139 return 0;
2140
        if (arg < cctx->aes.ccm.m)
2142 return 0;
2143
2144 memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2145 cctx->aes.ccm.tag_set = 0;
2146 cctx->aes.ccm.iv_set = 0;
2147 cctx->aes.ccm.len_set = 0;
2148 return 1;
2149
2150 case EVP_CTRL_COPY:
2151 return 1;
2152
2153 default:
2154 return -1;
2155 }
2156 }
2157
2158 # define s390x_aes_ccm_cleanup aes_ccm_cleanup
2159
2160 # ifndef OPENSSL_NO_OCB
2161 # define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
2162
2163 # define s390x_aes_ocb_init_key aes_ocb_init_key
2164 static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2165 const unsigned char *iv, int enc);
2166 # define s390x_aes_ocb_cipher aes_ocb_cipher
2167 static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2168 const unsigned char *in, size_t len);
2169 # define s390x_aes_ocb_cleanup aes_ocb_cleanup
2170 static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
2171 # define s390x_aes_ocb_ctrl aes_ocb_ctrl
2172 static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
2173 # endif
2174
2175 # ifndef OPENSSL_NO_SIV
2176 # define S390X_AES_SIV_CTX EVP_AES_SIV_CTX
2177
2178 # define s390x_aes_siv_init_key aes_siv_init_key
2179 # define s390x_aes_siv_cipher aes_siv_cipher
2180 # define s390x_aes_siv_cleanup aes_siv_cleanup
2181 # define s390x_aes_siv_ctrl aes_siv_ctrl
2182 # endif
2183
2184 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
2185 MODE,flags) \
2186 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2187 nid##_##keylen##_##nmode,blocksize, \
2188 keylen / 8, \
2189 ivlen, \
2190 flags | EVP_CIPH_##MODE##_MODE, \
2191 s390x_aes_##mode##_init_key, \
2192 s390x_aes_##mode##_cipher, \
2193 NULL, \
2194 sizeof(S390X_AES_##MODE##_CTX), \
2195 NULL, \
2196 NULL, \
2197 NULL, \
2198 NULL \
2199 }; \
2200 static const EVP_CIPHER aes_##keylen##_##mode = { \
2201 nid##_##keylen##_##nmode, \
2202 blocksize, \
2203 keylen / 8, \
2204 ivlen, \
2205 flags | EVP_CIPH_##MODE##_MODE, \
2206 aes_init_key, \
2207 aes_##mode##_cipher, \
2208 NULL, \
2209 sizeof(EVP_AES_KEY), \
2210 NULL, \
2211 NULL, \
2212 NULL, \
2213 NULL \
2214 }; \
2215 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2216 { \
2217 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2218 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2219 }
2220
2221 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
2222 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2223 nid##_##keylen##_##mode, \
2224 blocksize, \
2225 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2226 ivlen, \
2227 flags | EVP_CIPH_##MODE##_MODE, \
2228 s390x_aes_##mode##_init_key, \
2229 s390x_aes_##mode##_cipher, \
2230 s390x_aes_##mode##_cleanup, \
2231 sizeof(S390X_AES_##MODE##_CTX), \
2232 NULL, \
2233 NULL, \
2234 s390x_aes_##mode##_ctrl, \
2235 NULL \
2236 }; \
2237 static const EVP_CIPHER aes_##keylen##_##mode = { \
2238 nid##_##keylen##_##mode,blocksize, \
2239 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2240 ivlen, \
2241 flags | EVP_CIPH_##MODE##_MODE, \
2242 aes_##mode##_init_key, \
2243 aes_##mode##_cipher, \
2244 aes_##mode##_cleanup, \
2245 sizeof(EVP_AES_##MODE##_CTX), \
2246 NULL, \
2247 NULL, \
2248 aes_##mode##_ctrl, \
2249 NULL \
2250 }; \
2251 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2252 { \
2253 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2254 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2255 }
2256
2257 #else
2258
2259 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
2260 static const EVP_CIPHER aes_##keylen##_##mode = { \
2261 nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
2262 flags|EVP_CIPH_##MODE##_MODE, \
2263 aes_init_key, \
2264 aes_##mode##_cipher, \
2265 NULL, \
2266 sizeof(EVP_AES_KEY), \
2267 NULL,NULL,NULL,NULL }; \
2268 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2269 { return &aes_##keylen##_##mode; }
2270
2271 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
2272 static const EVP_CIPHER aes_##keylen##_##mode = { \
2273 nid##_##keylen##_##mode,blocksize, \
2274 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
2275 ivlen, \
2276 flags|EVP_CIPH_##MODE##_MODE, \
2277 aes_##mode##_init_key, \
2278 aes_##mode##_cipher, \
2279 aes_##mode##_cleanup, \
2280 sizeof(EVP_AES_##MODE##_CTX), \
2281 NULL,NULL,aes_##mode##_ctrl,NULL }; \
2282 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2283 { return &aes_##keylen##_##mode; }
2284
2285 #endif
2286
2287 #define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
2288 BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2289 BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2290 BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2291 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2292 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags) \
2293 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags) \
2294 BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
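
/*-
 * Illustrative note (annotation, not generated code): a single expansion
 * such as BLOCK_CIPHER_generic_pack(NID_aes, 128, 0) defines the
 * EVP_CIPHER tables and public accessors for every generic mode at that
 * key size: EVP_aes_128_cbc(), EVP_aes_128_ecb(), EVP_aes_128_ofb(),
 * EVP_aes_128_ctr() and the three CFB variants.
 */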
2295
2296 static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2297 const unsigned char *iv, int enc)
2298 {
2299 int ret, mode;
2300 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2301
2302 mode = EVP_CIPHER_CTX_mode(ctx);
2303 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
2304 && !enc) {
2305 #ifdef HWAES_CAPABLE
2306 if (HWAES_CAPABLE) {
2307 ret = HWAES_set_decrypt_key(key,
2308 EVP_CIPHER_CTX_key_length(ctx) * 8,
2309 &dat->ks.ks);
2310 dat->block = (block128_f) HWAES_decrypt;
2311 dat->stream.cbc = NULL;
2312 # ifdef HWAES_cbc_encrypt
2313 if (mode == EVP_CIPH_CBC_MODE)
2314 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2315 # endif
2316 } else
2317 #endif
2318 #ifdef BSAES_CAPABLE
2319 if (BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE) {
2320 ret = AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2321 &dat->ks.ks);
2322 dat->block = (block128_f) AES_decrypt;
2323 dat->stream.cbc = (cbc128_f) bsaes_cbc_encrypt;
2324 } else
2325 #endif
2326 #ifdef VPAES_CAPABLE
2327 if (VPAES_CAPABLE) {
2328 ret = vpaes_set_decrypt_key(key,
2329 EVP_CIPHER_CTX_key_length(ctx) * 8,
2330 &dat->ks.ks);
2331 dat->block = (block128_f) vpaes_decrypt;
2332 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2333 (cbc128_f) vpaes_cbc_encrypt : NULL;
2334 } else
2335 #endif
2336 {
2337 ret = AES_set_decrypt_key(key,
2338 EVP_CIPHER_CTX_key_length(ctx) * 8,
2339 &dat->ks.ks);
2340 dat->block = (block128_f) AES_decrypt;
2341 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2342 (cbc128_f) AES_cbc_encrypt : NULL;
2343 }
2344 } else
2345 #ifdef HWAES_CAPABLE
2346 if (HWAES_CAPABLE) {
2347 ret = HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2348 &dat->ks.ks);
2349 dat->block = (block128_f) HWAES_encrypt;
2350 dat->stream.cbc = NULL;
2351 # ifdef HWAES_cbc_encrypt
2352 if (mode == EVP_CIPH_CBC_MODE)
2353 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2354 else
2355 # endif
2356 # ifdef HWAES_ctr32_encrypt_blocks
2357 if (mode == EVP_CIPH_CTR_MODE)
2358 dat->stream.ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2359 else
2360 # endif
2361 (void)0; /* terminate potentially open 'else' */
2362 } else
2363 #endif
2364 #ifdef BSAES_CAPABLE
2365 if (BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE) {
2366 ret = AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2367 &dat->ks.ks);
2368 dat->block = (block128_f) AES_encrypt;
2369 dat->stream.ctr = (ctr128_f) bsaes_ctr32_encrypt_blocks;
2370 } else
2371 #endif
2372 #ifdef VPAES_CAPABLE
2373 if (VPAES_CAPABLE) {
2374 ret = vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2375 &dat->ks.ks);
2376 dat->block = (block128_f) vpaes_encrypt;
2377 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2378 (cbc128_f) vpaes_cbc_encrypt : NULL;
2379 } else
2380 #endif
2381 {
2382 ret = AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2383 &dat->ks.ks);
2384 dat->block = (block128_f) AES_encrypt;
2385 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2386 (cbc128_f) AES_cbc_encrypt : NULL;
2387 #ifdef AES_CTR_ASM
2388 if (mode == EVP_CIPH_CTR_MODE)
2389 dat->stream.ctr = (ctr128_f) AES_ctr32_encrypt;
2390 #endif
2391 }
2392
2393 if (ret < 0) {
2394 EVPerr(EVP_F_AES_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
2395 return 0;
2396 }
2397
2398 return 1;
2399 }
2400
2401 static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2402 const unsigned char *in, size_t len)
2403 {
2404 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2405
2406 if (dat->stream.cbc)
2407 (*dat->stream.cbc) (in, out, len, &dat->ks,
2408 EVP_CIPHER_CTX_iv_noconst(ctx),
2409 EVP_CIPHER_CTX_encrypting(ctx));
2410 else if (EVP_CIPHER_CTX_encrypting(ctx))
2411 CRYPTO_cbc128_encrypt(in, out, len, &dat->ks,
2412 EVP_CIPHER_CTX_iv_noconst(ctx), dat->block);
2413 else
2414 CRYPTO_cbc128_decrypt(in, out, len, &dat->ks,
2415 EVP_CIPHER_CTX_iv_noconst(ctx), dat->block);
2416
2417 return 1;
2418 }
2419
2420 static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2421 const unsigned char *in, size_t len)
2422 {
2423 size_t bl = EVP_CIPHER_CTX_block_size(ctx);
2424 size_t i;
2425 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2426
2427 if (len < bl)
2428 return 1;
2429
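    /*
     * Note: after the len -= bl adjustment below, i <= len holds for
     * exactly floor(original len / bl) iterations, so only whole blocks
     * are processed; any trailing partial block is left to the caller.
     */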
2430 for (i = 0, len -= bl; i <= len; i += bl)
2431 (*dat->block) (in + i, out + i, &dat->ks);
2432
2433 return 1;
2434 }
2435
2436 static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2437 const unsigned char *in, size_t len)
2438 {
2439 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2440
2441 int num = EVP_CIPHER_CTX_num(ctx);
2442 CRYPTO_ofb128_encrypt(in, out, len, &dat->ks,
2443 EVP_CIPHER_CTX_iv_noconst(ctx), &num, dat->block);
2444 EVP_CIPHER_CTX_set_num(ctx, num);
2445 return 1;
2446 }
2447
2448 static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2449 const unsigned char *in, size_t len)
2450 {
2451 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2452
2453 int num = EVP_CIPHER_CTX_num(ctx);
2454 CRYPTO_cfb128_encrypt(in, out, len, &dat->ks,
2455 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2456 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2457 EVP_CIPHER_CTX_set_num(ctx, num);
2458 return 1;
2459 }
2460
2461 static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2462 const unsigned char *in, size_t len)
2463 {
2464 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2465
2466 int num = EVP_CIPHER_CTX_num(ctx);
2467 CRYPTO_cfb128_8_encrypt(in, out, len, &dat->ks,
2468 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2469 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2470 EVP_CIPHER_CTX_set_num(ctx, num);
2471 return 1;
2472 }
2473
2474 static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2475 const unsigned char *in, size_t len)
2476 {
2477 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2478
2479 if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) {
2480 int num = EVP_CIPHER_CTX_num(ctx);
2481 CRYPTO_cfb128_1_encrypt(in, out, len, &dat->ks,
2482 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2483 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2484 EVP_CIPHER_CTX_set_num(ctx, num);
2485 return 1;
2486 }
2487
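    /*
     * Byte-length mode: process the input in MAXBITCHUNK-sized chunks so
     * that the len * 8 bit count passed to CRYPTO_cfb128_1_encrypt()
     * cannot overflow a size_t.
     */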
2488 while (len >= MAXBITCHUNK) {
2489 int num = EVP_CIPHER_CTX_num(ctx);
2490 CRYPTO_cfb128_1_encrypt(in, out, MAXBITCHUNK * 8, &dat->ks,
2491 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2492 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2493 EVP_CIPHER_CTX_set_num(ctx, num);
2494 len -= MAXBITCHUNK;
2495 out += MAXBITCHUNK;
2496 in += MAXBITCHUNK;
2497 }
2498 if (len) {
2499 int num = EVP_CIPHER_CTX_num(ctx);
2500 CRYPTO_cfb128_1_encrypt(in, out, len * 8, &dat->ks,
2501 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2502 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2503 EVP_CIPHER_CTX_set_num(ctx, num);
2504 }
2505
2506 return 1;
2507 }
2508
2509 static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2510 const unsigned char *in, size_t len)
2511 {
2512 unsigned int num = EVP_CIPHER_CTX_num(ctx);
2513 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2514
2515 if (dat->stream.ctr)
2516 CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks,
2517 EVP_CIPHER_CTX_iv_noconst(ctx),
2518 EVP_CIPHER_CTX_buf_noconst(ctx),
2519 &num, dat->stream.ctr);
2520 else
2521 CRYPTO_ctr128_encrypt(in, out, len, &dat->ks,
2522 EVP_CIPHER_CTX_iv_noconst(ctx),
2523 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
2524 dat->block);
2525 EVP_CIPHER_CTX_set_num(ctx, num);
2526 return 1;
2527 }
2528
2529 BLOCK_CIPHER_generic_pack(NID_aes, 128, 0)
2530 BLOCK_CIPHER_generic_pack(NID_aes, 192, 0)
2531 BLOCK_CIPHER_generic_pack(NID_aes, 256, 0)
2532
2533 static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
2534 {
2535 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2536 if (gctx == NULL)
2537 return 0;
2538 OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
2539 if (gctx->iv != EVP_CIPHER_CTX_iv_noconst(c))
2540 OPENSSL_free(gctx->iv);
2541 return 1;
2542 }
2543
2544 static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2545 {
2546 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2547 switch (type) {
2548 case EVP_CTRL_INIT:
2549 gctx->key_set = 0;
2550 gctx->iv_set = 0;
2551 gctx->ivlen = EVP_CIPHER_iv_length(c->cipher);
2552 gctx->iv = c->iv;
2553 gctx->taglen = -1;
2554 gctx->iv_gen = 0;
2555 gctx->tls_aad_len = -1;
2556 return 1;
2557
2558 case EVP_CTRL_GET_IVLEN:
2559 *(int *)ptr = gctx->ivlen;
2560 return 1;
2561
2562 case EVP_CTRL_AEAD_SET_IVLEN:
2563 if (arg <= 0)
2564 return 0;
2565 /* Allocate memory for IV if needed */
2566 if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
2567 if (gctx->iv != c->iv)
2568 OPENSSL_free(gctx->iv);
2569 if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) {
2570 EVPerr(EVP_F_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
2571 return 0;
2572 }
2573 }
2574 gctx->ivlen = arg;
2575 return 1;
2576
2577 case EVP_CTRL_AEAD_SET_TAG:
2578 if (arg <= 0 || arg > 16 || c->encrypt)
2579 return 0;
2580 memcpy(c->buf, ptr, arg);
2581 gctx->taglen = arg;
2582 return 1;
2583
2584 case EVP_CTRL_AEAD_GET_TAG:
2585 if (arg <= 0 || arg > 16 || !c->encrypt
2586 || gctx->taglen < 0)
2587 return 0;
2588 memcpy(ptr, c->buf, arg);
2589 return 1;
2590
2591 case EVP_CTRL_GET_IV:
2592 if (gctx->iv_gen != 1 && gctx->iv_gen_rand != 1)
2593 return 0;
2594 if (gctx->ivlen != arg)
2595 return 0;
2596 memcpy(ptr, gctx->iv, arg);
2597 return 1;
2598
2599 case EVP_CTRL_GCM_SET_IV_FIXED:
2600 /* Special case: -1 length restores whole IV */
2601 if (arg == -1) {
2602 memcpy(gctx->iv, ptr, gctx->ivlen);
2603 gctx->iv_gen = 1;
2604 return 1;
2605 }
2606 /*
2607 * Fixed field must be at least 4 bytes and invocation field at least
2608 * 8.
2609 */
2610 if ((arg < 4) || (gctx->ivlen - arg) < 8)
2611 return 0;
2612 if (arg)
2613 memcpy(gctx->iv, ptr, arg);
2614 if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
2615 return 0;
2616 gctx->iv_gen = 1;
2617 return 1;
2618
2619 case EVP_CTRL_GCM_IV_GEN:
2620 if (gctx->iv_gen == 0 || gctx->key_set == 0)
2621 return 0;
2622 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2623 if (arg <= 0 || arg > gctx->ivlen)
2624 arg = gctx->ivlen;
2625 memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * The invocation field will be at least 8 bytes in size, so there
         * is no need to check for wraparound or to increment more than
         * the last 8 bytes.
         */
2630 ctr64_inc(gctx->iv + gctx->ivlen - 8);
2631 gctx->iv_set = 1;
2632 return 1;
2633
2634 case EVP_CTRL_GCM_SET_IV_INV:
2635 if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
2636 return 0;
2637 memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
2638 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2639 gctx->iv_set = 1;
2640 return 1;
2641
2642 case EVP_CTRL_AEAD_TLS1_AAD:
2643 /* Save the AAD for later use */
2644 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2645 return 0;
2646 memcpy(c->buf, ptr, arg);
2647 gctx->tls_aad_len = arg;
2648 gctx->tls_enc_records = 0;
2649 {
2650 unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
2651 /* Correct length for explicit IV */
2652 if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
2653 return 0;
2654 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
2655 /* If decrypting correct for tag too */
2656 if (!c->encrypt) {
2657 if (len < EVP_GCM_TLS_TAG_LEN)
2658 return 0;
2659 len -= EVP_GCM_TLS_TAG_LEN;
2660 }
2661 c->buf[arg - 2] = len >> 8;
2662 c->buf[arg - 1] = len & 0xff;
2663 }
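
        /*-
         * Worked example (annotation only): when decrypting, a length
         * field of 40 is rewritten to 40 - 8 (explicit IV) - 16 (tag)
         * = 16, the plaintext length seen by the record layer.
         */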
2664 /* Extra padding: tag appended to record */
2665 return EVP_GCM_TLS_TAG_LEN;
2666
2667 case EVP_CTRL_COPY:
2668 {
2669 EVP_CIPHER_CTX *out = ptr;
2670 EVP_AES_GCM_CTX *gctx_out = EVP_C_DATA(EVP_AES_GCM_CTX,out);
2671 if (gctx->gcm.key) {
2672 if (gctx->gcm.key != &gctx->ks)
2673 return 0;
2674 gctx_out->gcm.key = &gctx_out->ks;
2675 }
2676 if (gctx->iv == c->iv)
2677 gctx_out->iv = out->iv;
2678 else {
2679 if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) {
2680 EVPerr(EVP_F_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
2681 return 0;
2682 }
2683 memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
2684 }
2685 return 1;
2686 }
2687
2688 default:
2689 return -1;
2690
2691 }
2692 }
2693
2694 static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2695 const unsigned char *iv, int enc)
2696 {
2697 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2698 if (!iv && !key)
2699 return 1;
2700 if (key) {
2701 do {
2702 #ifdef HWAES_CAPABLE
2703 if (HWAES_CAPABLE) {
2704 HWAES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2705 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2706 (block128_f) HWAES_encrypt);
2707 # ifdef HWAES_ctr32_encrypt_blocks
2708 gctx->ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2709 # else
2710 gctx->ctr = NULL;
2711 # endif
2712 break;
2713 } else
2714 #endif
2715 #ifdef BSAES_CAPABLE
2716 if (BSAES_CAPABLE) {
2717 AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2718 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2719 (block128_f) AES_encrypt);
2720 gctx->ctr = (ctr128_f) bsaes_ctr32_encrypt_blocks;
2721 break;
2722 } else
2723 #endif
2724 #ifdef VPAES_CAPABLE
2725 if (VPAES_CAPABLE) {
2726 vpaes_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2727 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2728 (block128_f) vpaes_encrypt);
2729 gctx->ctr = NULL;
2730 break;
2731 } else
2732 #endif
2733 (void)0; /* terminate potentially open 'else' */
2734
2735 AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2736 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2737 (block128_f) AES_encrypt);
2738 #ifdef AES_CTR_ASM
2739 gctx->ctr = (ctr128_f) AES_ctr32_encrypt;
2740 #else
2741 gctx->ctr = NULL;
2742 #endif
2743 } while (0);
2744
        /*
         * If we have an iv we can set it directly, otherwise use the
         * saved IV.
         */
2748 if (iv == NULL && gctx->iv_set)
2749 iv = gctx->iv;
2750 if (iv) {
2751 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2752 gctx->iv_set = 1;
2753 }
2754 gctx->key_set = 1;
2755 } else {
2756 /* If key set use IV, otherwise copy */
2757 if (gctx->key_set)
2758 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2759 else
2760 memcpy(gctx->iv, iv, gctx->ivlen);
2761 gctx->iv_set = 1;
2762 gctx->iv_gen = 0;
2763 }
2764 return 1;
2765 }
2766
2767 /*
2768 * Handle TLS GCM packet format. This consists of the last portion of the IV
2769 * followed by the payload and finally the tag. On encrypt generate IV,
2770 * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
2771 * and verify tag.
2772 */
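/*-
 * Illustrative record layout (annotation only, using the TLS 1.2 sizes
 * EVP_GCM_TLS_EXPLICIT_IV_LEN == 8 and EVP_GCM_TLS_TAG_LEN == 16):
 *
 *   | explicit IV (8) | payload (len - 8 - 16) | tag (16) |
 *
 * Hence the len >= 24 check below, and processing is done in place.
 */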
2773
2774 static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2775 const unsigned char *in, size_t len)
2776 {
2777 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2778 int rv = -1;
2779 /* Encrypt/decrypt must be performed in place */
2780 if (out != in
2781 || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
2782 return -1;
2783
    /*
     * Check for too many records as per FIPS 140-2 IG A.5 "Key/IV Pair
     * Uniqueness Requirements from SP 800-38D". The requirement is for one
     * party to the communication to fail after 2^64 - 1 records with the
     * same key. We do this on the encrypting side only.
     */
2790 if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
2791 EVPerr(EVP_F_AES_GCM_TLS_CIPHER, EVP_R_TOO_MANY_RECORDS);
2792 goto err;
2793 }
2794
2795 /*
2796 * Set IV from start of buffer or generate IV and write to start of
2797 * buffer.
2798 */
2799 if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? EVP_CTRL_GCM_IV_GEN
2800 : EVP_CTRL_GCM_SET_IV_INV,
2801 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
2802 goto err;
2803 /* Use saved AAD */
2804 if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len))
2805 goto err;
2806 /* Fix buffer and length to point to payload */
2807 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2808 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2809 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2810 if (ctx->encrypt) {
2811 /* Encrypt payload */
2812 if (gctx->ctr) {
2813 size_t bulk = 0;
2814 #if defined(AES_GCM_ASM)
2815 if (len >= 32 && AES_GCM_ASM(gctx)) {
2816 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2817 return -1;
2818
2819 bulk = AES_gcm_encrypt(in, out, len,
2820 gctx->gcm.key,
2821 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2822 gctx->gcm.len.u[1] += bulk;
2823 }
2824 #endif
2825 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2826 in + bulk,
2827 out + bulk,
2828 len - bulk, gctx->ctr))
2829 goto err;
2830 } else {
2831 size_t bulk = 0;
2832 #if defined(AES_GCM_ASM2)
2833 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2834 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2835 return -1;
2836
2837 bulk = AES_gcm_encrypt(in, out, len,
2838 gctx->gcm.key,
2839 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2840 gctx->gcm.len.u[1] += bulk;
2841 }
2842 #endif
2843 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
2844 in + bulk, out + bulk, len - bulk))
2845 goto err;
2846 }
2847 out += len;
2848 /* Finally write tag */
2849 CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN);
2850 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2851 } else {
2852 /* Decrypt */
2853 if (gctx->ctr) {
2854 size_t bulk = 0;
2855 #if defined(AES_GCM_ASM)
2856 if (len >= 16 && AES_GCM_ASM(gctx)) {
2857 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2858 return -1;
2859
2860 bulk = AES_gcm_decrypt(in, out, len,
2861 gctx->gcm.key,
2862 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2863 gctx->gcm.len.u[1] += bulk;
2864 }
2865 #endif
2866 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
2867 in + bulk,
2868 out + bulk,
2869 len - bulk, gctx->ctr))
2870 goto err;
2871 } else {
2872 size_t bulk = 0;
2873 #if defined(AES_GCM_ASM2)
2874 if (len >= 16 && AES_GCM_ASM2(gctx)) {
2875 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2876 return -1;
2877
2878 bulk = AES_gcm_decrypt(in, out, len,
2879 gctx->gcm.key,
2880 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2881 gctx->gcm.len.u[1] += bulk;
2882 }
2883 #endif
2884 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
2885 in + bulk, out + bulk, len - bulk))
2886 goto err;
2887 }
2888 /* Retrieve tag */
2889 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN);
2890 /* If tag mismatch wipe buffer */
2891 if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) {
2892 OPENSSL_cleanse(out, len);
2893 goto err;
2894 }
2895 rv = len;
2896 }
2897
2898 err:
2899 gctx->iv_set = 0;
2900 gctx->tls_aad_len = -1;
2901 return rv;
2902 }
2903
2904 #ifdef FIPS_MODULE
2905 /*
2906 * See SP800-38D (GCM) Section 8 "Uniqueness requirement on IVS and keys"
2907 *
2908 * See also 8.2.2 RBG-based construction.
2909 * Random construction consists of a free field (which can be NULL) and a
2910 * random field which will use a DRBG that can return at least 96 bits of
2911 * entropy strength. (The DRBG must be seeded by the FIPS module).
2912 */
2913 static int aes_gcm_iv_generate(EVP_AES_GCM_CTX *gctx, int offset)
2914 {
2915 int sz = gctx->ivlen - offset;
2916
2917 /* Must be at least 96 bits */
2918 if (sz <= 0 || gctx->ivlen < 12)
2919 return 0;
2920
2921 /* Use DRBG to generate random iv */
2922 if (RAND_bytes(gctx->iv + offset, sz) <= 0)
2923 return 0;
2924 return 1;
2925 }
2926 #endif /* FIPS_MODULE */
2927
2928 static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2929 const unsigned char *in, size_t len)
2930 {
2931 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2932
2933 /* If not set up, return error */
2934 if (!gctx->key_set)
2935 return -1;
2936
2937 if (gctx->tls_aad_len >= 0)
2938 return aes_gcm_tls_cipher(ctx, out, in, len);
2939
2940 #ifdef FIPS_MODULE
    /*
     * FIPS requires generation of AES-GCM IVs inside the FIPS module.
     * The IV can still be set externally (the security policy will state
     * that this is not FIPS compliant). There are some applications where
     * setting the IV externally is the only option available.
     */
2947 if (!gctx->iv_set) {
2948 if (!ctx->encrypt || !aes_gcm_iv_generate(gctx, 0))
2949 return -1;
2950 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2951 gctx->iv_set = 1;
2952 gctx->iv_gen_rand = 1;
2953 }
2954 #else
2955 if (!gctx->iv_set)
2956 return -1;
2957 #endif /* FIPS_MODULE */
2958
2959 if (in) {
2960 if (out == NULL) {
2961 if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
2962 return -1;
2963 } else if (ctx->encrypt) {
2964 if (gctx->ctr) {
2965 size_t bulk = 0;
2966 #if defined(AES_GCM_ASM)
2967 if (len >= 32 && AES_GCM_ASM(gctx)) {
2968 size_t res = (16 - gctx->gcm.mres) % 16;
2969
2970 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
2971 return -1;
2972
2973 bulk = AES_gcm_encrypt(in + res,
2974 out + res, len - res,
2975 gctx->gcm.key, gctx->gcm.Yi.c,
2976 gctx->gcm.Xi.u);
2977 gctx->gcm.len.u[1] += bulk;
2978 bulk += res;
2979 }
2980 #endif
2981 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2982 in + bulk,
2983 out + bulk,
2984 len - bulk, gctx->ctr))
2985 return -1;
2986 } else {
2987 size_t bulk = 0;
2988 #if defined(AES_GCM_ASM2)
2989 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2990 size_t res = (16 - gctx->gcm.mres) % 16;
2991
2992 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
2993 return -1;
2994
2995 bulk = AES_gcm_encrypt(in + res,
2996 out + res, len - res,
2997 gctx->gcm.key, gctx->gcm.Yi.c,
2998 gctx->gcm.Xi.u);
2999 gctx->gcm.len.u[1] += bulk;
3000 bulk += res;
3001 }
3002 #endif
3003 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3004 in + bulk, out + bulk, len - bulk))
3005 return -1;
3006 }
3007 } else {
3008 if (gctx->ctr) {
3009 size_t bulk = 0;
3010 #if defined(AES_GCM_ASM)
3011 if (len >= 16 && AES_GCM_ASM(gctx)) {
3012 size_t res = (16 - gctx->gcm.mres) % 16;
3013
3014 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3015 return -1;
3016
3017 bulk = AES_gcm_decrypt(in + res,
3018 out + res, len - res,
3019 gctx->gcm.key,
3020 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3021 gctx->gcm.len.u[1] += bulk;
3022 bulk += res;
3023 }
3024 #endif
3025 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3026 in + bulk,
3027 out + bulk,
3028 len - bulk, gctx->ctr))
3029 return -1;
3030 } else {
3031 size_t bulk = 0;
3032 #if defined(AES_GCM_ASM2)
3033 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3034 size_t res = (16 - gctx->gcm.mres) % 16;
3035
3036 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3037 return -1;
3038
3039 bulk = AES_gcm_decrypt(in + res,
3040 out + res, len - res,
3041 gctx->gcm.key,
3042 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3043 gctx->gcm.len.u[1] += bulk;
3044 bulk += res;
3045 }
3046 #endif
3047 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3048 in + bulk, out + bulk, len - bulk))
3049 return -1;
3050 }
3051 }
3052 return len;
3053 } else {
3054 if (!ctx->encrypt) {
3055 if (gctx->taglen < 0)
3056 return -1;
3057 if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0)
3058 return -1;
3059 gctx->iv_set = 0;
3060 return 0;
3061 }
3062 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
3063 gctx->taglen = 16;
3064 /* Don't reuse the IV */
3065 gctx->iv_set = 0;
3066 return 0;
3067 }
3068
3069 }
3070
3071 #define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \
3072 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3073 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3074 | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH)
3075
3076 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM,
3077 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3078 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, gcm, GCM,
3079 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3080 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, gcm, GCM,
3081 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
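
/*-
 * Hedged usage sketch (annotation only, error handling omitted): typical
 * non-TLS AES-GCM usage of the ciphers defined above. The names c, key,
 * iv, aad, pt, ct, tag and the length variables are caller-supplied and
 * purely illustrative.
 *
 *   EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
 *   EVP_EncryptInit_ex(c, EVP_aes_256_gcm(), NULL, key, iv);
 *   EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen);   (AAD-only pass)
 *   EVP_EncryptUpdate(c, ct, &outl, pt, ptlen);       (encrypt payload)
 *   EVP_EncryptFinal_ex(c, ct + outl, &tmplen);
 *   EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 *   EVP_CIPHER_CTX_free(c);
 */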
3082
3083 static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3084 {
3085 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX, c);
3086
3087 if (type == EVP_CTRL_COPY) {
3088 EVP_CIPHER_CTX *out = ptr;
3089 EVP_AES_XTS_CTX *xctx_out = EVP_C_DATA(EVP_AES_XTS_CTX,out);
3090
3091 if (xctx->xts.key1) {
3092 if (xctx->xts.key1 != &xctx->ks1)
3093 return 0;
3094 xctx_out->xts.key1 = &xctx_out->ks1;
3095 }
3096 if (xctx->xts.key2) {
3097 if (xctx->xts.key2 != &xctx->ks2)
3098 return 0;
3099 xctx_out->xts.key2 = &xctx_out->ks2;
3100 }
3101 return 1;
3102 } else if (type != EVP_CTRL_INIT)
3103 return -1;
    /* key1 and key2 are used as an indicator that both key and IV are set */
3105 xctx->xts.key1 = NULL;
3106 xctx->xts.key2 = NULL;
3107 return 1;
3108 }
3109
3110 static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3111 const unsigned char *iv, int enc)
3112 {
3113 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3114
3115 if (!iv && !key)
3116 return 1;
3117
3118 if (key) {
3119 do {
            /* The key is really two half-length keys */
3121 const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
3122 const int bits = bytes * 8;
3123
3124 /*
3125 * Verify that the two keys are different.
3126 *
3127 * This addresses the vulnerability described in Rogaway's
3128 * September 2004 paper:
3129 *
3130 * "Efficient Instantiations of Tweakable Blockciphers and
3131 * Refinements to Modes OCB and PMAC".
3132 * (http://web.cs.ucdavis.edu/~rogaway/papers/offsets.pdf)
3133 *
3134 * FIPS 140-2 IG A.9 XTS-AES Key Generation Requirements states
3135 * that:
3136 * "The check for Key_1 != Key_2 shall be done at any place
3137 * BEFORE using the keys in the XTS-AES algorithm to process
3138 * data with them."
3139 */
3140 if ((!allow_insecure_decrypt || enc)
3141 && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
3142 EVPerr(EVP_F_AES_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
3143 return 0;
3144 }
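
            /*-
             * Hedged illustration (annotation only; ctx, k and iv are
             * caller-supplied names): with EVP_aes_256_xts() the 64-byte
             * key is split into two 32-byte halves, so e.g. an all-zero
             * key trips this check when encrypting:
             *
             *   unsigned char k[64] = { 0 };   (identical halves)
             *   EVP_EncryptInit_ex(ctx, EVP_aes_256_xts(), NULL, k, iv);
             *   (returns 0 and raises EVP_R_XTS_DUPLICATED_KEYS)
             */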
3145
3146 #ifdef AES_XTS_ASM
3147 xctx->stream = enc ? AES_xts_encrypt : AES_xts_decrypt;
3148 #else
3149 xctx->stream = NULL;
3150 #endif
3151 /* key_len is two AES keys */
3152 #ifdef HWAES_CAPABLE
3153 if (HWAES_CAPABLE) {
3154 if (enc) {
3155 HWAES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3156 xctx->xts.block1 = (block128_f) HWAES_encrypt;
3157 # ifdef HWAES_xts_encrypt
3158 xctx->stream = HWAES_xts_encrypt;
3159 # endif
3160 } else {
3161 HWAES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3162 xctx->xts.block1 = (block128_f) HWAES_decrypt;
3163 # ifdef HWAES_xts_decrypt
3164 xctx->stream = HWAES_xts_decrypt;
# endif
3166 }
3167
3168 HWAES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3169 xctx->xts.block2 = (block128_f) HWAES_encrypt;
3170
3171 xctx->xts.key1 = &xctx->ks1;
3172 break;
3173 } else
3174 #endif
3175 #ifdef BSAES_CAPABLE
3176 if (BSAES_CAPABLE)
3177 xctx->stream = enc ? bsaes_xts_encrypt : bsaes_xts_decrypt;
3178 else
3179 #endif
3180 #ifdef VPAES_CAPABLE
3181 if (VPAES_CAPABLE) {
3182 if (enc) {
3183 vpaes_set_encrypt_key(key, bits, &xctx->ks1.ks);
3184 xctx->xts.block1 = (block128_f) vpaes_encrypt;
3185 } else {
3186 vpaes_set_decrypt_key(key, bits, &xctx->ks1.ks);
3187 xctx->xts.block1 = (block128_f) vpaes_decrypt;
3188 }
3189
3190 vpaes_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3191 xctx->xts.block2 = (block128_f) vpaes_encrypt;
3192
3193 xctx->xts.key1 = &xctx->ks1;
3194 break;
3195 } else
3196 #endif
3197 (void)0; /* terminate potentially open 'else' */
3198
3199 if (enc) {
3200 AES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3201 xctx->xts.block1 = (block128_f) AES_encrypt;
3202 } else {
3203 AES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3204 xctx->xts.block1 = (block128_f) AES_decrypt;
3205 }
3206
3207 AES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3208 xctx->xts.block2 = (block128_f) AES_encrypt;
3209
3210 xctx->xts.key1 = &xctx->ks1;
3211 } while (0);
3212 }
3213
3214 if (iv) {
3215 xctx->xts.key2 = &xctx->ks2;
3216 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
3217 }
3218
3219 return 1;
3220 }
3221
3222 static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3223 const unsigned char *in, size_t len)
3224 {
3225 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3226
3227 if (xctx->xts.key1 == NULL
3228 || xctx->xts.key2 == NULL
3229 || out == NULL
3230 || in == NULL
3231 || len < AES_BLOCK_SIZE)
3232 return 0;
3233
    /*
     * Impose a limit of 2^20 blocks per data unit (16 MiB for 16-byte
     * blocks) as specified by IEEE Std 1619-2018. The earlier and obsolete
     * IEEE Std 1619-2007 indicated that this was a SHOULD NOT rather than
     * a MUST NOT. NIST SP 800-38E mandates the same limit.
     */
3240 if (len > XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE) {
3241 EVPerr(EVP_F_AES_XTS_CIPHER, EVP_R_XTS_DATA_UNIT_IS_TOO_LARGE);
3242 return 0;
3243 }
3244
3245 if (xctx->stream)
3246 (*xctx->stream) (in, out, len,
3247 xctx->xts.key1, xctx->xts.key2,
3248 EVP_CIPHER_CTX_iv_noconst(ctx));
3249 else if (CRYPTO_xts128_encrypt(&xctx->xts, EVP_CIPHER_CTX_iv_noconst(ctx),
3250 in, out, len,
3251 EVP_CIPHER_CTX_encrypting(ctx)))
3252 return 0;
3253 return 1;
3254 }
3255
3256 #define aes_xts_cleanup NULL
3257
3258 #define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \
3259 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3260 | EVP_CIPH_CUSTOM_COPY)
3261
3262 BLOCK_CIPHER_custom(NID_aes, 128, 1, 16, xts, XTS, XTS_FLAGS)
3263 BLOCK_CIPHER_custom(NID_aes, 256, 1, 16, xts, XTS, XTS_FLAGS)
3264
3265 static int aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3266 {
3267 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,c);
3268 switch (type) {
3269 case EVP_CTRL_INIT:
3270 cctx->key_set = 0;
3271 cctx->iv_set = 0;
3272 cctx->L = 8;
3273 cctx->M = 12;
3274 cctx->tag_set = 0;
3275 cctx->len_set = 0;
3276 cctx->tls_aad_len = -1;
3277 return 1;
3278
3279 case EVP_CTRL_GET_IVLEN:
3280 *(int *)ptr = 15 - cctx->L;
3281 return 1;
3282
3283 case EVP_CTRL_AEAD_TLS1_AAD:
3284 /* Save the AAD for later use */
3285 if (arg != EVP_AEAD_TLS1_AAD_LEN)
3286 return 0;
3287 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3288 cctx->tls_aad_len = arg;
3289 {
3290 uint16_t len =
3291 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
3292 | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
3293 /* Correct length for explicit IV */
3294 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
3295 return 0;
3296 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
3297 /* If decrypting correct for tag too */
3298 if (!EVP_CIPHER_CTX_encrypting(c)) {
3299 if (len < cctx->M)
3300 return 0;
3301 len -= cctx->M;
3302 }
3303 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
3304 EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
3305 }
3306 /* Extra padding: tag appended to record */
3307 return cctx->M;
3308
3309 case EVP_CTRL_CCM_SET_IV_FIXED:
3310 /* Sanity check length */
3311 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
3312 return 0;
3313 /* Just copy to first part of IV */
3314 memcpy(EVP_CIPHER_CTX_iv_noconst(c), ptr, arg);
3315 return 1;
3316
3317 case EVP_CTRL_AEAD_SET_IVLEN:
3318 arg = 15 - arg;
3319 /* fall thru */
3320 case EVP_CTRL_CCM_SET_L:
3321 if (arg < 2 || arg > 8)
3322 return 0;
3323 cctx->L = arg;
3324 return 1;
3325
3326 case EVP_CTRL_AEAD_SET_TAG:
3327 if ((arg & 1) || arg < 4 || arg > 16)
3328 return 0;
3329 if (EVP_CIPHER_CTX_encrypting(c) && ptr)
3330 return 0;
3331 if (ptr) {
3332 cctx->tag_set = 1;
3333 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3334 }
3335 cctx->M = arg;
3336 return 1;
3337
3338 case EVP_CTRL_AEAD_GET_TAG:
3339 if (!EVP_CIPHER_CTX_encrypting(c) || !cctx->tag_set)
3340 return 0;
3341 if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg))
3342 return 0;
3343 cctx->tag_set = 0;
3344 cctx->iv_set = 0;
3345 cctx->len_set = 0;
3346 return 1;
3347
3348 case EVP_CTRL_COPY:
3349 {
3350 EVP_CIPHER_CTX *out = ptr;
3351 EVP_AES_CCM_CTX *cctx_out = EVP_C_DATA(EVP_AES_CCM_CTX,out);
3352 if (cctx->ccm.key) {
3353 if (cctx->ccm.key != &cctx->ks)
3354 return 0;
3355 cctx_out->ccm.key = &cctx_out->ks;
3356 }
3357 return 1;
3358 }
3359
3360 default:
3361 return -1;
3362
3363 }
3364 }
3365
3366 static int aes_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3367 const unsigned char *iv, int enc)
3368 {
3369 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3370 if (!iv && !key)
3371 return 1;
3372 if (key)
3373 do {
3374 #ifdef HWAES_CAPABLE
3375 if (HWAES_CAPABLE) {
3376 HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3377 &cctx->ks.ks);
3378
3379 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3380 &cctx->ks, (block128_f) HWAES_encrypt);
3381 cctx->str = NULL;
3382 cctx->key_set = 1;
3383 break;
3384 } else
3385 #endif
3386 #ifdef VPAES_CAPABLE
3387 if (VPAES_CAPABLE) {
3388 vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3389 &cctx->ks.ks);
3390 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3391 &cctx->ks, (block128_f) vpaes_encrypt);
3392 cctx->str = NULL;
3393 cctx->key_set = 1;
3394 break;
3395 }
3396 #endif
3397 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3398 &cctx->ks.ks);
3399 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3400 &cctx->ks, (block128_f) AES_encrypt);
3401 cctx->str = NULL;
3402 cctx->key_set = 1;
3403 } while (0);
3404 if (iv) {
3405 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
3406 cctx->iv_set = 1;
3407 }
3408 return 1;
3409 }
3410
3411 static int aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3412 const unsigned char *in, size_t len)
3413 {
3414 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3415 CCM128_CONTEXT *ccm = &cctx->ccm;
3416 /* Encrypt/decrypt must be performed in place */
3417 if (out != in || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->M))
3418 return -1;
3419 /* If encrypting set explicit IV from sequence number (start of AAD) */
3420 if (EVP_CIPHER_CTX_encrypting(ctx))
3421 memcpy(out, EVP_CIPHER_CTX_buf_noconst(ctx),
3422 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3423 /* Get rest of IV from explicit IV */
3424 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx) + EVP_CCM_TLS_FIXED_IV_LEN, in,
3425 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3426 /* Correct length value */
3427 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3428 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), 15 - cctx->L,
3429 len))
3430 return -1;
3431 /* Use saved AAD */
3432 CRYPTO_ccm128_aad(ccm, EVP_CIPHER_CTX_buf_noconst(ctx), cctx->tls_aad_len);
3433 /* Fix buffer to point to payload */
3434 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3435 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3436 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3437 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3438 cctx->str) :
3439 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3440 return -1;
3441 if (!CRYPTO_ccm128_tag(ccm, out + len, cctx->M))
3442 return -1;
3443 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3444 } else {
3445 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3446 cctx->str) :
3447 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3448 unsigned char tag[16];
3449 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3450 if (!CRYPTO_memcmp(tag, in + len, cctx->M))
3451 return len;
3452 }
3453 }
3454 OPENSSL_cleanse(out, len);
3455 return -1;
3456 }
3457 }
3458
3459 static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3460 const unsigned char *in, size_t len)
3461 {
3462 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3463 CCM128_CONTEXT *ccm = &cctx->ccm;
3464 /* If not set up, return error */
3465 if (!cctx->key_set)
3466 return -1;
3467
3468 if (cctx->tls_aad_len >= 0)
3469 return aes_ccm_tls_cipher(ctx, out, in, len);
3470
3471 /* EVP_*Final() doesn't return any data */
3472 if (in == NULL && out != NULL)
3473 return 0;
3474
3475 if (!cctx->iv_set)
3476 return -1;
3477
3478 if (!out) {
3479 if (!in) {
3480 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx),
3481 15 - cctx->L, len))
3482 return -1;
3483 cctx->len_set = 1;
3484 return len;
3485 }
3486 /* If have AAD need message length */
3487 if (!cctx->len_set && len)
3488 return -1;
3489 CRYPTO_ccm128_aad(ccm, in, len);
3490 return len;
3491 }
3492
3493 /* The tag must be set before actually decrypting data */
3494 if (!EVP_CIPHER_CTX_encrypting(ctx) && !cctx->tag_set)
3495 return -1;
3496
3497 /* If not set length yet do it */
3498 if (!cctx->len_set) {
3499 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx),
3500 15 - cctx->L, len))
3501 return -1;
3502 cctx->len_set = 1;
3503 }
3504 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3505 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3506 cctx->str) :
3507 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3508 return -1;
3509 cctx->tag_set = 1;
3510 return len;
3511 } else {
3512 int rv = -1;
3513 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3514 cctx->str) :
3515 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3516 unsigned char tag[16];
3517 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3518 if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx),
3519 cctx->M))
3520 rv = len;
3521 }
3522 }
3523 if (rv == -1)
3524 OPENSSL_cleanse(out, len);
3525 cctx->iv_set = 0;
3526 cctx->tag_set = 0;
3527 cctx->len_set = 0;
3528 return rv;
3529 }
3530 }
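
/*-
 * Hedged calling-sequence sketch (annotation only, error handling
 * omitted; c, key, nonce, aad, msg, out and tag are illustrative
 * caller-supplied names): CCM needs the total message length before any
 * AAD or payload is processed, which is what the NULL/NULL Update() call
 * conveys:
 *
 *   EVP_EncryptInit_ex(c, EVP_aes_128_ccm(), NULL, NULL, NULL);
 *   EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 7, NULL);
 *   EVP_EncryptInit_ex(c, NULL, NULL, key, nonce);
 *   EVP_EncryptUpdate(c, NULL, &outl, NULL, msglen);  (total length)
 *   EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen);   (then the AAD)
 *   EVP_EncryptUpdate(c, out, &outl, msg, msglen);    (then the message)
 *   EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 12, tag);
 *
 * The tag length 12 matches the default M set in EVP_CTRL_INIT above.
 */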
3531
3532 #define aes_ccm_cleanup NULL
3533
3534 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, ccm, CCM,
3535 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3536 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, ccm, CCM,
3537 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3538 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, ccm, CCM,
3539 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3540
3541 typedef struct {
3542 union {
3543 OSSL_UNION_ALIGN;
3544 AES_KEY ks;
3545 } ks;
    /* Pointer to the IV, or NULL if no IV has been set */
    unsigned char *iv;
3548 } EVP_AES_WRAP_CTX;
3549
3550 static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3551 const unsigned char *iv, int enc)
3552 {
3553 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3554 if (!iv && !key)
3555 return 1;
3556 if (key) {
3557 if (EVP_CIPHER_CTX_encrypting(ctx))
3558 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3559 &wctx->ks.ks);
3560 else
3561 AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3562 &wctx->ks.ks);
3563 if (!iv)
3564 wctx->iv = NULL;
3565 }
3566 if (iv) {
3567 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, EVP_CIPHER_CTX_iv_length(ctx));
3568 wctx->iv = EVP_CIPHER_CTX_iv_noconst(ctx);
3569 }
3570 return 1;
3571 }
3572
3573 static int aes_wrap_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3574 const unsigned char *in, size_t inlen)
3575 {
3576 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3577 size_t rv;
3578 /* AES wrap with padding has IV length of 4, without padding 8 */
3579 int pad = EVP_CIPHER_CTX_iv_length(ctx) == 4;
3580 /* No final operation so always return zero length */
3581 if (!in)
3582 return 0;
3583 /* Input length must always be non-zero */
3584 if (!inlen)
3585 return -1;
3586 /* If decrypting need at least 16 bytes and multiple of 8 */
3587 if (!EVP_CIPHER_CTX_encrypting(ctx) && (inlen < 16 || inlen & 0x7))
3588 return -1;
3589 /* If not padding input must be multiple of 8 */
3590 if (!pad && inlen & 0x7)
3591 return -1;
3592 if (is_partially_overlapping(out, in, inlen)) {
3593 EVPerr(EVP_F_AES_WRAP_CIPHER, EVP_R_PARTIALLY_OVERLAPPING);
3594 return 0;
3595 }
3596 if (!out) {
3597 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3598 /* If padding round up to multiple of 8 */
3599 if (pad)
3600 inlen = (inlen + 7) / 8 * 8;
3601 /* 8 byte prefix */
3602 return inlen + 8;
3603 } else {
3604 /*
3605 * If not padding output will be exactly 8 bytes smaller than
3606 * input. If padding it will be at least 8 bytes smaller but we
3607 * don't know how much.
3608 */
3609 return inlen - 8;
3610 }
3611 }
3612 if (pad) {
3613 if (EVP_CIPHER_CTX_encrypting(ctx))
3614 rv = CRYPTO_128_wrap_pad(&wctx->ks.ks, wctx->iv,
3615 out, in, inlen,
3616 (block128_f) AES_encrypt);
3617 else
3618 rv = CRYPTO_128_unwrap_pad(&wctx->ks.ks, wctx->iv,
3619 out, in, inlen,
3620 (block128_f) AES_decrypt);
3621 } else {
3622 if (EVP_CIPHER_CTX_encrypting(ctx))
3623 rv = CRYPTO_128_wrap(&wctx->ks.ks, wctx->iv,
3624 out, in, inlen, (block128_f) AES_encrypt);
3625 else
3626 rv = CRYPTO_128_unwrap(&wctx->ks.ks, wctx->iv,
3627 out, in, inlen, (block128_f) AES_decrypt);
3628 }
3629 return rv ? (int)rv : -1;
3630 }
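
/*-
 * Hedged usage sketch (annotation only; c, kek, in and out are
 * illustrative caller-supplied names): the EVP layer refuses wrap-mode
 * ciphers unless they are explicitly enabled on the context, and for the
 * non-padded variant inlen must be a non-zero multiple of 8:
 *
 *   EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
 *   EVP_CIPHER_CTX_set_flags(c, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW);
 *   EVP_EncryptInit_ex(c, EVP_aes_256_wrap(), NULL, kek, NULL);
 *   EVP_EncryptUpdate(c, out, &outl, in, inlen);   (outl == inlen + 8)
 */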
3631
3632 #define WRAP_FLAGS (EVP_CIPH_WRAP_MODE \
3633 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3634 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_FLAG_DEFAULT_ASN1)
3635
3636 static const EVP_CIPHER aes_128_wrap = {
3637 NID_id_aes128_wrap,
3638 8, 16, 8, WRAP_FLAGS,
3639 aes_wrap_init_key, aes_wrap_cipher,
3640 NULL,
3641 sizeof(EVP_AES_WRAP_CTX),
3642 NULL, NULL, NULL, NULL
3643 };
3644
3645 const EVP_CIPHER *EVP_aes_128_wrap(void)
3646 {
3647 return &aes_128_wrap;
3648 }
3649
3650 static const EVP_CIPHER aes_192_wrap = {
3651 NID_id_aes192_wrap,
3652 8, 24, 8, WRAP_FLAGS,
3653 aes_wrap_init_key, aes_wrap_cipher,
3654 NULL,
3655 sizeof(EVP_AES_WRAP_CTX),
3656 NULL, NULL, NULL, NULL
3657 };
3658
3659 const EVP_CIPHER *EVP_aes_192_wrap(void)
3660 {
3661 return &aes_192_wrap;
3662 }
3663
3664 static const EVP_CIPHER aes_256_wrap = {
3665 NID_id_aes256_wrap,
3666 8, 32, 8, WRAP_FLAGS,
3667 aes_wrap_init_key, aes_wrap_cipher,
3668 NULL,
3669 sizeof(EVP_AES_WRAP_CTX),
3670 NULL, NULL, NULL, NULL
3671 };
3672
3673 const EVP_CIPHER *EVP_aes_256_wrap(void)
3674 {
3675 return &aes_256_wrap;
3676 }
3677
3678 static const EVP_CIPHER aes_128_wrap_pad = {
3679 NID_id_aes128_wrap_pad,
3680 8, 16, 4, WRAP_FLAGS,
3681 aes_wrap_init_key, aes_wrap_cipher,
3682 NULL,
3683 sizeof(EVP_AES_WRAP_CTX),
3684 NULL, NULL, NULL, NULL
3685 };
3686
3687 const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
3688 {
3689 return &aes_128_wrap_pad;
3690 }
3691
3692 static const EVP_CIPHER aes_192_wrap_pad = {
3693 NID_id_aes192_wrap_pad,
3694 8, 24, 4, WRAP_FLAGS,
3695 aes_wrap_init_key, aes_wrap_cipher,
3696 NULL,
3697 sizeof(EVP_AES_WRAP_CTX),
3698 NULL, NULL, NULL, NULL
3699 };
3700
3701 const EVP_CIPHER *EVP_aes_192_wrap_pad(void)
3702 {
3703 return &aes_192_wrap_pad;
3704 }
3705
3706 static const EVP_CIPHER aes_256_wrap_pad = {
3707 NID_id_aes256_wrap_pad,
3708 8, 32, 4, WRAP_FLAGS,
3709 aes_wrap_init_key, aes_wrap_cipher,
3710 NULL,
3711 sizeof(EVP_AES_WRAP_CTX),
3712 NULL, NULL, NULL, NULL
3713 };
3714
3715 const EVP_CIPHER *EVP_aes_256_wrap_pad(void)
3716 {
3717 return &aes_256_wrap_pad;
3718 }
3719
3720 #ifndef OPENSSL_NO_OCB
3721 static int aes_ocb_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3722 {
3723 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
3724 EVP_CIPHER_CTX *newc;
3725 EVP_AES_OCB_CTX *new_octx;
3726
3727 switch (type) {
3728 case EVP_CTRL_INIT:
3729 octx->key_set = 0;
3730 octx->iv_set = 0;
3731 octx->ivlen = EVP_CIPHER_iv_length(c->cipher);
3732 octx->iv = EVP_CIPHER_CTX_iv_noconst(c);
3733 octx->taglen = 16;
3734 octx->data_buf_len = 0;
3735 octx->aad_buf_len = 0;
3736 return 1;
3737
3738 case EVP_CTRL_GET_IVLEN:
3739 *(int *)ptr = octx->ivlen;
3740 return 1;
3741
3742 case EVP_CTRL_AEAD_SET_IVLEN:
3743 /* IV len must be 1 to 15 */
3744 if (arg <= 0 || arg > 15)
3745 return 0;
3746
3747 octx->ivlen = arg;
3748 return 1;
3749
3750 case EVP_CTRL_AEAD_SET_TAG:
3751 if (ptr == NULL) {
3752 /* Tag len must be 0 to 16 */
3753 if (arg < 0 || arg > 16)
3754 return 0;
3755
3756 octx->taglen = arg;
3757 return 1;
3758 }
3759 if (arg != octx->taglen || EVP_CIPHER_CTX_encrypting(c))
3760 return 0;
3761 memcpy(octx->tag, ptr, arg);
3762 return 1;
3763
3764 case EVP_CTRL_AEAD_GET_TAG:
3765 if (arg != octx->taglen || !EVP_CIPHER_CTX_encrypting(c))
3766 return 0;
3767
3768 memcpy(ptr, octx->tag, arg);
3769 return 1;
3770
3771 case EVP_CTRL_COPY:
3772 newc = (EVP_CIPHER_CTX *)ptr;
3773 new_octx = EVP_C_DATA(EVP_AES_OCB_CTX,newc);
3774 return CRYPTO_ocb128_copy_ctx(&new_octx->ocb, &octx->ocb,
3775 &new_octx->ksenc.ks,
3776 &new_octx->ksdec.ks);
3777
3778 default:
3779 return -1;
3780
3781 }
3782 }
3783
3784 static int aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3785 const unsigned char *iv, int enc)
3786 {
3787 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
3788 if (!iv && !key)
3789 return 1;
3790 if (key) {
3791 do {
3792 /*
3793 * We set both the encrypt and decrypt key here because decrypt
3794 * needs both. We could possibly optimise to remove setting the
3795 * decrypt for an encryption operation.
3796 */
3797 # ifdef HWAES_CAPABLE
3798 if (HWAES_CAPABLE) {
3799 HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3800 &octx->ksenc.ks);
3801 HWAES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3802 &octx->ksdec.ks);
3803 if (!CRYPTO_ocb128_init(&octx->ocb,
3804 &octx->ksenc.ks, &octx->ksdec.ks,
3805 (block128_f) HWAES_encrypt,
3806 (block128_f) HWAES_decrypt,
3807 enc ? HWAES_ocb_encrypt
3808 : HWAES_ocb_decrypt))
3809 return 0;
3810 break;
3811 }
3812 # endif
3813 # ifdef VPAES_CAPABLE
3814 if (VPAES_CAPABLE) {
3815 vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3816 &octx->ksenc.ks);
3817 vpaes_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3818 &octx->ksdec.ks);
3819 if (!CRYPTO_ocb128_init(&octx->ocb,
3820 &octx->ksenc.ks, &octx->ksdec.ks,
3821 (block128_f) vpaes_encrypt,
3822 (block128_f) vpaes_decrypt,
3823 NULL))
3824 return 0;
3825 break;
3826 }
3827 # endif
3828 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3829 &octx->ksenc.ks);
3830 AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3831 &octx->ksdec.ks);
3832 if (!CRYPTO_ocb128_init(&octx->ocb,
3833 &octx->ksenc.ks, &octx->ksdec.ks,
3834 (block128_f) AES_encrypt,
3835 (block128_f) AES_decrypt,
3836 NULL))
3837 return 0;
        } while (0);
3840
3841 /*
3842 * If we have an iv we can set it directly, otherwise use saved IV.
3843 */
3844 if (iv == NULL && octx->iv_set)
3845 iv = octx->iv;
3846 if (iv) {
3847 if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
3848 != 1)
3849 return 0;
3850 octx->iv_set = 1;
3851 }
3852 octx->key_set = 1;
3853 } else {
3854 /* If key set use IV, otherwise copy */
3855 if (octx->key_set)
3856 CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
3857 else
3858 memcpy(octx->iv, iv, octx->ivlen);
3859 octx->iv_set = 1;
3860 }
3861 return 1;
3862 }
3863
3864 static int aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3865 const unsigned char *in, size_t len)
3866 {
3867 unsigned char *buf;
3868 int *buf_len;
3869 int written_len = 0;
3870 size_t trailing_len;
3871 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
3872
3873 /* If IV or Key not set then return error */
3874 if (!octx->iv_set)
3875 return -1;
3876
3877 if (!octx->key_set)
3878 return -1;
3879
3880 if (in != NULL) {
3881 /*
3882 * Need to ensure we are only passing full blocks to low level OCB
3883 * routines. We do it here rather than in EVP_EncryptUpdate/
3884 * EVP_DecryptUpdate because we need to pass full blocks of AAD too
3885 * and those routines don't support that
3886 */
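
        /*-
         * Worked example (annotation only): if a previous call left 10
         * bytes in data_buf, a 30-byte Update() first completes that
         * block with 6 bytes and processes it, then processes one full
         * 16-byte block, buffers the trailing 8 bytes, and returns 32.
         */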

        /* Are we dealing with AAD or normal data here? */
        if (out == NULL) {
            buf = octx->aad_buf;
            buf_len = &(octx->aad_buf_len);
        } else {
            buf = octx->data_buf;
            buf_len = &(octx->data_buf_len);

            if (is_partially_overlapping(out + *buf_len, in, len)) {
                EVPerr(EVP_F_AES_OCB_CIPHER, EVP_R_PARTIALLY_OVERLAPPING);
                return 0;
            }
        }

        /*
         * If we've got a partially filled buffer from a previous call then
         * use that data first
         */
        if (*buf_len > 0) {
            unsigned int remaining;

            remaining = AES_BLOCK_SIZE - (*buf_len);
            if (remaining > len) {
                memcpy(buf + (*buf_len), in, len);
                *(buf_len) += len;
                return 0;
            }
            memcpy(buf + (*buf_len), in, remaining);

            /*
             * If we get here we've filled the buffer, so process it
             */
            len -= remaining;
            in += remaining;
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, buf, AES_BLOCK_SIZE))
                    return -1;
            } else if (EVP_CIPHER_CTX_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            }
            written_len = AES_BLOCK_SIZE;
            *buf_len = 0;
            if (out != NULL)
                out += AES_BLOCK_SIZE;
        }

        /* Do we have a partial block to handle at the end? */
        trailing_len = len % AES_BLOCK_SIZE;

        /*
         * If we've got some full blocks to handle, then process these first
         */
        if (len != trailing_len) {
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, in, len - trailing_len))
                    return -1;
            } else if (EVP_CIPHER_CTX_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt
                    (&octx->ocb, in, out, len - trailing_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt
                    (&octx->ocb, in, out, len - trailing_len))
                    return -1;
            }
            written_len += len - trailing_len;
            in += len - trailing_len;
        }

        /* Handle any trailing partial block */
        if (trailing_len > 0) {
            memcpy(buf, in, trailing_len);
            *buf_len = trailing_len;
        }

        return written_len;
    } else {
        /*
         * First, empty the buffer of any partial block that we might have
         * been provided, both for data and AAD
         */
        if (octx->data_buf_len > 0) {
            if (EVP_CIPHER_CTX_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            }
            written_len = octx->data_buf_len;
            octx->data_buf_len = 0;
        }
        if (octx->aad_buf_len > 0) {
            if (!CRYPTO_ocb128_aad
                (&octx->ocb, octx->aad_buf, octx->aad_buf_len))
                return -1;
            octx->aad_buf_len = 0;
        }
        /* If decrypting then verify */
        if (!EVP_CIPHER_CTX_encrypting(ctx)) {
            if (octx->taglen < 0)
                return -1;
            if (CRYPTO_ocb128_finish(&octx->ocb,
                                     octx->tag, octx->taglen) != 0)
                return -1;
            octx->iv_set = 0;
            return written_len;
        }
        /* If encrypting then just get the tag */
        if (CRYPTO_ocb128_tag(&octx->ocb, octx->tag, 16) != 1)
            return -1;
        /* Don't reuse the IV */
        octx->iv_set = 0;
        return written_len;
    }
}
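
/*
 * A note on the return convention in aes_ocb_cipher(): -1 signals an
 * error, while any non-negative value is the number of bytes written to
 * |out|. A return of 0 is therefore not a failure; it simply means the
 * input was absorbed into the partial-block buffer. The EVP layer maps
 * this for custom ciphers, so from the caller's side (a sketch reusing
 * the hypothetical `cctx` from the earlier example):
 *
 *     int outl = 0;
 *     if (!EVP_EncryptUpdate(cctx, out, &outl, in, 4))
 *         handle_error();   (hypothetical helper; hard failure)
 *     (outl may legitimately be 0 here: the 4 bytes were only buffered)
 */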

static int aes_ocb_cleanup(EVP_CIPHER_CTX *c)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
    CRYPTO_ocb128_cleanup(&octx->ocb);
    return 1;
}

BLOCK_CIPHER_custom(NID_aes, 128, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
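
/*
 * The macros above generate EVP_aes_128_ocb(), EVP_aes_192_ocb() and
 * EVP_aes_256_ocb(), each with a 12-byte default IV length. A minimal
 * one-shot encrypt flow, continuing the earlier sketch (`aad`, `pt` and
 * their lengths are hypothetical caller data; error checks omitted):
 *
 *     unsigned char ct[64], tag[16];
 *     int outl, tmplen;
 *     EVP_CIPHER_CTX *cctx = EVP_CIPHER_CTX_new();
 *
 *     EVP_EncryptInit_ex(cctx, EVP_aes_128_ocb(), NULL, key, nonce);
 *     EVP_EncryptUpdate(cctx, NULL, &outl, aad, aadlen);   (AAD pass)
 *     EVP_EncryptUpdate(cctx, ct, &outl, pt, ptlen);       (data pass)
 *     EVP_EncryptFinal_ex(cctx, ct + outl, &tmplen);       (flush buffer)
 *     EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 *     EVP_CIPHER_CTX_free(cctx);
 */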
#endif /* OPENSSL_NO_OCB */

/* AES-SIV mode */
#ifndef OPENSSL_NO_SIV

typedef SIV128_CONTEXT EVP_AES_SIV_CTX;

#define aesni_siv_init_key aes_siv_init_key
static int aes_siv_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                            const unsigned char *iv, int enc)
{
    const EVP_CIPHER *ctr;
    const EVP_CIPHER *cbc;
    SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, ctx);
    int klen = EVP_CIPHER_CTX_key_length(ctx) / 2;

    if (key == NULL)
        return 1;

    switch (klen) {
    case 16:
        cbc = EVP_aes_128_cbc();
        ctr = EVP_aes_128_ctr();
        break;
    case 24:
        cbc = EVP_aes_192_cbc();
        ctr = EVP_aes_192_ctr();
        break;
    case 32:
        cbc = EVP_aes_256_cbc();
        ctr = EVP_aes_256_ctr();
        break;
    default:
        return 0;
    }

    /*
     * klen is the key length of the underlying cipher, not of the input
     * key, which should be twice as long
     */
    return CRYPTO_siv128_init(sctx, key, klen, cbc, ctr);
}
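
/*
 * For example, with EVP_aes_128_siv() the EVP key length is 32 bytes, so
 * klen is 16 and the 128-bit CBC/CTR pair is selected. Per RFC 5297, the
 * first half of the input key feeds the S2V (CMAC) step and the second
 * half the CTR encryption.
 */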

#define aesni_siv_cipher aes_siv_cipher
static int aes_siv_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                          const unsigned char *in, size_t len)
{
    SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, ctx);

    /* EncryptFinal or DecryptFinal */
    if (in == NULL)
        return CRYPTO_siv128_finish(sctx);

    /* Deal with associated data */
    if (out == NULL)
        return CRYPTO_siv128_aad(sctx, in, len);

    if (EVP_CIPHER_CTX_encrypting(ctx))
        return CRYPTO_siv128_encrypt(sctx, in, out, len);

    return CRYPTO_siv128_decrypt(sctx, in, out, len);
}

#define aesni_siv_cleanup aes_siv_cleanup
static int aes_siv_cleanup(EVP_CIPHER_CTX *c)
{
    SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, c);

    return CRYPTO_siv128_cleanup(sctx);
}

#define aesni_siv_ctrl aes_siv_ctrl
static int aes_siv_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, c);
    SIV128_CONTEXT *sctx_out;

    switch (type) {
    case EVP_CTRL_INIT:
        return CRYPTO_siv128_cleanup(sctx);

    case EVP_CTRL_SET_SPEED:
        return CRYPTO_siv128_speed(sctx, arg);

    case EVP_CTRL_AEAD_SET_TAG:
        if (!EVP_CIPHER_CTX_encrypting(c))
            return CRYPTO_siv128_set_tag(sctx, ptr, arg);
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        if (!EVP_CIPHER_CTX_encrypting(c))
            return 0;
        return CRYPTO_siv128_get_tag(sctx, ptr, arg);

    case EVP_CTRL_COPY:
        sctx_out = EVP_C_DATA(SIV128_CONTEXT, (EVP_CIPHER_CTX *)ptr);
        return CRYPTO_siv128_copy_ctx(sctx_out, sctx);

    default:
        return -1;
    }
}

#define SIV_FLAGS (EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_DEFAULT_ASN1 \
                   | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
                   | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CUSTOM_COPY \
                   | EVP_CIPH_CTRL_INIT)

BLOCK_CIPHER_custom(NID_aes, 128, 1, 0, siv, SIV, SIV_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 1, 0, siv, SIV, SIV_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 1, 0, siv, SIV, SIV_FLAGS)
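
/*
 * SIV is a one-shot AEAD here: all AAD calls must come before the single
 * plaintext/ciphertext call, and the tag doubles as the synthetic IV. A
 * decrypt sketch (`key2x` is a hypothetical 32-byte key for
 * EVP_aes_128_siv(); error checks omitted, and a failed tag check
 * surfaces as a 0 return from the update/final calls):
 *
 *     unsigned char pt[64];
 *     int outl, tmplen;
 *     EVP_CIPHER_CTX *cctx = EVP_CIPHER_CTX_new();
 *
 *     EVP_DecryptInit_ex(cctx, EVP_aes_128_siv(), NULL, key2x, NULL);
 *     EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_SET_TAG, 16, tag);
 *     EVP_DecryptUpdate(cctx, NULL, &outl, aad, aadlen);   (AAD pass)
 *     EVP_DecryptUpdate(cctx, pt, &outl, ct, ctlen);       (single data pass)
 *     EVP_DecryptFinal_ex(cctx, pt + outl, &tmplen);
 *     EVP_CIPHER_CTX_free(cctx);
 */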
#endif