/*
 * Copyright 2001-2019 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "crypto/evp.h"
#include "internal/cryptlib.h"
#include "crypto/modes.h"
#include "crypto/siv.h"
#include "crypto/ciphermode_platform.h"
#include "evp_local.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;
    block128_f block;
    union {
        cbc128_f cbc;
        ctr128_f ctr;
    } stream;
} EVP_AES_KEY;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    GCM128_CONTEXT gcm;
    unsigned char *iv;          /* Temporary IV store */
    int ivlen;                  /* IV length */
    int taglen;
    int iv_gen;                 /* It is OK to generate IVs */
    int iv_gen_rand;            /* No IV was specified, so generate a rand IV */
    int tls_aad_len;            /* TLS AAD length */
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
    ctr128_f ctr;
} EVP_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks1, ks2;                 /* AES key schedules to use */
    XTS128_CONTEXT xts;
    void (*stream) (const unsigned char *in,
                    unsigned char *out, size_t length,
                    const AES_KEY *key1, const AES_KEY *key2,
                    const unsigned char iv[16]);
} EVP_AES_XTS_CTX;

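/*
 * An XTS key whose two halves are identical is insecure (see the Rogaway
 * vulnerability comments below). Encryption with such a key is always
 * rejected; decryption is additionally rejected in FIPS mode, but stays
 * allowed otherwise so that previously encrypted data remains readable.
 */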
#ifdef FIPS_MODE
static const int allow_insecure_decrypt = 0;
#else
static const int allow_insecure_decrypt = 1;
#endif

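/*
 * CCM (RFC 3610) parameters: L is the number of bytes used to encode the
 * message length and M is the tag length. The nonce occupies the remaining
 * 15 - L bytes of the first block, which is why the init functions below
 * copy exactly 15 - L IV bytes.
 */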
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    int tag_set;                /* Set if tag is valid */
    int len_set;                /* Set if message length set */
    int L, M;                   /* L and M parameters from RFC3610 */
    int tls_aad_len;            /* TLS AAD length */
    CCM128_CONTEXT ccm;
    ccm128_f str;
} EVP_AES_CCM_CTX;

#ifndef OPENSSL_NO_OCB
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksenc;                    /* AES key schedule to use for encryption */
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksdec;                    /* AES key schedule to use for decryption */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    OCB128_CONTEXT ocb;
    unsigned char *iv;          /* Temporary IV store */
    unsigned char tag[16];
    unsigned char data_buf[16]; /* Store partial data blocks */
    unsigned char aad_buf[16];  /* Store partial AAD blocks */
    int data_buf_len;
    int aad_buf_len;
    int ivlen;                  /* IV length */
    int taglen;
} EVP_AES_OCB_CTX;
#endif

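/*
 * Cap on the number of bytes handled per low-level call in the bit-wise
 * CFB1 path: byte counts are converted to bit counts (multiplied by 8), so
 * keep the chunk size a few bits short of the width of size_t.
 */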
#define MAXBITCHUNK     ((size_t)1<<(sizeof(size_t)*8-4))

/* increment big-endian counter (64-bit int) by 1 */
static void ctr64_inc(unsigned char *counter)
{
    int n = 8;
    unsigned char c;

    do {
        --n;
        c = counter[n];
        ++c;
        counter[n] = c;
        if (c)
            return;
    } while (n);
}

#if defined(AESNI_CAPABLE)
# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
#  define AES_gcm_encrypt aesni_gcm_encrypt
#  define AES_gcm_decrypt aesni_gcm_decrypt
#  define AES_GCM_ASM2(gctx)      (gctx->gcm.block==(block128_f)aesni_encrypt && \
                                   gctx->gcm.ghash==gcm_ghash_avx)
#  undef AES_GCM_ASM2          /* minor size optimization */
# endif

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                          const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = aesni_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                    &dat->ks.ks);
        dat->block = (block128_f) aesni_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f) aesni_cbc_encrypt : NULL;
    } else {
        ret = aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                    &dat->ks.ks);
        dat->block = (block128_f) aesni_encrypt;
        if (mode == EVP_CIPH_CBC_MODE)
            dat->stream.cbc = (cbc128_f) aesni_cbc_encrypt;
        else if (mode == EVP_CIPH_CTR_MODE)
            dat->stream.ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        else
            dat->stream.cbc = NULL;
    }

    if (ret < 0) {
        EVPerr(EVP_F_AESNI_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    aesni_cbc_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_iv_noconst(ctx),
                      EVP_CIPHER_CTX_encrypting(ctx));

    return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    size_t bl = EVP_CIPHER_CTX_block_size(ctx);

    if (len < bl)
        return 1;

    aesni_ecb_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_encrypting(ctx));

    return 1;
}

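/*
 * The remaining modes have no dedicated AES-NI routine at this level: the
 * aesni_* names simply alias the generic aes_* implementations, which pick
 * up the AES-NI block/stream functions installed by aesni_init_key().
 */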
# define aesni_ofb_cipher aes_ofb_cipher
static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb_cipher aes_cfb_cipher
static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb8_cipher aes_cfb8_cipher
static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_cfb1_cipher aes_cfb1_cipher
static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_ctr_cipher aes_ctr_cipher
static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                              &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
        gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        /*
         * If we have an IV we can set it directly, otherwise use the saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aesni_gcm_cipher aes_gcm_cipher
static int aesni_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is really two half-length keys concatenated */
        const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
        const int bits = bytes * 8;

        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            EVPerr(EVP_F_AESNI_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        /* key_len is two AES keys */
        if (enc) {
            aesni_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_encrypt;
            xctx->stream = aesni_xts_encrypt;
        } else {
            aesni_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_decrypt;
            xctx->stream = aesni_xts_decrypt;
        }

        aesni_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aesni_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
    }

    return 1;
}

# define aesni_xts_cipher aes_xts_cipher
static int aesni_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                              &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aesni_encrypt);
        cctx->str = enc ? (ccm128_f) aesni_ccm64_encrypt_blocks :
            (ccm128_f) aesni_ccm64_decrypt_blocks;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aesni_ccm_cipher aes_ccm_cipher
static int aesni_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aesni_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aesni_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                  &octx->ksenc.ks);
            aesni_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                  &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aesni_encrypt,
                                    (block128_f) aesni_decrypt,
                                    enc ? aesni_ocb_encrypt
                                        : aesni_ocb_decrypt))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aesni_ocb_cipher aes_ocb_cipher
static int aesni_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

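/*
 * These macros emit matching aesni_* and aes_* EVP_CIPHER tables for one
 * key length and mode; the public EVP_aes_<keylen>_<mode>() accessor picks
 * between them at run time via AESNI_CAPABLE. XTS and SIV advertise a
 * doubled key length because they consume two AES keys.
 */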
# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aesni_init_key, \
        aesni_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aesni_##mode##_init_key, \
        aesni_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(SPARC_AES_CAPABLE)

static int aes_t4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                           const unsigned char *iv, int enc)
{
    int ret, mode, bits;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_mode(ctx);
    bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = 0;
        aes_t4_set_decrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_decrypt;
        switch (bits) {
        case 128:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes128_t4_cbc_decrypt : NULL;
            break;
        case 192:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes192_t4_cbc_decrypt : NULL;
            break;
        case 256:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes256_t4_cbc_decrypt : NULL;
            break;
        default:
            ret = -1;
        }
    } else {
        ret = 0;
        aes_t4_set_encrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_encrypt;
        switch (bits) {
        case 128:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes128_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 192:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes192_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 256:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes256_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        default:
            ret = -1;
        }
    }

    if (ret < 0) {
        EVPerr(EVP_F_AES_T4_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

# define aes_t4_cbc_cipher aes_cbc_cipher
static int aes_t4_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ecb_cipher aes_ecb_cipher
static int aes_t4_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ofb_cipher aes_ofb_cipher
static int aes_t4_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb_cipher aes_cfb_cipher
static int aes_t4_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb8_cipher aes_cfb8_cipher
static int aes_t4_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_cfb1_cipher aes_cfb1_cipher
static int aes_t4_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_ctr_cipher aes_ctr_cipher
static int aes_t4_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        int bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
        aes_t4_set_encrypt_key(key, bits, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
                           (block128_f) aes_t4_encrypt);
        switch (bits) {
        case 128:
            gctx->ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            break;
        case 192:
            gctx->ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            break;
        case 256:
            gctx->ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            break;
        default:
            return 0;
        }
        /*
         * If we have an IV we can set it directly, otherwise use the saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aes_t4_gcm_cipher aes_gcm_cipher
static int aes_t4_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is really two half-length keys concatenated */
        const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
        const int bits = bytes * 8;

        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            EVPerr(EVP_F_AES_T4_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        xctx->stream = NULL;
        /* key_len is two AES keys */
        if (enc) {
            aes_t4_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_encrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_encrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_encrypt;
                break;
            default:
                return 0;
            }
        } else {
            aes_t4_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_decrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_decrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_decrypt;
                break;
            default:
                return 0;
            }
        }

        aes_t4_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aes_t4_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
    }

    return 1;
}

# define aes_t4_xts_cipher aes_xts_cipher
static int aes_t4_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        int bits = EVP_CIPHER_CTX_key_length(ctx) * 8;
        aes_t4_set_encrypt_key(key, bits, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aes_t4_encrypt);
        cctx->str = NULL;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ccm_cipher aes_ccm_cipher
static int aes_t4_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aes_t4_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
    if (!iv && !key)
        return 1;
    if (key) {
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aes_t4_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                   &octx->ksenc.ks);
            aes_t4_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
                                   &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aes_t4_encrypt,
                                    (block128_f) aes_t4_decrypt,
                                    NULL))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ocb_cipher aes_ocb_cipher
static int aes_t4_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# ifndef OPENSSL_NO_SIV
#  define aes_t4_siv_init_key aes_siv_init_key
#  define aes_t4_siv_cipher aes_siv_cipher
# endif                        /* OPENSSL_NO_SIV */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_t4_init_key, \
        aes_t4_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_t4_##mode##_init_key, \
        aes_t4_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(S390X_aes_128_CAPABLE)
/* IBM S390X support */
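/*
 * The implementations below drive the CPACF instructions directly: KM
 * (ECB), KMO (OFB), KMF (CFB), KMAC (CCM tag computation) and KMA (GCM).
 * Each instruction operates on a parameter block holding the key and
 * chaining values, which the context structures below mirror exactly.
 */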
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-06)
         */
        struct {
            unsigned char k[32];
        } param;
        /* KM-AES parameter block - end */
    } km;
    unsigned int fc;
} S390X_AES_ECB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMO-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMO-AES parameter block - end */
    } kmo;
    unsigned int fc;

    int res;
} S390X_AES_OFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMF-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMF-AES parameter block - end */
    } kmf;
    unsigned int fc;

    int res;
} S390X_AES_CFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMA-GCM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-11)
         */
        struct {
            unsigned char reserved[12];
            union {
                unsigned int w;
                unsigned char b[4];
            } cv;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } t;
            unsigned char h[16];
            unsigned long long taadl;
            unsigned long long tpcl;
            union {
                unsigned long long g[2];
                unsigned int w[4];
            } j0;
            unsigned char k[32];
        } param;
        /* KMA-GCM-AES parameter block - end */
    } kma;
    unsigned int fc;
    int key_set;

    unsigned char *iv;
    int ivlen;
    int iv_set;
    int iv_gen;

    int taglen;

    unsigned char ares[16];
    unsigned char mres[16];
    unsigned char kres[16];
    int areslen;
    int mreslen;
    int kreslen;

    int tls_aad_len;
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
} S390X_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
         * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
         * rounds field is used to store the function code and that the key
         * schedule is not stored (if aes hardware support is detected).
         */
        struct {
            unsigned char pad[16];
            AES_KEY k;
        } key;

        struct {
            /*-
             * KMAC-AES parameter block - begin
             * (see z/Architecture Principles of Operation >= SA22-7832-08)
             */
            struct {
                union {
                    unsigned long long g[2];
                    unsigned char b[16];
                } icv;
                unsigned char k[32];
            } kmac_param;
            /* KMAC-AES parameter block - end */

            union {
                unsigned long long g[2];
                unsigned char b[16];
            } nonce;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } buf;

            unsigned long long blocks;
            int l;
            int m;
            int tls_aad_len;
            int iv_set;
            int tag_set;
            int len_set;
            int key_set;

            unsigned char pad[140];
            unsigned int fc;
        } ccm;
    } aes;
} S390X_AES_CCM_CTX;

# define s390x_aes_init_key aes_init_key
static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc);

# define S390X_AES_CBC_CTX EVP_AES_KEY

# define s390x_aes_cbc_init_key aes_init_key

# define s390x_aes_cbc_cipher aes_cbc_cipher
static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->km.param.k, key, keylen);
    return 1;
}

static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);

    s390x_km(in, len, out, cctx->fc, &cctx->km.param);
    return 1;
}

static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    memcpy(cctx->kmo.param.k, key, keylen);
    cctx->fc = S390X_AES_FC(keylen);
    cctx->res = 0;
    return 1;
}

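/*
 * KMO processes whole blocks only, so the cipher below first consumes any
 * keystream bytes left over from the previous call (cctx->res), hands all
 * complete blocks to KMO, and finally generates one extra keystream block
 * with KM to cover a trailing partial block.
 */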
static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    int n = cctx->res;
    int rem;

    while (n && len) {
        *out = *in ^ cctx->kmo.param.cv[n];
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
                 cctx->kmo.param.k);

        while (rem--) {
            out[n] = in[n] ^ cctx->kmo.param.cv[n];
            ++n;
        }
    }

    cctx->res = n;
    return 1;
}

static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 16 << 24;   /* 16 bytes cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    cctx->res = 0;
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

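/*
 * Partial blocks are handled in software around the KMF instruction, as in
 * the OFB path above. Unlike OFB, the feedback value depends on the
 * direction, hence the enc-dependent update of kmf.param.cv.
 */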
static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);
    int n = cctx->res;
    int rem;
    unsigned char tmp;

    while (n && len) {
        tmp = *in;
        *out = cctx->kmf.param.cv[n] ^ tmp;
        cctx->kmf.param.cv[n] = enc ? *out : tmp;
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
                 S390X_AES_FC(keylen), cctx->kmf.param.k);

        while (rem--) {
            tmp = in[n];
            out[n] = cctx->kmf.param.cv[n] ^ tmp;
            cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
            ++n;
        }
    }

    cctx->res = n;
    return 1;
}

static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *key,
                                   const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
    const int keylen = EVP_CIPHER_CTX_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);

    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 1 << 24;   /* 1 byte cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);

    s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
    return 1;
}

# define s390x_aes_cfb1_init_key aes_init_key

# define s390x_aes_cfb1_cipher aes_cfb1_cipher
static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len);

# define S390X_AES_CTR_CTX EVP_AES_KEY

# define s390x_aes_ctr_init_key aes_init_key

# define s390x_aes_ctr_cipher aes_ctr_cipher
static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

/* iv + padding length for iv lengths != 12 */
# define S390X_gcm_ivpadlen(i)  ((((i) + 15) >> 4 << 4) + 16)

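/*
 * The padded length rounds the IV up to a whole 16-byte block and appends
 * one more block: for ivlen != 12, GCM derives J0 = GHASH(IV || 0-padding
 * || 64-bit zero || 64-bit IV bit-length), and the trailing length block
 * accounts for the extra 16 bytes.
 */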
/*-
 * Process additional authenticated data. Returns 0 on success. Code is
 * big-endian.
 */
static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
                             size_t len)
{
    unsigned long long alen;
    int n, rem;

    if (ctx->kma.param.tpcl)
        return -2;

    alen = ctx->kma.param.taadl + len;
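    /*
     * SP 800-38D limits the AAD to 2^64 - 1 bits, i.e. at most 2^61 bytes;
     * the second test catches overflow of the 64-bit running total.
     */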
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->kma.param.taadl = alen;

    n = ctx->areslen;
    if (n) {
        while (n && len) {
            ctx->ares[n] = *aad;
            n = (n + 1) & 0xf;
            ++aad;
            --len;
        }
        /* ctx->ares contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
        }
        ctx->areslen = n;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
        aad += len;
        ctx->fc |= S390X_KMA_HS;
    }

    if (rem) {
        ctx->areslen = rem;

        do {
            --rem;
            ctx->ares[rem] = aad[rem];
        } while (rem);
    }
    return 0;
}

/*-
 * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
 * success. Code is big-endian.
 */
static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len)
{
    const unsigned char *inptr;
    unsigned long long mlen;
    union {
        unsigned int w[4];
        unsigned char b[16];
    } buf;
    size_t inlen;
    int n, rem, i;

    mlen = ctx->kma.param.tpcl + len;
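    /*
     * SP 800-38D caps the GCM plaintext at 2^39 - 256 bits, i.e.
     * 2^36 - 32 bytes; the second test catches overflow of the 64-bit
     * running total.
     */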
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->kma.param.tpcl = mlen;

    n = ctx->mreslen;
    if (n) {
        inptr = in;
        inlen = len;
        while (n && inlen) {
            ctx->mres[n] = *inptr;
            n = (n + 1) & 0xf;
            ++inptr;
            --inlen;
        }
        /* ctx->mres contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
            ctx->areslen = 0;

            /* previous call already encrypted/decrypted its remainder,
             * see comment below */
            n = ctx->mreslen;
            while (n) {
                *out = buf.b[n];
                n = (n + 1) & 0xf;
                ++out;
                ++in;
                --len;
            }
            ctx->mreslen = 0;
        }
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
        in += len;
        out += len;
        ctx->fc |= S390X_KMA_HS;
        ctx->areslen = 0;
    }

    /*-
     * If there is a remainder, it has to be saved such that it can be
     * processed by kma later. However, we also have to do the for-now
     * unauthenticated encryption/decryption part here and now...
     */
    if (rem) {
        if (!ctx->mreslen) {
            buf.w[0] = ctx->kma.param.j0.w[0];
            buf.w[1] = ctx->kma.param.j0.w[1];
            buf.w[2] = ctx->kma.param.j0.w[2];
            buf.w[3] = ctx->kma.param.cv.w + 1;
            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
        }

        n = ctx->mreslen;
        for (i = 0; i < rem; i++) {
            ctx->mres[n + i] = in[i];
            out[i] = in[i] ^ ctx->kres[n + i];
        }

        ctx->mreslen += rem;
    }
    return 0;
}

/*-
 * Initialize context structure. Code is big-endian.
 */
static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
                                const unsigned char *iv)
{
    ctx->kma.param.t.g[0] = 0;
    ctx->kma.param.t.g[1] = 0;
    ctx->kma.param.tpcl = 0;
    ctx->kma.param.taadl = 0;
    ctx->mreslen = 0;
    ctx->areslen = 0;
    ctx->kreslen = 0;

    if (ctx->ivlen == 12) {
        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
        ctx->kma.param.j0.w[3] = 1;
        ctx->kma.param.cv.w = 1;
    } else {
        /* ctx->iv has the right size and is already padded. */
        memcpy(ctx->iv, iv, ctx->ivlen);
        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
                  ctx->fc, &ctx->kma.param);
        ctx->fc |= S390X_KMA_HS;

        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
        ctx->kma.param.t.g[0] = 0;
        ctx->kma.param.t.g[1] = 0;
    }
}

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    S390X_AES_GCM_CTX *gctx_out;
    EVP_CIPHER_CTX *out;
    unsigned char *buf, *iv;
    int ivlen, enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        ivlen = EVP_CIPHER_iv_length(c->cipher);
        iv = EVP_CIPHER_CTX_iv_noconst(c);
        gctx->key_set = 0;
        gctx->iv_set = 0;
        gctx->ivlen = ivlen;
        gctx->iv = iv;
        gctx->taglen = -1;
        gctx->iv_gen = 0;
        gctx->tls_aad_len = -1;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = gctx->ivlen;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        if (arg <= 0)
            return 0;

        if (arg != 12) {
            iv = EVP_CIPHER_CTX_iv_noconst(c);
            len = S390X_gcm_ivpadlen(arg);

            /* Allocate memory for iv if needed. */
            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
                if (gctx->iv != iv)
                    OPENSSL_free(gctx->iv);

                if ((gctx->iv = OPENSSL_malloc(len)) == NULL) {
                    EVPerr(EVP_F_S390X_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
                    return 0;
                }
            }
            /* Add padding. */
            memset(gctx->iv + arg, 0, len - arg - 8);
            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
        }
        gctx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (arg <= 0 || arg > 16 || enc)
            return 0;

        memcpy(buf, ptr, arg);
        gctx->taglen = arg;
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
            return 0;

        memcpy(ptr, gctx->kma.param.t.b, arg);
        return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
        /* Special case: -1 length restores whole iv */
        if (arg == -1) {
            memcpy(gctx->iv, ptr, gctx->ivlen);
            gctx->iv_gen = 1;
            return 1;
        }
        /*
         * Fixed field must be at least 4 bytes and invocation field at least
         * 8.
         */
        if ((arg < 4) || (gctx->ivlen - arg) < 8)
            return 0;

        if (arg)
            memcpy(gctx->iv, ptr, arg);

        enc = EVP_CIPHER_CTX_encrypting(c);
        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
            return 0;

        gctx->iv_gen = 1;
        return 1;

    case EVP_CTRL_GCM_IV_GEN:
        if (gctx->iv_gen == 0 || gctx->key_set == 0)
            return 0;

        s390x_aes_gcm_setiv(gctx, gctx->iv);

        if (arg <= 0 || arg > gctx->ivlen)
            arg = gctx->ivlen;

        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * Invocation field will be at least 8 bytes in size and so no need
         * to check wrap around or increment more than last 8 bytes.
         */
        ctr64_inc(gctx->iv + gctx->ivlen - 8);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
            return 0;

        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
        s390x_aes_gcm_setiv(gctx, gctx->iv);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_AEAD_TLS1_AAD:
        /* Save the aad for later use. */
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        gctx->tls_aad_len = arg;
        gctx->tls_enc_records = 0;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        /* Correct length for explicit iv. */
        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
            return 0;
        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;

        /* If decrypting correct for tag too. */
        enc = EVP_CIPHER_CTX_encrypting(c);
        if (!enc) {
            if (len < EVP_GCM_TLS_TAG_LEN)
                return 0;
            len -= EVP_GCM_TLS_TAG_LEN;
        }
        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;
        /* Extra padding: tag appended to record. */
        return EVP_GCM_TLS_TAG_LEN;

    case EVP_CTRL_COPY:
        out = ptr;
        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);
        iv = EVP_CIPHER_CTX_iv_noconst(c);

        if (gctx->iv == iv) {
            gctx_out->iv = EVP_CIPHER_CTX_iv_noconst(out);
        } else {
            len = S390X_gcm_ivpadlen(gctx->ivlen);

            if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL) {
                EVPerr(EVP_F_S390X_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
                return 0;
            }

            memcpy(gctx_out->iv, gctx->iv, len);
        }
        return 1;

    default:
        return -1;
    }
}

/*-
 * Set key and/or iv. Returns 1 on success.
 */
static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_key_length(ctx);
        memcpy(&gctx->kma.param.k, key, keylen);

        gctx->fc = S390X_AES_FC(keylen);
        if (!enc)
            gctx->fc |= S390X_DECRYPT;

        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;

        if (iv != NULL) {
            s390x_aes_gcm_setiv(gctx, iv);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        if (gctx->key_set)
            s390x_aes_gcm_setiv(gctx, iv);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);

        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);
    int rv = -1;

    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
        return -1;

    /*
     * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
     * Requirements from SP 800-38D". The requirement is for one party to the
     * communication to fail after 2^64 - 1 keys. We do this on the encrypting
     * side only.
     */
    if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
        EVPerr(EVP_F_S390X_AES_GCM_TLS_CIPHER, EVP_R_TOO_MANY_RECORDS);
        goto err;
    }

    if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
                                     : EVP_CTRL_GCM_SET_IV_INV,
                            EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
        goto err;

    in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;

    gctx->kma.param.taadl = gctx->tls_aad_len << 3;
    gctx->kma.param.tpcl = len << 3;
    s390x_kma(buf, gctx->tls_aad_len, in, len, out,
              gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);

    if (enc) {
        memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
        rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
    } else {
        if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
                          EVP_GCM_TLS_TAG_LEN)) {
            OPENSSL_cleanse(out, len);
            goto err;
        }
        rv = len;
    }
 err:
    gctx->iv_set = 0;
    gctx->tls_aad_len = -1;
    return rv;
}

/*-
 * Called from EVP layer to initialize context, process additional
 * authenticated data, en/de-crypt plain/cipher-text and authenticate
 * ciphertext or process a TLS packet, depending on context. Returns bytes
 * written on success. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    unsigned char *buf, tmp[16];
    int enc;

    if (!gctx->key_set)
        return -1;

    if (gctx->tls_aad_len >= 0)
        return s390x_aes_gcm_tls_cipher(ctx, out, in, len);

    if (!gctx->iv_set)
        return -1;

    if (in != NULL) {
        if (out == NULL) {
            if (s390x_aes_gcm_aad(gctx, in, len))
                return -1;
        } else {
            if (s390x_aes_gcm(gctx, in, out, len))
                return -1;
        }
        return len;
    } else {
        gctx->kma.param.taadl <<= 3;
        gctx->kma.param.tpcl <<= 3;
        s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
                  gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
        /* recall that we already did en-/decrypt gctx->mres
         * and returned it to caller... */
        OPENSSL_cleanse(tmp, gctx->mreslen);
        gctx->iv_set = 0;

        enc = EVP_CIPHER_CTX_encrypting(ctx);
        if (enc) {
            gctx->taglen = 16;
        } else {
            if (gctx->taglen < 0)
                return -1;

            buf = EVP_CIPHER_CTX_buf_noconst(ctx);
            if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
                return -1;
        }
        return 0;
    }
}

static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    const unsigned char *iv;

    if (gctx == NULL)
        return 0;

    iv = EVP_CIPHER_CTX_iv(c);
    if (iv != gctx->iv)
        OPENSSL_free(gctx->iv);

    OPENSSL_cleanse(gctx, sizeof(*gctx));
    return 1;
}

# define S390X_AES_XTS_CTX EVP_AES_XTS_CTX

# define s390x_aes_xts_init_key aes_xts_init_key
static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc);
# define s390x_aes_xts_cipher aes_xts_cipher
static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);
# define s390x_aes_xts_ctrl aes_xts_ctrl
static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
# define s390x_aes_xts_cleanup aes_xts_cleanup

/*-
 * Set nonce and length fields. Code is big-endian.
 */
static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
                                       const unsigned char *nonce,
                                       size_t mlen)
{
    ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
    ctx->aes.ccm.nonce.g[1] = mlen;
    memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
}

/*-
 * Process additional authenticated data. Code is big-endian.
 */
static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
                              size_t alen)
{
    unsigned char *ptr;
    int i, rem;

    if (!alen)
        return;

    ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;

    /* Suppress 'type-punned pointer dereference' warning. */
    ptr = ctx->aes.ccm.buf.b;

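    /*
     * Encode the AAD length as in RFC 3610: lengths below 0xff00 use a
     * plain two-byte encoding, 32-bit lengths are prefixed with 0xfffe and
     * 64-bit lengths with 0xffff.
     */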
    if (alen < ((1 << 16) - (1 << 8))) {
        *(uint16_t *)ptr = alen;
        i = 2;
    } else if (sizeof(alen) == 8
               && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
        *(uint16_t *)ptr = 0xffff;
        *(uint64_t *)(ptr + 2) = alen;
        i = 10;
    } else {
        *(uint16_t *)ptr = 0xfffe;
        *(uint32_t *)(ptr + 2) = alen;
        i = 6;
    }

    while (i < 16 && alen) {
        ctx->aes.ccm.buf.b[i] = *aad;
        ++aad;
        --alen;
        ++i;
    }
    while (i < 16) {
        ctx->aes.ccm.buf.b[i] = 0;
        ++i;
    }

    ctx->aes.ccm.kmac_param.icv.g[0] = 0;
    ctx->aes.ccm.kmac_param.icv.g[1] = 0;
    s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
               &ctx->aes.ccm.kmac_param);
    ctx->aes.ccm.blocks += 2;

    rem = alen & 0xf;
    alen &= ~(size_t)0xf;
    if (alen) {
        s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        ctx->aes.ccm.blocks += alen >> 4;
        aad += alen;
    }
    if (rem) {
        for (i = 0; i < rem; i++)
            ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];

        s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                 ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
}

/*-
 * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
 * success.
 */
static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len, int enc)
{
    size_t n, rem;
    unsigned int i, l, num;
    unsigned char flags;

    flags = ctx->aes.ccm.nonce.b[0];
    if (!(flags & S390X_CCM_AAD_FLAG)) {
        s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
                 ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
    l = flags & 0x7;
    ctx->aes.ccm.nonce.b[0] = l;

    /*-
     * Reconstruct length from encoded length field
     * and initialize it with counter value.
     */
    n = 0;
    for (i = 15 - l; i < 15; i++) {
        n |= ctx->aes.ccm.nonce.b[i];
        ctx->aes.ccm.nonce.b[i] = 0;
        n <<= 8;
    }
    n |= ctx->aes.ccm.nonce.b[15];
    ctx->aes.ccm.nonce.b[15] = 1;

    if (n != len)
        return -1;              /* length mismatch */

    if (enc) {
        /* Two operations per block plus one for tag encryption */
        ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
        if (ctx->aes.ccm.blocks > (1ULL << 61))
            return -2;          /* too much data */
    }

    num = 0;
    rem = len & 0xf;
    len &= ~(size_t)0xf;

    if (enc) {
        /* mac-then-encrypt */
        if (len)
            s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }

        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);
    } else {
        /* decrypt-then-mac */
        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);

        if (len)
            s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }
    }
    /* encrypt tag */
    for (i = 15 - l; i < 16; i++)
        ctx->aes.ccm.nonce.b[i] = 0;

    s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
             ctx->aes.ccm.kmac_param.k);
    ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
    ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];

    ctx->aes.ccm.nonce.b[0] = flags;    /* restore flags field */
    return 0;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned.
 */
static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
    unsigned char *ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
    unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_encrypting(ctx);

    if (out != in
            || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
        return -1;

    if (enc) {
        /* Set explicit iv (sequence number). */
        memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
    }

    len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
    /*-
     * Get explicit iv (sequence number). We already have fixed iv
     * (server/client_write_iv) here.
     */
    memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
    s390x_aes_ccm_setiv(cctx, ivec, len);

    /* Process aad (sequence number|type|version|length) */
    s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);

    in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_CCM_TLS_EXPLICIT_IV_LEN;

    if (enc) {
        if (s390x_aes_ccm(cctx, in, out, len, enc))
            return -1;

        memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
        return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
    } else {
        if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
            if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
                               cctx->aes.ccm.m))
                return len;
        }

        OPENSSL_cleanse(out, len);
        return -1;
    }
}

/*-
 * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
 * returned.
 */
static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
    unsigned char *ivec;
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_key_length(ctx);
        cctx->aes.ccm.fc = S390X_AES_FC(keylen);
        memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);

        /* Store encoded m and l, then zero the remaining nonce bytes. */
        cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
                                 | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
        memset(cctx->aes.ccm.nonce.b + 1, 0,
               sizeof(cctx->aes.ccm.nonce.b) - 1);
        cctx->aes.ccm.blocks = 0;

        cctx->aes.ccm.key_set = 1;
    }

    if (iv != NULL) {
        ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
        memcpy(ivec, iv, 15 - cctx->aes.ccm.l);

        cctx->aes.ccm.iv_set = 1;
    }

    return 1;
}

1946 /*-
1947 * Called from EVP layer to initialize context, process additional
1948 * authenticated data, en/de-crypt plain/cipher-text and authenticate
1949 * plaintext or process a TLS packet, depending on context. Returns bytes
1950 * written on success. Otherwise -1 is returned.
1951 */
1952 static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
1953 const unsigned char *in, size_t len)
1954 {
1955 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
1956 const int enc = EVP_CIPHER_CTX_encrypting(ctx);
1957 int rv;
1958 unsigned char *buf, *ivec;
1959
1960 if (!cctx->aes.ccm.key_set)
1961 return -1;
1962
1963 if (cctx->aes.ccm.tls_aad_len >= 0)
1964 return s390x_aes_ccm_tls_cipher(ctx, out, in, len);
1965
1966 /*-
1967 * Final(): Does not return any data. Recall that ccm is mac-then-encrypt
1968 * so integrity must be checked already at Update() i.e., before
1969 * potentially corrupted data is output.
1970 */
1971 if (in == NULL && out != NULL)
1972 return 0;
1973
1974 if (!cctx->aes.ccm.iv_set)
1975 return -1;
1976
1977 if (out == NULL) {
1978 /* Update(): Pass message length. */
1979 if (in == NULL) {
1980 ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
1981 s390x_aes_ccm_setiv(cctx, ivec, len);
1982
1983 cctx->aes.ccm.len_set = 1;
1984 return len;
1985 }
1986
1987 /* Update(): Process aad. */
1988 if (!cctx->aes.ccm.len_set && len)
1989 return -1;
1990
1991 s390x_aes_ccm_aad(cctx, in, len);
1992 return len;
1993 }
1994
1995 /* The tag must be set before actually decrypting data */
1996 if (!enc && !cctx->aes.ccm.tag_set)
1997 return -1;
1998
1999 /* Update(): Process message. */
2000
2001 if (!cctx->aes.ccm.len_set) {
2002 /*-
2003 * In case message length was not previously set explicitly via
2004 * Update(), set it now.
2005 */
2006 ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
2007 s390x_aes_ccm_setiv(cctx, ivec, len);
2008
2009 cctx->aes.ccm.len_set = 1;
2010 }
2011
2012 if (enc) {
2013 if (s390x_aes_ccm(cctx, in, out, len, enc))
2014 return -1;
2015
2016 cctx->aes.ccm.tag_set = 1;
2017 return len;
2018 } else {
2019 rv = -1;
2020
2021 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2022 buf = EVP_CIPHER_CTX_buf_noconst(ctx);
2023 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
2024 cctx->aes.ccm.m))
2025 rv = len;
2026 }
2027
2028 if (rv == -1)
2029 OPENSSL_cleanse(out, len);
2030
2031 cctx->aes.ccm.iv_set = 0;
2032 cctx->aes.ccm.tag_set = 0;
2033 cctx->aes.ccm.len_set = 0;
2034 return rv;
2035 }
2036 }
2037
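/*
 * Illustrative sketch, not part of the original source: the EVP-level
 * call sequence that the Update()/Final() state machine above serves.
 * Buffers are zero placeholders and error checking is collapsed into a
 * single chain for brevity.
 */
static int ccm_encrypt_example(void)
{
    static const unsigned char key[32], nonce[13], aad[8], msg[16];
    unsigned char out[sizeof(msg)], tag[12];
    int outl, tmpl, ok;
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();

    if (c == NULL)
        return 0;
    ok = EVP_EncryptInit_ex(c, EVP_aes_256_ccm(), NULL, NULL, NULL)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN,
                               (int)sizeof(nonce), NULL)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG,
                               (int)sizeof(tag), NULL)
        && EVP_EncryptInit_ex(c, NULL, NULL, key, nonce)
        /* CCM needs the total message length before any AAD or data. */
        && EVP_EncryptUpdate(c, NULL, &outl, NULL, (int)sizeof(msg))
        && EVP_EncryptUpdate(c, NULL, &outl, aad, (int)sizeof(aad))
        && EVP_EncryptUpdate(c, out, &outl, msg, (int)sizeof(msg))
        && EVP_EncryptFinal_ex(c, out + outl, &tmpl)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG,
                               (int)sizeof(tag), tag);
    EVP_CIPHER_CTX_free(c);
    return ok;
}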
2038 /*-
2039 * Performs various operations on the context structure depending on control
2040 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
2041 * Code is big-endian.
2042 */
2043 static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2044 {
2045 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
2046 unsigned char *buf, *iv;
2047 int enc, len;
2048
2049 switch (type) {
2050 case EVP_CTRL_INIT:
2051 cctx->aes.ccm.key_set = 0;
2052 cctx->aes.ccm.iv_set = 0;
2053 cctx->aes.ccm.l = 8;
2054 cctx->aes.ccm.m = 12;
2055 cctx->aes.ccm.tag_set = 0;
2056 cctx->aes.ccm.len_set = 0;
2057 cctx->aes.ccm.tls_aad_len = -1;
2058 return 1;
2059
2060 case EVP_CTRL_GET_IVLEN:
2061 *(int *)ptr = 15 - cctx->aes.ccm.l;
2062 return 1;
2063
2064 case EVP_CTRL_AEAD_TLS1_AAD:
2065 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2066 return 0;
2067
2068 /* Save the aad for later use. */
2069 buf = EVP_CIPHER_CTX_buf_noconst(c);
2070 memcpy(buf, ptr, arg);
2071 cctx->aes.ccm.tls_aad_len = arg;
2072
2073 len = buf[arg - 2] << 8 | buf[arg - 1];
2074 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
2075 return 0;
2076
2077 /* Correct length for explicit iv. */
2078 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
2079
2080 enc = EVP_CIPHER_CTX_encrypting(c);
2081 if (!enc) {
2082 if (len < cctx->aes.ccm.m)
2083 return 0;
2084
2085 /* Correct length for tag. */
2086 len -= cctx->aes.ccm.m;
2087 }
2088
2089 buf[arg - 2] = len >> 8;
2090 buf[arg - 1] = len & 0xff;
2091
2092 /* Extra padding: tag appended to record. */
2093 return cctx->aes.ccm.m;
2094
2095 case EVP_CTRL_CCM_SET_IV_FIXED:
2096 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
2097 return 0;
2098
2099 /* Copy to first part of the iv. */
2100 iv = EVP_CIPHER_CTX_iv_noconst(c);
2101 memcpy(iv, ptr, arg);
2102 return 1;
2103
2104 case EVP_CTRL_AEAD_SET_IVLEN:
2105 arg = 15 - arg;
2106 /* fall-through */
2107
2108 case EVP_CTRL_CCM_SET_L:
2109 if (arg < 2 || arg > 8)
2110 return 0;
2111
2112 cctx->aes.ccm.l = arg;
2113 return 1;
2114
2115 case EVP_CTRL_AEAD_SET_TAG:
2116 if ((arg & 1) || arg < 4 || arg > 16)
2117 return 0;
2118
2119 enc = EVP_CIPHER_CTX_encrypting(c);
2120 if (enc && ptr)
2121 return 0;
2122
2123 if (ptr) {
2124 cctx->aes.ccm.tag_set = 1;
2125 buf = EVP_CIPHER_CTX_buf_noconst(c);
2126 memcpy(buf, ptr, arg);
2127 }
2128
2129 cctx->aes.ccm.m = arg;
2130 return 1;
2131
2132 case EVP_CTRL_AEAD_GET_TAG:
2133 enc = EVP_CIPHER_CTX_encrypting(c);
2134 if (!enc || !cctx->aes.ccm.tag_set)
2135 return 0;
2136
2137 if (arg < cctx->aes.ccm.m)
2138 return 0;
2139
2140 memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2141 cctx->aes.ccm.tag_set = 0;
2142 cctx->aes.ccm.iv_set = 0;
2143 cctx->aes.ccm.len_set = 0;
2144 return 1;
2145
2146 case EVP_CTRL_COPY:
2147 return 1;
2148
2149 default:
2150 return -1;
2151 }
2152 }
2153
2154 # define s390x_aes_ccm_cleanup aes_ccm_cleanup
2155
2156 # ifndef OPENSSL_NO_OCB
2157 # define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
2158
2159 # define s390x_aes_ocb_init_key aes_ocb_init_key
2160 static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2161 const unsigned char *iv, int enc);
2162 # define s390x_aes_ocb_cipher aes_ocb_cipher
2163 static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2164 const unsigned char *in, size_t len);
2165 # define s390x_aes_ocb_cleanup aes_ocb_cleanup
2166 static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
2167 # define s390x_aes_ocb_ctrl aes_ocb_ctrl
2168 static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
2169 # endif
2170
2171 # ifndef OPENSSL_NO_SIV
2172 # define S390X_AES_SIV_CTX EVP_AES_SIV_CTX
2173
2174 # define s390x_aes_siv_init_key aes_siv_init_key
2175 # define s390x_aes_siv_cipher aes_siv_cipher
2176 # define s390x_aes_siv_cleanup aes_siv_cleanup
2177 # define s390x_aes_siv_ctrl aes_siv_ctrl
2178 # endif
2179
2180 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
2181 MODE,flags) \
2182 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2183 nid##_##keylen##_##nmode,blocksize, \
2184 keylen / 8, \
2185 ivlen, \
2186 flags | EVP_CIPH_##MODE##_MODE, \
2187 s390x_aes_##mode##_init_key, \
2188 s390x_aes_##mode##_cipher, \
2189 NULL, \
2190 sizeof(S390X_AES_##MODE##_CTX), \
2191 NULL, \
2192 NULL, \
2193 NULL, \
2194 NULL \
2195 }; \
2196 static const EVP_CIPHER aes_##keylen##_##mode = { \
2197 nid##_##keylen##_##nmode, \
2198 blocksize, \
2199 keylen / 8, \
2200 ivlen, \
2201 flags | EVP_CIPH_##MODE##_MODE, \
2202 aes_init_key, \
2203 aes_##mode##_cipher, \
2204 NULL, \
2205 sizeof(EVP_AES_KEY), \
2206 NULL, \
2207 NULL, \
2208 NULL, \
2209 NULL \
2210 }; \
2211 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2212 { \
2213 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2214 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2215 }
2216
2217 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
2218 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2219 nid##_##keylen##_##mode, \
2220 blocksize, \
2221 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2222 ivlen, \
2223 flags | EVP_CIPH_##MODE##_MODE, \
2224 s390x_aes_##mode##_init_key, \
2225 s390x_aes_##mode##_cipher, \
2226 s390x_aes_##mode##_cleanup, \
2227 sizeof(S390X_AES_##MODE##_CTX), \
2228 NULL, \
2229 NULL, \
2230 s390x_aes_##mode##_ctrl, \
2231 NULL \
2232 }; \
2233 static const EVP_CIPHER aes_##keylen##_##mode = { \
2234 nid##_##keylen##_##mode,blocksize, \
2235 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2236 ivlen, \
2237 flags | EVP_CIPH_##MODE##_MODE, \
2238 aes_##mode##_init_key, \
2239 aes_##mode##_cipher, \
2240 aes_##mode##_cleanup, \
2241 sizeof(EVP_AES_##MODE##_CTX), \
2242 NULL, \
2243 NULL, \
2244 aes_##mode##_ctrl, \
2245 NULL \
2246 }; \
2247 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2248 { \
2249 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2250 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2251 }
2252
2253 #else
2254
2255 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
2256 static const EVP_CIPHER aes_##keylen##_##mode = { \
2257 nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
2258 flags|EVP_CIPH_##MODE##_MODE, \
2259 aes_init_key, \
2260 aes_##mode##_cipher, \
2261 NULL, \
2262 sizeof(EVP_AES_KEY), \
2263 NULL,NULL,NULL,NULL }; \
2264 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2265 { return &aes_##keylen##_##mode; }
2266
2267 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
2268 static const EVP_CIPHER aes_##keylen##_##mode = { \
2269 nid##_##keylen##_##mode,blocksize, \
2270 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
2271 ivlen, \
2272 flags|EVP_CIPH_##MODE##_MODE, \
2273 aes_##mode##_init_key, \
2274 aes_##mode##_cipher, \
2275 aes_##mode##_cleanup, \
2276 sizeof(EVP_AES_##MODE##_CTX), \
2277 NULL,NULL,aes_##mode##_ctrl,NULL }; \
2278 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2279 { return &aes_##keylen##_##mode; }
2280
2281 #endif
2282
2283 #define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
2284 BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2285 BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2286 BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2287 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2288 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags) \
2289 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags) \
2290 BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
2291
2292 static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2293 const unsigned char *iv, int enc)
2294 {
2295 int ret, mode;
2296 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2297
2298 mode = EVP_CIPHER_CTX_mode(ctx);
2299 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
2300 && !enc) {
2301 #ifdef HWAES_CAPABLE
2302 if (HWAES_CAPABLE) {
2303 ret = HWAES_set_decrypt_key(key,
2304 EVP_CIPHER_CTX_key_length(ctx) * 8,
2305 &dat->ks.ks);
2306 dat->block = (block128_f) HWAES_decrypt;
2307 dat->stream.cbc = NULL;
2308 # ifdef HWAES_cbc_encrypt
2309 if (mode == EVP_CIPH_CBC_MODE)
2310 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2311 # endif
2312 } else
2313 #endif
2314 #ifdef BSAES_CAPABLE
2315 if (BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE) {
2316 ret = AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2317 &dat->ks.ks);
2318 dat->block = (block128_f) AES_decrypt;
2319 dat->stream.cbc = (cbc128_f) bsaes_cbc_encrypt;
2320 } else
2321 #endif
2322 #ifdef VPAES_CAPABLE
2323 if (VPAES_CAPABLE) {
2324 ret = vpaes_set_decrypt_key(key,
2325 EVP_CIPHER_CTX_key_length(ctx) * 8,
2326 &dat->ks.ks);
2327 dat->block = (block128_f) vpaes_decrypt;
2328 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2329 (cbc128_f) vpaes_cbc_encrypt : NULL;
2330 } else
2331 #endif
2332 {
2333 ret = AES_set_decrypt_key(key,
2334 EVP_CIPHER_CTX_key_length(ctx) * 8,
2335 &dat->ks.ks);
2336 dat->block = (block128_f) AES_decrypt;
2337 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2338 (cbc128_f) AES_cbc_encrypt : NULL;
2339 }
2340 } else
2341 #ifdef HWAES_CAPABLE
2342 if (HWAES_CAPABLE) {
2343 ret = HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2344 &dat->ks.ks);
2345 dat->block = (block128_f) HWAES_encrypt;
2346 dat->stream.cbc = NULL;
2347 # ifdef HWAES_cbc_encrypt
2348 if (mode == EVP_CIPH_CBC_MODE)
2349 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2350 else
2351 # endif
2352 # ifdef HWAES_ctr32_encrypt_blocks
2353 if (mode == EVP_CIPH_CTR_MODE)
2354 dat->stream.ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2355 else
2356 # endif
2357 (void)0; /* terminate potentially open 'else' */
2358 } else
2359 #endif
2360 #ifdef BSAES_CAPABLE
2361 if (BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE) {
2362 ret = AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2363 &dat->ks.ks);
2364 dat->block = (block128_f) AES_encrypt;
2365 dat->stream.ctr = (ctr128_f) bsaes_ctr32_encrypt_blocks;
2366 } else
2367 #endif
2368 #ifdef VPAES_CAPABLE
2369 if (VPAES_CAPABLE) {
2370 ret = vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2371 &dat->ks.ks);
2372 dat->block = (block128_f) vpaes_encrypt;
2373 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2374 (cbc128_f) vpaes_cbc_encrypt : NULL;
2375 } else
2376 #endif
2377 {
2378 ret = AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
2379 &dat->ks.ks);
2380 dat->block = (block128_f) AES_encrypt;
2381 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2382 (cbc128_f) AES_cbc_encrypt : NULL;
2383 #ifdef AES_CTR_ASM
2384 if (mode == EVP_CIPH_CTR_MODE)
2385 dat->stream.ctr = (ctr128_f) AES_ctr32_encrypt;
2386 #endif
2387 }
2388
2389 if (ret < 0) {
2390 EVPerr(EVP_F_AES_INIT_KEY, EVP_R_AES_KEY_SETUP_FAILED);
2391 return 0;
2392 }
2393
2394 return 1;
2395 }
2396
2397 static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2398 const unsigned char *in, size_t len)
2399 {
2400 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2401
2402 if (dat->stream.cbc)
2403 (*dat->stream.cbc) (in, out, len, &dat->ks,
2404 EVP_CIPHER_CTX_iv_noconst(ctx),
2405 EVP_CIPHER_CTX_encrypting(ctx));
2406 else if (EVP_CIPHER_CTX_encrypting(ctx))
2407 CRYPTO_cbc128_encrypt(in, out, len, &dat->ks,
2408 EVP_CIPHER_CTX_iv_noconst(ctx), dat->block);
2409 else
2410 CRYPTO_cbc128_decrypt(in, out, len, &dat->ks,
2411 EVP_CIPHER_CTX_iv_noconst(ctx), dat->block);
2412
2413 return 1;
2414 }
2415
2416 static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2417 const unsigned char *in, size_t len)
2418 {
2419 size_t bl = EVP_CIPHER_CTX_block_size(ctx);
2420 size_t i;
2421 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2422
2423 if (len < bl)
2424 return 1;
2425
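/* len is reduced by one block so that i <= len visits every full block */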
2426 for (i = 0, len -= bl; i <= len; i += bl)
2427 (*dat->block) (in + i, out + i, &dat->ks);
2428
2429 return 1;
2430 }
2431
2432 static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2433 const unsigned char *in, size_t len)
2434 {
2435 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2436
2437 int num = EVP_CIPHER_CTX_num(ctx);
2438 CRYPTO_ofb128_encrypt(in, out, len, &dat->ks,
2439 EVP_CIPHER_CTX_iv_noconst(ctx), &num, dat->block);
2440 EVP_CIPHER_CTX_set_num(ctx, num);
2441 return 1;
2442 }
2443
2444 static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2445 const unsigned char *in, size_t len)
2446 {
2447 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2448
2449 int num = EVP_CIPHER_CTX_num(ctx);
2450 CRYPTO_cfb128_encrypt(in, out, len, &dat->ks,
2451 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2452 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2453 EVP_CIPHER_CTX_set_num(ctx, num);
2454 return 1;
2455 }
2456
2457 static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2458 const unsigned char *in, size_t len)
2459 {
2460 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2461
2462 int num = EVP_CIPHER_CTX_num(ctx);
2463 CRYPTO_cfb128_8_encrypt(in, out, len, &dat->ks,
2464 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2465 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2466 EVP_CIPHER_CTX_set_num(ctx, num);
2467 return 1;
2468 }
2469
2470 static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2471 const unsigned char *in, size_t len)
2472 {
2473 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2474
2475 if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) {
2476 int num = EVP_CIPHER_CTX_num(ctx);
2477 CRYPTO_cfb128_1_encrypt(in, out, len, &dat->ks,
2478 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2479 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2480 EVP_CIPHER_CTX_set_num(ctx, num);
2481 return 1;
2482 }
2483
2484 while (len >= MAXBITCHUNK) {
2485 int num = EVP_CIPHER_CTX_num(ctx);
2486 CRYPTO_cfb128_1_encrypt(in, out, MAXBITCHUNK * 8, &dat->ks,
2487 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2488 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2489 EVP_CIPHER_CTX_set_num(ctx, num);
2490 len -= MAXBITCHUNK;
2491 out += MAXBITCHUNK;
2492 in += MAXBITCHUNK;
2493 }
2494 if (len) {
2495 int num = EVP_CIPHER_CTX_num(ctx);
2496 CRYPTO_cfb128_1_encrypt(in, out, len * 8, &dat->ks,
2497 EVP_CIPHER_CTX_iv_noconst(ctx), &num,
2498 EVP_CIPHER_CTX_encrypting(ctx), dat->block);
2499 EVP_CIPHER_CTX_set_num(ctx, num);
2500 }
2501
2502 return 1;
2503 }
2504
2505 static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2506 const unsigned char *in, size_t len)
2507 {
2508 unsigned int num = EVP_CIPHER_CTX_num(ctx);
2509 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2510
2511 if (dat->stream.ctr)
2512 CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks,
2513 EVP_CIPHER_CTX_iv_noconst(ctx),
2514 EVP_CIPHER_CTX_buf_noconst(ctx),
2515 &num, dat->stream.ctr);
2516 else
2517 CRYPTO_ctr128_encrypt(in, out, len, &dat->ks,
2518 EVP_CIPHER_CTX_iv_noconst(ctx),
2519 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
2520 dat->block);
2521 EVP_CIPHER_CTX_set_num(ctx, num);
2522 return 1;
2523 }
2524
2525 BLOCK_CIPHER_generic_pack(NID_aes, 128, 0)
2526 BLOCK_CIPHER_generic_pack(NID_aes, 192, 0)
2527 BLOCK_CIPHER_generic_pack(NID_aes, 256, 0)
2528
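/*
 * Illustrative sketch, not part of the original source: each pack above
 * instantiates EVP_aes_<bits>_<mode>() getters such as EVP_aes_128_ctr().
 * Minimal use of one of them; key/iv/in are zero placeholders.
 */
static int ctr_example(void)
{
    static const unsigned char key[16], iv[16], in[32];
    unsigned char out[sizeof(in)];
    int outl, tmpl, ok;
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();

    if (c == NULL)
        return 0;
    ok = EVP_EncryptInit_ex(c, EVP_aes_128_ctr(), NULL, key, iv)
        && EVP_EncryptUpdate(c, out, &outl, in, (int)sizeof(in))
        && EVP_EncryptFinal_ex(c, out + outl, &tmpl);
    EVP_CIPHER_CTX_free(c);
    return ok;
}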
2529 static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
2530 {
2531 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2532 if (gctx == NULL)
2533 return 0;
2534 OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
2535 if (gctx->iv != EVP_CIPHER_CTX_iv_noconst(c))
2536 OPENSSL_free(gctx->iv);
2537 return 1;
2538 }
2539
2540 static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2541 {
2542 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2543 switch (type) {
2544 case EVP_CTRL_INIT:
2545 gctx->key_set = 0;
2546 gctx->iv_set = 0;
2547 gctx->ivlen = EVP_CIPHER_iv_length(c->cipher);
2548 gctx->iv = c->iv;
2549 gctx->taglen = -1;
2550 gctx->iv_gen = 0;
2551 gctx->tls_aad_len = -1;
2552 return 1;
2553
2554 case EVP_CTRL_GET_IVLEN:
2555 *(int *)ptr = gctx->ivlen;
2556 return 1;
2557
2558 case EVP_CTRL_AEAD_SET_IVLEN:
2559 if (arg <= 0)
2560 return 0;
2561 /* Allocate memory for IV if needed */
2562 if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
2563 if (gctx->iv != c->iv)
2564 OPENSSL_free(gctx->iv);
2565 if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) {
2566 EVPerr(EVP_F_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
2567 return 0;
2568 }
2569 }
2570 gctx->ivlen = arg;
2571 return 1;
2572
2573 case EVP_CTRL_AEAD_SET_TAG:
2574 if (arg <= 0 || arg > 16 || c->encrypt)
2575 return 0;
2576 memcpy(c->buf, ptr, arg);
2577 gctx->taglen = arg;
2578 return 1;
2579
2580 case EVP_CTRL_AEAD_GET_TAG:
2581 if (arg <= 0 || arg > 16 || !c->encrypt
2582 || gctx->taglen < 0)
2583 return 0;
2584 memcpy(ptr, c->buf, arg);
2585 return 1;
2586
2587 case EVP_CTRL_GET_IV:
2588 if (gctx->iv_gen != 1 && gctx->iv_gen_rand != 1)
2589 return 0;
2590 if (gctx->ivlen != arg)
2591 return 0;
2592 memcpy(ptr, gctx->iv, arg);
2593 return 1;
2594
2595 case EVP_CTRL_GCM_SET_IV_FIXED:
2596 /* Special case: -1 length restores whole IV */
2597 if (arg == -1) {
2598 memcpy(gctx->iv, ptr, gctx->ivlen);
2599 gctx->iv_gen = 1;
2600 return 1;
2601 }
2602 /*
2603 * Fixed field must be at least 4 bytes and invocation field at least
2604 * 8.
2605 */
2606 if ((arg < 4) || (gctx->ivlen - arg) < 8)
2607 return 0;
2608 if (arg)
2609 memcpy(gctx->iv, ptr, arg);
2610 if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
2611 return 0;
2612 gctx->iv_gen = 1;
2613 return 1;
2614
2615 case EVP_CTRL_GCM_IV_GEN:
2616 if (gctx->iv_gen == 0 || gctx->key_set == 0)
2617 return 0;
2618 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2619 if (arg <= 0 || arg > gctx->ivlen)
2620 arg = gctx->ivlen;
2621 memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
2622 /*
2623 * Invocation field will be at least 8 bytes in size and so no need
2624 * to check wrap around or increment more than last 8 bytes.
2625 */
2626 ctr64_inc(gctx->iv + gctx->ivlen - 8);
2627 gctx->iv_set = 1;
2628 return 1;
2629
2630 case EVP_CTRL_GCM_SET_IV_INV:
2631 if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
2632 return 0;
2633 memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
2634 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2635 gctx->iv_set = 1;
2636 return 1;
2637
2638 case EVP_CTRL_AEAD_TLS1_AAD:
2639 /* Save the AAD for later use */
2640 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2641 return 0;
2642 memcpy(c->buf, ptr, arg);
2643 gctx->tls_aad_len = arg;
2644 gctx->tls_enc_records = 0;
2645 {
2646 unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
2647 /* Correct length for explicit IV */
2648 if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
2649 return 0;
2650 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
2651 /* If decrypting correct for tag too */
2652 if (!c->encrypt) {
2653 if (len < EVP_GCM_TLS_TAG_LEN)
2654 return 0;
2655 len -= EVP_GCM_TLS_TAG_LEN;
2656 }
2657 c->buf[arg - 2] = len >> 8;
2658 c->buf[arg - 1] = len & 0xff;
2659 }
2660 /* Extra padding: tag appended to record */
2661 return EVP_GCM_TLS_TAG_LEN;
2662
2663 case EVP_CTRL_COPY:
2664 {
2665 EVP_CIPHER_CTX *out = ptr;
2666 EVP_AES_GCM_CTX *gctx_out = EVP_C_DATA(EVP_AES_GCM_CTX,out);
2667 if (gctx->gcm.key) {
2668 if (gctx->gcm.key != &gctx->ks)
2669 return 0;
2670 gctx_out->gcm.key = &gctx_out->ks;
2671 }
2672 if (gctx->iv == c->iv)
2673 gctx_out->iv = out->iv;
2674 else {
2675 if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) {
2676 EVPerr(EVP_F_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
2677 return 0;
2678 }
2679 memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
2680 }
2681 return 1;
2682 }
2683
2684 default:
2685 return -1;
2686
2687 }
2688 }
2689
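/*
 * Illustrative sketch, not part of the original source: how the
 * EVP_CTRL_GCM_SET_IV_FIXED / EVP_CTRL_GCM_IV_GEN pair handled above is
 * typically driven. With the default 12-byte IV, a 4-byte fixed field
 * leaves the required 8-byte invocation field. "c" must already have a
 * key set for IV_GEN to succeed; "fixed" is a placeholder.
 */
static int gcm_iv_gen_example(EVP_CIPHER_CTX *c)
{
    static const unsigned char fixed[4];  /* placeholder fixed field */
    unsigned char explicit_iv[8];         /* receives the invocation field */

    return EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_GCM_SET_IV_FIXED,
                               (int)sizeof(fixed), (void *)fixed)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_GCM_IV_GEN,
                               (int)sizeof(explicit_iv), explicit_iv);
}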
2690 static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2691 const unsigned char *iv, int enc)
2692 {
2693 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2694 if (iv == NULL && key == NULL)
2695 return 1;
2696 if (key) {
2697 do {
2698 #ifdef HWAES_CAPABLE
2699 if (HWAES_CAPABLE) {
2700 HWAES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2701 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2702 (block128_f) HWAES_encrypt);
2703 # ifdef HWAES_ctr32_encrypt_blocks
2704 gctx->ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2705 # else
2706 gctx->ctr = NULL;
2707 # endif
2708 break;
2709 } else
2710 #endif
2711 #ifdef BSAES_CAPABLE
2712 if (BSAES_CAPABLE) {
2713 AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2714 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2715 (block128_f) AES_encrypt);
2716 gctx->ctr = (ctr128_f) bsaes_ctr32_encrypt_blocks;
2717 break;
2718 } else
2719 #endif
2720 #ifdef VPAES_CAPABLE
2721 if (VPAES_CAPABLE) {
2722 vpaes_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2723 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2724 (block128_f) vpaes_encrypt);
2725 gctx->ctr = NULL;
2726 break;
2727 } else
2728 #endif
2729 (void)0; /* terminate potentially open 'else' */
2730
2731 AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
2732 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2733 (block128_f) AES_encrypt);
2734 #ifdef AES_CTR_ASM
2735 gctx->ctr = (ctr128_f) AES_ctr32_encrypt;
2736 #else
2737 gctx->ctr = NULL;
2738 #endif
2739 } while (0);
2740
2741 /*
2742 * If we have an iv we can set it directly, otherwise use saved IV.
2743 */
2744 if (iv == NULL && gctx->iv_set)
2745 iv = gctx->iv;
2746 if (iv) {
2747 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2748 gctx->iv_set = 1;
2749 }
2750 gctx->key_set = 1;
2751 } else {
2752 /* If key set use IV, otherwise copy */
2753 if (gctx->key_set)
2754 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2755 else
2756 memcpy(gctx->iv, iv, gctx->ivlen);
2757 gctx->iv_set = 1;
2758 gctx->iv_gen = 0;
2759 }
2760 return 1;
2761 }
2762
2763 /*
2764 * Handle TLS GCM packet format. This consists of the last portion of the IV
2765 * followed by the payload and finally the tag. On encrypt generate IV,
2766 * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
2767 * and verify tag.
2768 */
2769
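/*
 * Illustrative sketch, not part of the original source: the record body
 * described in the comment above and handled by aes_gcm_tls_cipher()
 * below is laid out as
 *
 *     explicit IV (8) || ciphertext (plen) || tag (16)
 *
 * so this hypothetical helper gives the encrypted record body size for a
 * plaintext of plen bytes.
 */
static size_t gcm_tls_record_body_len(size_t plen)
{
    return EVP_GCM_TLS_EXPLICIT_IV_LEN + plen + EVP_GCM_TLS_TAG_LEN;
}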
2770 static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2771 const unsigned char *in, size_t len)
2772 {
2773 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2774 int rv = -1;
2775 /* Encrypt/decrypt must be performed in place */
2776 if (out != in
2777 || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
2778 return -1;
2779
2780 /*
2781 * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
2782 * Requirements from SP 800-38D". The requirement is for one party to the
2783 * communication to fail after 2^64 - 1 keys. We do this on the encrypting
2784 * side only.
2785 */
2786 if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
2787 EVPerr(EVP_F_AES_GCM_TLS_CIPHER, EVP_R_TOO_MANY_RECORDS);
2788 goto err;
2789 }
2790
2791 /*
2792 * Set IV from start of buffer or generate IV and write to start of
2793 * buffer.
2794 */
2795 if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? EVP_CTRL_GCM_IV_GEN
2796 : EVP_CTRL_GCM_SET_IV_INV,
2797 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
2798 goto err;
2799 /* Use saved AAD */
2800 if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len))
2801 goto err;
2802 /* Fix buffer and length to point to payload */
2803 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2804 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2805 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2806 if (ctx->encrypt) {
2807 /* Encrypt payload */
2808 if (gctx->ctr) {
2809 size_t bulk = 0;
2810 #if defined(AES_GCM_ASM)
2811 if (len >= 32 && AES_GCM_ASM(gctx)) {
2812 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2813 return -1;
2814
2815 bulk = AES_gcm_encrypt(in, out, len,
2816 gctx->gcm.key,
2817 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2818 gctx->gcm.len.u[1] += bulk;
2819 }
2820 #endif
2821 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2822 in + bulk,
2823 out + bulk,
2824 len - bulk, gctx->ctr))
2825 goto err;
2826 } else {
2827 size_t bulk = 0;
2828 #if defined(AES_GCM_ASM2)
2829 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2830 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2831 return -1;
2832
2833 bulk = AES_gcm_encrypt(in, out, len,
2834 gctx->gcm.key,
2835 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2836 gctx->gcm.len.u[1] += bulk;
2837 }
2838 #endif
2839 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
2840 in + bulk, out + bulk, len - bulk))
2841 goto err;
2842 }
2843 out += len;
2844 /* Finally write tag */
2845 CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN);
2846 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2847 } else {
2848 /* Decrypt */
2849 if (gctx->ctr) {
2850 size_t bulk = 0;
2851 #if defined(AES_GCM_ASM)
2852 if (len >= 16 && AES_GCM_ASM(gctx)) {
2853 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2854 return -1;
2855
2856 bulk = AES_gcm_decrypt(in, out, len,
2857 gctx->gcm.key,
2858 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2859 gctx->gcm.len.u[1] += bulk;
2860 }
2861 #endif
2862 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
2863 in + bulk,
2864 out + bulk,
2865 len - bulk, gctx->ctr))
2866 goto err;
2867 } else {
2868 size_t bulk = 0;
2869 #if defined(AES_GCM_ASM2)
2870 if (len >= 16 && AES_GCM_ASM2(gctx)) {
2871 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2872 return -1;
2873
2874 bulk = AES_gcm_decrypt(in, out, len,
2875 gctx->gcm.key,
2876 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2877 gctx->gcm.len.u[1] += bulk;
2878 }
2879 #endif
2880 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
2881 in + bulk, out + bulk, len - bulk))
2882 goto err;
2883 }
2884 /* Retrieve tag */
2885 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN);
2886 /* If tag mismatch wipe buffer */
2887 if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) {
2888 OPENSSL_cleanse(out, len);
2889 goto err;
2890 }
2891 rv = len;
2892 }
2893
2894 err:
2895 gctx->iv_set = 0;
2896 gctx->tls_aad_len = -1;
2897 return rv;
2898 }
2899
2900 #ifdef FIPS_MODE
2901 /*
2902 * See SP800-38D (GCM) Section 8 "Uniqueness requirement on IVS and keys"
2903 *
2904 * See also 8.2.2 RBG-based construction.
2905 * Random construction consists of a free field (which can be NULL) and a
2906 * random field which will use a DRBG that can return at least 96 bits of
2907 * entropy strength. (The DRBG must be seeded by the FIPS module).
2908 */
2909 static int aes_gcm_iv_generate(EVP_AES_GCM_CTX *gctx, int offset)
2910 {
2911 int sz = gctx->ivlen - offset;
2912
2913 /* Must be at least 96 bits */
2914 if (sz <= 0 || gctx->ivlen < 12)
2915 return 0;
2916
2917 /* Use DRBG to generate random iv */
2918 if (RAND_bytes(gctx->iv + offset, sz) <= 0)
2919 return 0;
2920 return 1;
2921 }
2922 #endif /* FIPS_MODE */
2923
2924 static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2925 const unsigned char *in, size_t len)
2926 {
2927 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2928
2929 /* If not set up, return error */
2930 if (!gctx->key_set)
2931 return -1;
2932
2933 if (gctx->tls_aad_len >= 0)
2934 return aes_gcm_tls_cipher(ctx, out, in, len);
2935
2936 #ifdef FIPS_MODE
2937 /*
2938 * FIPS requires generation of AES-GCM IV's inside the FIPS module.
2939 * The IV can still be set externally (the security policy will state that
2940 * this is not FIPS compliant). There are some applications
2941 * where setting the IV externally is the only option available.
2942 */
2943 if (!gctx->iv_set) {
2944 if (!ctx->encrypt || !aes_gcm_iv_generate(gctx, 0))
2945 return -1;
2946 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2947 gctx->iv_set = 1;
2948 gctx->iv_gen_rand = 1;
2949 }
2950 #else
2951 if (!gctx->iv_set)
2952 return -1;
2953 #endif /* FIPS_MODE */
2954
2955 if (in) {
2956 if (out == NULL) {
2957 if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
2958 return -1;
2959 } else if (ctx->encrypt) {
2960 if (gctx->ctr) {
2961 size_t bulk = 0;
2962 #if defined(AES_GCM_ASM)
2963 if (len >= 32 && AES_GCM_ASM(gctx)) {
2964 size_t res = (16 - gctx->gcm.mres) % 16;
2965
2966 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
2967 return -1;
2968
2969 bulk = AES_gcm_encrypt(in + res,
2970 out + res, len - res,
2971 gctx->gcm.key, gctx->gcm.Yi.c,
2972 gctx->gcm.Xi.u);
2973 gctx->gcm.len.u[1] += bulk;
2974 bulk += res;
2975 }
2976 #endif
2977 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2978 in + bulk,
2979 out + bulk,
2980 len - bulk, gctx->ctr))
2981 return -1;
2982 } else {
2983 size_t bulk = 0;
2984 #if defined(AES_GCM_ASM2)
2985 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2986 size_t res = (16 - gctx->gcm.mres) % 16;
2987
2988 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
2989 return -1;
2990
2991 bulk = AES_gcm_encrypt(in + res,
2992 out + res, len - res,
2993 gctx->gcm.key, gctx->gcm.Yi.c,
2994 gctx->gcm.Xi.u);
2995 gctx->gcm.len.u[1] += bulk;
2996 bulk += res;
2997 }
2998 #endif
2999 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3000 in + bulk, out + bulk, len - bulk))
3001 return -1;
3002 }
3003 } else {
3004 if (gctx->ctr) {
3005 size_t bulk = 0;
3006 #if defined(AES_GCM_ASM)
3007 if (len >= 16 && AES_GCM_ASM(gctx)) {
3008 size_t res = (16 - gctx->gcm.mres) % 16;
3009
3010 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3011 return -1;
3012
3013 bulk = AES_gcm_decrypt(in + res,
3014 out + res, len - res,
3015 gctx->gcm.key,
3016 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3017 gctx->gcm.len.u[1] += bulk;
3018 bulk += res;
3019 }
3020 #endif
3021 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3022 in + bulk,
3023 out + bulk,
3024 len - bulk, gctx->ctr))
3025 return -1;
3026 } else {
3027 size_t bulk = 0;
3028 #if defined(AES_GCM_ASM2)
3029 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3030 size_t res = (16 - gctx->gcm.mres) % 16;
3031
3032 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3033 return -1;
3034
3035 bulk = AES_gcm_decrypt(in + res,
3036 out + res, len - res,
3037 gctx->gcm.key,
3038 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3039 gctx->gcm.len.u[1] += bulk;
3040 bulk += res;
3041 }
3042 #endif
3043 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3044 in + bulk, out + bulk, len - bulk))
3045 return -1;
3046 }
3047 }
3048 return len;
3049 } else {
3050 if (!ctx->encrypt) {
3051 if (gctx->taglen < 0)
3052 return -1;
3053 if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0)
3054 return -1;
3055 gctx->iv_set = 0;
3056 return 0;
3057 }
3058 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
3059 gctx->taglen = 16;
3060 /* Don't reuse the IV */
3061 gctx->iv_set = 0;
3062 return 0;
3063 }
3064
3065 }
3066
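/*
 * Illustrative sketch, not part of the original source: the decrypt-side
 * EVP call sequence served by aes_gcm_cipher() above. All parameters are
 * caller-supplied placeholders (key is 32 bytes here); on a tag mismatch
 * EVP_DecryptFinal_ex() fails and the plaintext must be discarded.
 */
static int gcm_decrypt_example(const unsigned char *key,
                               const unsigned char *iv, int ivlen,
                               const unsigned char *aad, int aadlen,
                               const unsigned char *ct, int ctlen,
                               unsigned char *tag, int taglen,
                               unsigned char *pt)
{
    int outl, tmpl, ok;
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();

    if (c == NULL)
        return 0;
    ok = EVP_DecryptInit_ex(c, EVP_aes_256_gcm(), NULL, NULL, NULL)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, ivlen, NULL)
        && EVP_DecryptInit_ex(c, NULL, NULL, key, iv)
        && EVP_DecryptUpdate(c, NULL, &outl, aad, aadlen)   /* AAD pass */
        && EVP_DecryptUpdate(c, pt, &outl, ct, ctlen)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, taglen, tag)
        && EVP_DecryptFinal_ex(c, pt + outl, &tmpl);        /* verifies tag */
    EVP_CIPHER_CTX_free(c);
    return ok;
}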
3067 #define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \
3068 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3069 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3070 | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH)
3071
3072 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM,
3073 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3074 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, gcm, GCM,
3075 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3076 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, gcm, GCM,
3077 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3078
3079 static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3080 {
3081 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX, c);
3082
3083 if (type == EVP_CTRL_COPY) {
3084 EVP_CIPHER_CTX *out = ptr;
3085 EVP_AES_XTS_CTX *xctx_out = EVP_C_DATA(EVP_AES_XTS_CTX,out);
3086
3087 if (xctx->xts.key1) {
3088 if (xctx->xts.key1 != &xctx->ks1)
3089 return 0;
3090 xctx_out->xts.key1 = &xctx_out->ks1;
3091 }
3092 if (xctx->xts.key2) {
3093 if (xctx->xts.key2 != &xctx->ks2)
3094 return 0;
3095 xctx_out->xts.key2 = &xctx_out->ks2;
3096 }
3097 return 1;
3098 } else if (type != EVP_CTRL_INIT)
3099 return -1;
3100 /* key1 and key2 are used as an indicator that both key and IV are set */
3101 xctx->xts.key1 = NULL;
3102 xctx->xts.key2 = NULL;
3103 return 1;
3104 }
3105
3106 static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3107 const unsigned char *iv, int enc)
3108 {
3109 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3110
3111 if (iv == NULL && key == NULL)
3112 return 1;
3113
3114 if (key) {
3115 do {
3116 /* The key is really two half-length keys */
3117 const int bytes = EVP_CIPHER_CTX_key_length(ctx) / 2;
3118 const int bits = bytes * 8;
3119
3120 /*
3121 * Verify that the two keys are different.
3122 *
3123 * This addresses the vulnerability described in Rogaway's
3124 * September 2004 paper:
3125 *
3126 * "Efficient Instantiations of Tweakable Blockciphers and
3127 * Refinements to Modes OCB and PMAC".
3128 * (http://web.cs.ucdavis.edu/~rogaway/papers/offsets.pdf)
3129 *
3130 * FIPS 140-2 IG A.9 XTS-AES Key Generation Requirements states
3131 * that:
3132 * "The check for Key_1 != Key_2 shall be done at any place
3133 * BEFORE using the keys in the XTS-AES algorithm to process
3134 * data with them."
3135 */
3136 if ((!allow_insecure_decrypt || enc)
3137 && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
3138 EVPerr(EVP_F_AES_XTS_INIT_KEY, EVP_R_XTS_DUPLICATED_KEYS);
3139 return 0;
3140 }
3141
3142 #ifdef AES_XTS_ASM
3143 xctx->stream = enc ? AES_xts_encrypt : AES_xts_decrypt;
3144 #else
3145 xctx->stream = NULL;
3146 #endif
3147 /* key_len is two AES keys */
3148 #ifdef HWAES_CAPABLE
3149 if (HWAES_CAPABLE) {
3150 if (enc) {
3151 HWAES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3152 xctx->xts.block1 = (block128_f) HWAES_encrypt;
3153 # ifdef HWAES_xts_encrypt
3154 xctx->stream = HWAES_xts_encrypt;
3155 # endif
3156 } else {
3157 HWAES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3158 xctx->xts.block1 = (block128_f) HWAES_decrypt;
3159 # ifdef HWAES_xts_decrypt
3160 xctx->stream = HWAES_xts_decrypt;
3161 #endif
3162 }
3163
3164 HWAES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3165 xctx->xts.block2 = (block128_f) HWAES_encrypt;
3166
3167 xctx->xts.key1 = &xctx->ks1;
3168 break;
3169 } else
3170 #endif
3171 #ifdef BSAES_CAPABLE
3172 if (BSAES_CAPABLE)
3173 xctx->stream = enc ? bsaes_xts_encrypt : bsaes_xts_decrypt;
3174 else
3175 #endif
3176 #ifdef VPAES_CAPABLE
3177 if (VPAES_CAPABLE) {
3178 if (enc) {
3179 vpaes_set_encrypt_key(key, bits, &xctx->ks1.ks);
3180 xctx->xts.block1 = (block128_f) vpaes_encrypt;
3181 } else {
3182 vpaes_set_decrypt_key(key, bits, &xctx->ks1.ks);
3183 xctx->xts.block1 = (block128_f) vpaes_decrypt;
3184 }
3185
3186 vpaes_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3187 xctx->xts.block2 = (block128_f) vpaes_encrypt;
3188
3189 xctx->xts.key1 = &xctx->ks1;
3190 break;
3191 } else
3192 #endif
3193 (void)0; /* terminate potentially open 'else' */
3194
3195 if (enc) {
3196 AES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3197 xctx->xts.block1 = (block128_f) AES_encrypt;
3198 } else {
3199 AES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3200 xctx->xts.block1 = (block128_f) AES_decrypt;
3201 }
3202
3203 AES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3204 xctx->xts.block2 = (block128_f) AES_encrypt;
3205
3206 xctx->xts.key1 = &xctx->ks1;
3207 } while (0);
3208 }
3209
3210 if (iv) {
3211 xctx->xts.key2 = &xctx->ks2;
3212 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 16);
3213 }
3214
3215 return 1;
3216 }
3217
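/*
 * Illustrative note, not part of the original source: the EVP key is two
 * AES keys of half the EVP key length (e.g. EVP_aes_256_xts() takes 64
 * bytes), and this hypothetical check mirrors the duplicate-key
 * rejection performed in aes_xts_init_key() above.
 */
static int xts_key_halves_differ(const unsigned char *key, int evp_keylen)
{
    return CRYPTO_memcmp(key, key + evp_keylen / 2, evp_keylen / 2) != 0;
}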
3218 static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3219 const unsigned char *in, size_t len)
3220 {
3221 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3222
3223 if (xctx->xts.key1 == NULL
3224 || xctx->xts.key2 == NULL
3225 || out == NULL
3226 || in == NULL
3227 || len < AES_BLOCK_SIZE)
3228 return 0;
3229
3230 /*
3231 * Impose a limit of 2^20 blocks per data unit as specified by
3232 * IEEE Std 1619-2018. The earlier and obsolete IEEE Std 1619-2007
3233 * indicated that this was a SHOULD NOT rather than a MUST NOT.
3234 * NIST SP 800-38E mandates the same limit.
3235 */
3236 if (len > XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE) {
3237 EVPerr(EVP_F_AES_XTS_CIPHER, EVP_R_XTS_DATA_UNIT_IS_TOO_LARGE);
3238 return 0;
3239 }
3240
3241 if (xctx->stream)
3242 (*xctx->stream) (in, out, len,
3243 xctx->xts.key1, xctx->xts.key2,
3244 EVP_CIPHER_CTX_iv_noconst(ctx));
3245 else if (CRYPTO_xts128_encrypt(&xctx->xts, EVP_CIPHER_CTX_iv_noconst(ctx),
3246 in, out, len,
3247 EVP_CIPHER_CTX_encrypting(ctx)))
3248 return 0;
3249 return 1;
3250 }
3251
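/*
 * Illustrative note, not part of the original source: with 16-byte
 * blocks the 2^20-block ceiling enforced above caps one data unit at
 * 2^20 * 16 bytes = 16 MiB.
 */
static int xts_data_unit_len_ok(size_t len)
{
    return len <= XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE;
}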
3252 #define aes_xts_cleanup NULL
3253
3254 #define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \
3255 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3256 | EVP_CIPH_CUSTOM_COPY)
3257
3258 BLOCK_CIPHER_custom(NID_aes, 128, 1, 16, xts, XTS, XTS_FLAGS)
3259 BLOCK_CIPHER_custom(NID_aes, 256, 1, 16, xts, XTS, XTS_FLAGS)
3260
3261 static int aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3262 {
3263 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,c);
3264 switch (type) {
3265 case EVP_CTRL_INIT:
3266 cctx->key_set = 0;
3267 cctx->iv_set = 0;
3268 cctx->L = 8;
3269 cctx->M = 12;
3270 cctx->tag_set = 0;
3271 cctx->len_set = 0;
3272 cctx->tls_aad_len = -1;
3273 return 1;
3274
3275 case EVP_CTRL_GET_IVLEN:
3276 *(int *)ptr = 15 - cctx->L;
3277 return 1;
3278
3279 case EVP_CTRL_AEAD_TLS1_AAD:
3280 /* Save the AAD for later use */
3281 if (arg != EVP_AEAD_TLS1_AAD_LEN)
3282 return 0;
3283 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3284 cctx->tls_aad_len = arg;
3285 {
3286 uint16_t len =
3287 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
3288 | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
3289 /* Correct length for explicit IV */
3290 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
3291 return 0;
3292 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
3293 /* If decrypting correct for tag too */
3294 if (!EVP_CIPHER_CTX_encrypting(c)) {
3295 if (len < cctx->M)
3296 return 0;
3297 len -= cctx->M;
3298 }
3299 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
3300 EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
3301 }
3302 /* Extra padding: tag appended to record */
3303 return cctx->M;
3304
3305 case EVP_CTRL_CCM_SET_IV_FIXED:
3306 /* Sanity check length */
3307 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
3308 return 0;
3309 /* Just copy to first part of IV */
3310 memcpy(EVP_CIPHER_CTX_iv_noconst(c), ptr, arg);
3311 return 1;
3312
3313 case EVP_CTRL_AEAD_SET_IVLEN:
3314 arg = 15 - arg;
3315 /* fall through */
3316 case EVP_CTRL_CCM_SET_L:
3317 if (arg < 2 || arg > 8)
3318 return 0;
3319 cctx->L = arg;
3320 return 1;
3321
3322 case EVP_CTRL_AEAD_SET_TAG:
3323 if ((arg & 1) || arg < 4 || arg > 16)
3324 return 0;
3325 if (EVP_CIPHER_CTX_encrypting(c) && ptr)
3326 return 0;
3327 if (ptr) {
3328 cctx->tag_set = 1;
3329 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3330 }
3331 cctx->M = arg;
3332 return 1;
3333
3334 case EVP_CTRL_AEAD_GET_TAG:
3335 if (!EVP_CIPHER_CTX_encrypting(c) || !cctx->tag_set)
3336 return 0;
3337 if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg))
3338 return 0;
3339 cctx->tag_set = 0;
3340 cctx->iv_set = 0;
3341 cctx->len_set = 0;
3342 return 1;
3343
3344 case EVP_CTRL_COPY:
3345 {
3346 EVP_CIPHER_CTX *out = ptr;
3347 EVP_AES_CCM_CTX *cctx_out = EVP_C_DATA(EVP_AES_CCM_CTX,out);
3348 if (cctx->ccm.key) {
3349 if (cctx->ccm.key != &cctx->ks)
3350 return 0;
3351 cctx_out->ccm.key = &cctx_out->ks;
3352 }
3353 return 1;
3354 }
3355
3356 default:
3357 return -1;
3358
3359 }
3360 }
3361
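/*
 * Illustrative sketch, not part of the original source: the nonce length
 * and L always satisfy nonce_len = 15 - L, so EVP_CTRL_AEAD_SET_IVLEN is
 * just the mirror image of EVP_CTRL_CCM_SET_L in the handler above. The
 * two calls below both request a 13-byte nonce (L = 2), which bounds the
 * message length at 2^16 - 1 bytes.
 */
static int ccm_nonce_len_example(EVP_CIPHER_CTX *c)
{
    return EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 13, NULL)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_CCM_SET_L, 2, NULL);
}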
3362 static int aes_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3363 const unsigned char *iv, int enc)
3364 {
3365 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3366 if (iv == NULL && key == NULL)
3367 return 1;
3368 if (key)
3369 do {
3370 #ifdef HWAES_CAPABLE
3371 if (HWAES_CAPABLE) {
3372 HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3373 &cctx->ks.ks);
3374
3375 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3376 &cctx->ks, (block128_f) HWAES_encrypt);
3377 cctx->str = NULL;
3378 cctx->key_set = 1;
3379 break;
3380 } else
3381 #endif
3382 #ifdef VPAES_CAPABLE
3383 if (VPAES_CAPABLE) {
3384 vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3385 &cctx->ks.ks);
3386 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3387 &cctx->ks, (block128_f) vpaes_encrypt);
3388 cctx->str = NULL;
3389 cctx->key_set = 1;
3390 break;
3391 }
3392 #endif
3393 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3394 &cctx->ks.ks);
3395 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3396 &cctx->ks, (block128_f) AES_encrypt);
3397 cctx->str = NULL;
3398 cctx->key_set = 1;
3399 } while (0);
3400 if (iv) {
3401 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L);
3402 cctx->iv_set = 1;
3403 }
3404 return 1;
3405 }
3406
3407 static int aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3408 const unsigned char *in, size_t len)
3409 {
3410 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3411 CCM128_CONTEXT *ccm = &cctx->ccm;
3412 /* Encrypt/decrypt must be performed in place */
3413 if (out != in || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->M))
3414 return -1;
3415 /* If encrypting set explicit IV from sequence number (start of AAD) */
3416 if (EVP_CIPHER_CTX_encrypting(ctx))
3417 memcpy(out, EVP_CIPHER_CTX_buf_noconst(ctx),
3418 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3419 /* Get rest of IV from explicit IV */
3420 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx) + EVP_CCM_TLS_FIXED_IV_LEN, in,
3421 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3422 /* Correct length value */
3423 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3424 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), 15 - cctx->L,
3425 len))
3426 return -1;
3427 /* Use saved AAD */
3428 CRYPTO_ccm128_aad(ccm, EVP_CIPHER_CTX_buf_noconst(ctx), cctx->tls_aad_len);
3429 /* Fix buffer to point to payload */
3430 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3431 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3432 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3433 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3434 cctx->str) :
3435 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3436 return -1;
3437 if (!CRYPTO_ccm128_tag(ccm, out + len, cctx->M))
3438 return -1;
3439 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3440 } else {
3441 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3442 cctx->str) :
3443 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3444 unsigned char tag[16];
3445 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3446 if (!CRYPTO_memcmp(tag, in + len, cctx->M))
3447 return len;
3448 }
3449 }
3450 OPENSSL_cleanse(out, len);
3451 return -1;
3452 }
3453 }
3454
3455 static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3456 const unsigned char *in, size_t len)
3457 {
3458 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3459 CCM128_CONTEXT *ccm = &cctx->ccm;
3460 /* If not set up, return error */
3461 if (!cctx->key_set)
3462 return -1;
3463
3464 if (cctx->tls_aad_len >= 0)
3465 return aes_ccm_tls_cipher(ctx, out, in, len);
3466
3467 /* EVP_*Final() doesn't return any data */
3468 if (in == NULL && out != NULL)
3469 return 0;
3470
3471 if (!cctx->iv_set)
3472 return -1;
3473
3474 if (!out) {
3475 if (!in) {
3476 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx),
3477 15 - cctx->L, len))
3478 return -1;
3479 cctx->len_set = 1;
3480 return len;
3481 }
3482 /* If have AAD need message length */
3483 if (!cctx->len_set && len)
3484 return -1;
3485 CRYPTO_ccm128_aad(ccm, in, len);
3486 return len;
3487 }
3488
3489 /* The tag must be set before actually decrypting data */
3490 if (!EVP_CIPHER_CTX_encrypting(ctx) && !cctx->tag_set)
3491 return -1;
3492
3493 /* If not set length yet do it */
3494 if (!cctx->len_set) {
3495 if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx),
3496 15 - cctx->L, len))
3497 return -1;
3498 cctx->len_set = 1;
3499 }
3500 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3501 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3502 cctx->str) :
3503 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3504 return -1;
3505 cctx->tag_set = 1;
3506 return len;
3507 } else {
3508 int rv = -1;
3509 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3510 cctx->str) :
3511 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3512 unsigned char tag[16];
3513 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3514 if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx),
3515 cctx->M))
3516 rv = len;
3517 }
3518 }
3519 if (rv == -1)
3520 OPENSSL_cleanse(out, len);
3521 cctx->iv_set = 0;
3522 cctx->tag_set = 0;
3523 cctx->len_set = 0;
3524 return rv;
3525 }
3526 }
3527
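/*
 * Illustrative sketch, not part of the original source: as enforced
 * above, CCM decryption must be given the expected tag before the
 * ciphertext; the tag is then verified inside the data Update() call.
 * Buffer parameters are caller-supplied placeholders.
 */
static int ccm_decrypt_example(const unsigned char *key,   /* 32 bytes */
                               const unsigned char *nonce, /* 13 bytes */
                               unsigned char *tag,         /* 12 bytes */
                               const unsigned char *ct, int ctlen,
                               unsigned char *pt)
{
    int outl, ok;
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();

    if (c == NULL)
        return 0;
    ok = EVP_DecryptInit_ex(c, EVP_aes_256_ccm(), NULL, NULL, NULL)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 13, NULL)
        && EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 12, tag)
        && EVP_DecryptInit_ex(c, NULL, NULL, key, nonce)
        && EVP_DecryptUpdate(c, NULL, &outl, NULL, ctlen)  /* total length */
        && EVP_DecryptUpdate(c, pt, &outl, ct, ctlen);     /* fails on bad tag */
    EVP_CIPHER_CTX_free(c);
    return ok;
}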
3528 #define aes_ccm_cleanup NULL
3529
3530 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, ccm, CCM,
3531 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3532 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, ccm, CCM,
3533 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3534 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, ccm, CCM,
3535 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3536
3537 typedef struct {
3538 union {
3539 OSSL_UNION_ALIGN;
3540 AES_KEY ks;
3541 } ks;
3542 /* IV store; NULL indicates no IV has been set */
3543 unsigned char *iv;
3544 } EVP_AES_WRAP_CTX;
3545
3546 static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3547 const unsigned char *iv, int enc)
3548 {
3549 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3550 if (iv == NULL && key == NULL)
3551 return 1;
3552 if (key) {
3553 if (EVP_CIPHER_CTX_encrypting(ctx))
3554 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3555 &wctx->ks.ks);
3556 else
3557 AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3558 &wctx->ks.ks);
3559 if (!iv)
3560 wctx->iv = NULL;
3561 }
3562 if (iv) {
3563 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, EVP_CIPHER_CTX_iv_length(ctx));
3564 wctx->iv = EVP_CIPHER_CTX_iv_noconst(ctx);
3565 }
3566 return 1;
3567 }
3568
3569 static int aes_wrap_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3570 const unsigned char *in, size_t inlen)
3571 {
3572 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3573 size_t rv;
3574 /* AES wrap with padding has IV length of 4, without padding 8 */
3575 int pad = EVP_CIPHER_CTX_iv_length(ctx) == 4;
3576 /* No final operation so always return zero length */
3577 if (!in)
3578 return 0;
3579 /* Input length must always be non-zero */
3580 if (!inlen)
3581 return -1;
3582 /* If decrypting need at least 16 bytes and multiple of 8 */
3583 if (!EVP_CIPHER_CTX_encrypting(ctx) && (inlen < 16 || inlen & 0x7))
3584 return -1;
3585 /* If not padding input must be multiple of 8 */
3586 if (!pad && inlen & 0x7)
3587 return -1;
3588 if (is_partially_overlapping(out, in, inlen)) {
3589 EVPerr(EVP_F_AES_WRAP_CIPHER, EVP_R_PARTIALLY_OVERLAPPING);
3590 return 0;
3591 }
3592 if (!out) {
3593 if (EVP_CIPHER_CTX_encrypting(ctx)) {
3594 /* If padding round up to multiple of 8 */
3595 if (pad)
3596 inlen = (inlen + 7) / 8 * 8;
3597 /* 8 byte prefix */
3598 return inlen + 8;
3599 } else {
3600 /*
3601 * If not padding, the output will be exactly 8 bytes smaller than
3602 * the input. If padding, it will be at least 8 bytes smaller but
3603 * we don't know by how much.
3604 */
3605 return inlen - 8;
3606 }
3607 }
3608 if (pad) {
3609 if (EVP_CIPHER_CTX_encrypting(ctx))
3610 rv = CRYPTO_128_wrap_pad(&wctx->ks.ks, wctx->iv,
3611 out, in, inlen,
3612 (block128_f) AES_encrypt);
3613 else
3614 rv = CRYPTO_128_unwrap_pad(&wctx->ks.ks, wctx->iv,
3615 out, in, inlen,
3616 (block128_f) AES_decrypt);
3617 } else {
3618 if (EVP_CIPHER_CTX_encrypting(ctx))
3619 rv = CRYPTO_128_wrap(&wctx->ks.ks, wctx->iv,
3620 out, in, inlen, (block128_f) AES_encrypt);
3621 else
3622 rv = CRYPTO_128_unwrap(&wctx->ks.ks, wctx->iv,
3623 out, in, inlen, (block128_f) AES_decrypt);
3624 }
3625 return rv ? (int)rv : -1;
3626 }
3627
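/*
 * Illustrative sketch, not part of the original source: wrap mode must
 * be explicitly enabled with EVP_CIPHER_CTX_FLAG_WRAP_ALLOW, and per the
 * sizing logic above wrapping a 16-byte key under a 16-byte KEK emits
 * 16 + 8 = 24 bytes. kek/key_in/out are caller-supplied placeholders.
 */
static int wrap_example(const unsigned char *kek,
                        const unsigned char *key_in, /* 16 bytes */
                        unsigned char *out)          /* 24 bytes */
{
    int outl = 0, ok;
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();

    if (c == NULL)
        return 0;
    EVP_CIPHER_CTX_set_flags(c, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW);
    ok = EVP_EncryptInit_ex(c, EVP_aes_128_wrap(), NULL, kek, NULL)
        && EVP_EncryptUpdate(c, out, &outl, key_in, 16)
        && outl == 24;
    EVP_CIPHER_CTX_free(c);
    return ok;
}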
3628 #define WRAP_FLAGS (EVP_CIPH_WRAP_MODE \
3629 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3630 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_FLAG_DEFAULT_ASN1)
3631
3632 static const EVP_CIPHER aes_128_wrap = {
3633 NID_id_aes128_wrap,
3634 8, 16, 8, WRAP_FLAGS,
3635 aes_wrap_init_key, aes_wrap_cipher,
3636 NULL,
3637 sizeof(EVP_AES_WRAP_CTX),
3638 NULL, NULL, NULL, NULL
3639 };
3640
3641 const EVP_CIPHER *EVP_aes_128_wrap(void)
3642 {
3643 return &aes_128_wrap;
3644 }
3645
3646 static const EVP_CIPHER aes_192_wrap = {
3647 NID_id_aes192_wrap,
3648 8, 24, 8, WRAP_FLAGS,
3649 aes_wrap_init_key, aes_wrap_cipher,
3650 NULL,
3651 sizeof(EVP_AES_WRAP_CTX),
3652 NULL, NULL, NULL, NULL
3653 };
3654
3655 const EVP_CIPHER *EVP_aes_192_wrap(void)
3656 {
3657 return &aes_192_wrap;
3658 }
3659
3660 static const EVP_CIPHER aes_256_wrap = {
3661 NID_id_aes256_wrap,
3662 8, 32, 8, WRAP_FLAGS,
3663 aes_wrap_init_key, aes_wrap_cipher,
3664 NULL,
3665 sizeof(EVP_AES_WRAP_CTX),
3666 NULL, NULL, NULL, NULL
3667 };
3668
3669 const EVP_CIPHER *EVP_aes_256_wrap(void)
3670 {
3671 return &aes_256_wrap;
3672 }
3673
3674 static const EVP_CIPHER aes_128_wrap_pad = {
3675 NID_id_aes128_wrap_pad,
3676 8, 16, 4, WRAP_FLAGS,
3677 aes_wrap_init_key, aes_wrap_cipher,
3678 NULL,
3679 sizeof(EVP_AES_WRAP_CTX),
3680 NULL, NULL, NULL, NULL
3681 };
3682
3683 const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
3684 {
3685 return &aes_128_wrap_pad;
3686 }
3687
3688 static const EVP_CIPHER aes_192_wrap_pad = {
3689 NID_id_aes192_wrap_pad,
3690 8, 24, 4, WRAP_FLAGS,
3691 aes_wrap_init_key, aes_wrap_cipher,
3692 NULL,
3693 sizeof(EVP_AES_WRAP_CTX),
3694 NULL, NULL, NULL, NULL
3695 };
3696
3697 const EVP_CIPHER *EVP_aes_192_wrap_pad(void)
3698 {
3699 return &aes_192_wrap_pad;
3700 }
3701
3702 static const EVP_CIPHER aes_256_wrap_pad = {
3703 NID_id_aes256_wrap_pad,
3704 8, 32, 4, WRAP_FLAGS,
3705 aes_wrap_init_key, aes_wrap_cipher,
3706 NULL,
3707 sizeof(EVP_AES_WRAP_CTX),
3708 NULL, NULL, NULL, NULL
3709 };
3710
3711 const EVP_CIPHER *EVP_aes_256_wrap_pad(void)
3712 {
3713 return &aes_256_wrap_pad;
3714 }
3715
3716 #ifndef OPENSSL_NO_OCB
3717 static int aes_ocb_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3718 {
3719 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
3720 EVP_CIPHER_CTX *newc;
3721 EVP_AES_OCB_CTX *new_octx;
3722
3723 switch (type) {
3724 case EVP_CTRL_INIT:
3725 octx->key_set = 0;
3726 octx->iv_set = 0;
3727 octx->ivlen = EVP_CIPHER_iv_length(c->cipher);
3728 octx->iv = EVP_CIPHER_CTX_iv_noconst(c);
3729 octx->taglen = 16;
3730 octx->data_buf_len = 0;
3731 octx->aad_buf_len = 0;
3732 return 1;
3733
3734 case EVP_CTRL_GET_IVLEN:
3735 *(int *)ptr = octx->ivlen;
3736 return 1;
3737
3738 case EVP_CTRL_AEAD_SET_IVLEN:
3739 /* IV len must be 1 to 15 */
3740 if (arg <= 0 || arg > 15)
3741 return 0;
3742
3743 octx->ivlen = arg;
3744 return 1;
3745
3746 case EVP_CTRL_AEAD_SET_TAG:
3747 if (ptr == NULL) {
3748 /* Tag len must be 0 to 16 */
3749 if (arg < 0 || arg > 16)
3750 return 0;
3751
3752 octx->taglen = arg;
3753 return 1;
3754 }
3755 if (arg != octx->taglen || EVP_CIPHER_CTX_encrypting(c))
3756 return 0;
3757 memcpy(octx->tag, ptr, arg);
3758 return 1;
3759
3760 case EVP_CTRL_AEAD_GET_TAG:
3761 if (arg != octx->taglen || !EVP_CIPHER_CTX_encrypting(c))
3762 return 0;
3763
3764 memcpy(ptr, octx->tag, arg);
3765 return 1;
3766
3767 case EVP_CTRL_COPY:
3768 newc = (EVP_CIPHER_CTX *)ptr;
3769 new_octx = EVP_C_DATA(EVP_AES_OCB_CTX,newc);
3770 return CRYPTO_ocb128_copy_ctx(&new_octx->ocb, &octx->ocb,
3771 &new_octx->ksenc.ks,
3772 &new_octx->ksdec.ks);
3773
3774 default:
3775 return -1;
3776
3777 }
3778 }
3779
3780 static int aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3781 const unsigned char *iv, int enc)
3782 {
3783 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
3784 if (iv == NULL && key == NULL)
3785 return 1;
3786 if (key) {
3787 do {
3788 /*
3789 * We set both the encrypt and decrypt key here because decrypt
3790 * needs both. We could possibly optimise to remove setting the
3791 * decrypt for an encryption operation.
3792 */
3793 # ifdef HWAES_CAPABLE
3794 if (HWAES_CAPABLE) {
3795 HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3796 &octx->ksenc.ks);
3797 HWAES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3798 &octx->ksdec.ks);
3799 if (!CRYPTO_ocb128_init(&octx->ocb,
3800 &octx->ksenc.ks, &octx->ksdec.ks,
3801 (block128_f) HWAES_encrypt,
3802 (block128_f) HWAES_decrypt,
3803 enc ? HWAES_ocb_encrypt
3804 : HWAES_ocb_decrypt))
3805 return 0;
3806 break;
3807 }
3808 # endif
3809 # ifdef VPAES_CAPABLE
3810 if (VPAES_CAPABLE) {
3811 vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3812 &octx->ksenc.ks);
3813 vpaes_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3814 &octx->ksdec.ks);
3815 if (!CRYPTO_ocb128_init(&octx->ocb,
3816 &octx->ksenc.ks, &octx->ksdec.ks,
3817 (block128_f) vpaes_encrypt,
3818 (block128_f) vpaes_decrypt,
3819 NULL))
3820 return 0;
3821 break;
3822 }
3823 # endif
3824 AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3825 &octx->ksenc.ks);
3826 AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
3827 &octx->ksdec.ks);
3828 if (!CRYPTO_ocb128_init(&octx->ocb,
3829 &octx->ksenc.ks, &octx->ksdec.ks,
3830 (block128_f) AES_encrypt,
3831 (block128_f) AES_decrypt,
3832 NULL))
3833 return 0;
3834 } while (0);
3836
3837 /*
3838 * If we have an iv we can set it directly, otherwise use saved IV.
3839 */
3840 if (iv == NULL && octx->iv_set)
3841 iv = octx->iv;
3842 if (iv) {
3843 if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
3844 != 1)
3845 return 0;
3846 octx->iv_set = 1;
3847 }
3848 octx->key_set = 1;
3849 } else {
3850 /* If key set use IV, otherwise copy */
3851 if (octx->key_set)
3852 CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
3853 else
3854 memcpy(octx->iv, iv, octx->ivlen);
3855 octx->iv_set = 1;
3856 }
3857 return 1;
3858 }
3859
3860 static int aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3861 const unsigned char *in, size_t len)
3862 {
3863 unsigned char *buf;
3864 int *buf_len;
3865 int written_len = 0;
3866 size_t trailing_len;
3867 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
3868
3869 /* If IV or Key not set then return error */
3870 if (!octx->iv_set)
3871 return -1;
3872
3873 if (!octx->key_set)
3874 return -1;
3875
3876 if (in != NULL) {
3877 /*
3878 * Need to ensure we are only passing full blocks to low level OCB
3879 * routines. We do it here rather than in EVP_EncryptUpdate/
3880 * EVP_DecryptUpdate because we need to pass full blocks of AAD too
3881 * and those routines don't support that
3882 */
3883
3884 /* Are we dealing with AAD or normal data here? */
3885 if (out == NULL) {
3886 buf = octx->aad_buf;
3887 buf_len = &(octx->aad_buf_len);
3888 } else {
3889 buf = octx->data_buf;
3890 buf_len = &(octx->data_buf_len);
3891
3892 if (is_partially_overlapping(out + *buf_len, in, len)) {
3893 EVPerr(EVP_F_AES_OCB_CIPHER, EVP_R_PARTIALLY_OVERLAPPING);
3894 return 0;
3895 }
3896 }
3897
3898 /*
3899 * If we've got a partially filled buffer from a previous call then
3900 * use that data first
3901 */
3902 if (*buf_len > 0) {
3903 unsigned int remaining;
3904
3905 remaining = AES_BLOCK_SIZE - (*buf_len);
3906 if (remaining > len) {
3907 memcpy(buf + (*buf_len), in, len);
3908 *(buf_len) += len;
3909 return 0;
3910 }
3911 memcpy(buf + (*buf_len), in, remaining);
3912
3913 /*
3914 * If we get here we've filled the buffer, so process it
3915 */
3916 len -= remaining;
3917 in += remaining;
3918 if (out == NULL) {
3919 if (!CRYPTO_ocb128_aad(&octx->ocb, buf, AES_BLOCK_SIZE))
3920 return -1;
3921 } else if (EVP_CIPHER_CTX_encrypting(ctx)) {
3922 if (!CRYPTO_ocb128_encrypt(&octx->ocb, buf, out,
3923 AES_BLOCK_SIZE))
3924 return -1;
3925 } else {
3926 if (!CRYPTO_ocb128_decrypt(&octx->ocb, buf, out,
3927 AES_BLOCK_SIZE))
3928 return -1;
3929 }
3930 written_len = AES_BLOCK_SIZE;
3931 *buf_len = 0;
3932 if (out != NULL)
3933 out += AES_BLOCK_SIZE;
3934 }

        /* Is there a partial block to handle at the end? */
        trailing_len = len % AES_BLOCK_SIZE;

        /* If we have full blocks to handle, process them first */
        if (len != trailing_len) {
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, in, len - trailing_len))
                    return -1;
            } else if (EVP_CIPHER_CTX_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, in, out,
                                           len - trailing_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, in, out,
                                           len - trailing_len))
                    return -1;
            }
            written_len += len - trailing_len;
            in += len - trailing_len;
        }

        /* Buffer any trailing partial block for the next call */
        if (trailing_len > 0) {
            memcpy(buf, in, trailing_len);
            *buf_len = trailing_len;
        }

        return written_len;
    } else {
        /*
         * First flush any partial blocks that are still buffered, for both
         * data and AAD.
         */
        if (octx->data_buf_len > 0) {
            if (EVP_CIPHER_CTX_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            }
            written_len = octx->data_buf_len;
            octx->data_buf_len = 0;
        }
        if (octx->aad_buf_len > 0) {
            if (!CRYPTO_ocb128_aad(&octx->ocb, octx->aad_buf,
                                   octx->aad_buf_len))
                return -1;
            octx->aad_buf_len = 0;
        }
        /* If decrypting, verify the tag */
        if (!EVP_CIPHER_CTX_encrypting(ctx)) {
            if (octx->taglen < 0)
                return -1;
            if (CRYPTO_ocb128_finish(&octx->ocb,
                                     octx->tag, octx->taglen) != 0)
                return -1;
            octx->iv_set = 0;
            return written_len;
        }
        /* If encrypting, just compute the tag */
        if (CRYPTO_ocb128_tag(&octx->ocb, octx->tag, 16) != 1)
            return -1;
        /* Don't reuse the IV */
        octx->iv_set = 0;
        return written_len;
    }
}
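
/*
 * Illustrative counterpart to the encryption sketch above (not part of the
 * original file): on the decrypt side the expected tag must be handed over
 * with EVP_CTRL_AEAD_SET_TAG before the final call, which lands in the
 * tag-verification branch of aes_ocb_cipher(). The helper name and the
 * 16-byte tag length are assumptions; kept out of compilation on purpose.
 */
#if 0
static int ocb_decrypt_example(const unsigned char key[16],
                               const unsigned char iv[12],
                               const unsigned char *ct, int ctlen,
                               const unsigned char tag[16],
                               unsigned char *pt)
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    unsigned char tagbuf[16];
    int outl = 0, tmplen = 0, ok = 0;

    if (c == NULL)
        return 0;
    memcpy(tagbuf, tag, sizeof(tagbuf));
    if (!EVP_DecryptInit_ex(c, EVP_aes_128_ocb(), NULL, key, iv)
        || !EVP_DecryptUpdate(c, pt, &outl, ct, ctlen)
        || !EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 16, tagbuf)
            /* Fails here if the tag does not verify */
        || !EVP_DecryptFinal_ex(c, pt + outl, &tmplen))
        goto err;
    ok = 1;
 err:
    EVP_CIPHER_CTX_free(c);
    return ok;
}
#endif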

static int aes_ocb_cleanup(EVP_CIPHER_CTX *c)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX, c);

    CRYPTO_ocb128_cleanup(&octx->ocb);
    return 1;
}

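/*
 * BLOCK_CIPHER_custom() expands to the EVP_CIPHER definitions and the
 * EVP_aes_128_ocb()/EVP_aes_192_ocb()/EVP_aes_256_ocb() accessors; the
 * arguments register a 16-byte block size and a default IV length of 12
 * bytes for each key size.
 */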
BLOCK_CIPHER_custom(NID_aes, 128, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
#endif /* OPENSSL_NO_OCB */

/* AES-SIV mode */
#ifndef OPENSSL_NO_SIV

typedef SIV128_CONTEXT EVP_AES_SIV_CTX;

#define aesni_siv_init_key aes_siv_init_key
static int aes_siv_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                            const unsigned char *iv, int enc)
{
    const EVP_CIPHER *ctr;
    const EVP_CIPHER *cbc;
    SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, ctx);
    int klen = EVP_CIPHER_CTX_key_length(ctx) / 2;

    if (key == NULL)
        return 1;

    switch (klen) {
    case 16:
        cbc = EVP_aes_128_cbc();
        ctr = EVP_aes_128_ctr();
        break;
    case 24:
        cbc = EVP_aes_192_cbc();
        ctr = EVP_aes_192_ctr();
        break;
    case 32:
        cbc = EVP_aes_256_cbc();
        ctr = EVP_aes_256_ctr();
        break;
    default:
        return 0;
    }

    /*
     * klen is the key length of each underlying cipher, not of the input
     * key, which must be twice as long.
     */
    return CRYPTO_siv128_init(sctx, key, klen, cbc, ctr);
}
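
/*
 * Illustrative sketch, not part of the original file: AES-128-SIV through
 * the EVP interface. Note the 32-byte key: per RFC 5297 the SIV key is
 * split in half, one half keying S2V (CMAC) and the other keying CTR,
 * which is why klen above is half the EVP key length. The helper name is
 * an assumption; kept out of compilation on purpose.
 */
#if 0
static int siv_encrypt_example(const unsigned char key[32],
                               const unsigned char *aad, int aadlen,
                               const unsigned char *pt, int ptlen,
                               unsigned char *ct, unsigned char tag[16])
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int outl = 0, tmplen = 0, ok = 0;

    if (c == NULL)
        return 0;
    if (!EVP_EncryptInit_ex(c, EVP_aes_128_siv(), NULL, key, NULL)
            /* Associated data, handled by CRYPTO_siv128_aad() */
        || !EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen)
            /* SIV is one-shot: the whole plaintext goes in a single call */
        || !EVP_EncryptUpdate(c, ct, &outl, pt, ptlen)
        || !EVP_EncryptFinal_ex(c, ct + outl, &tmplen)
            /* The computed SIV doubles as the 16-byte tag */
        || !EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag))
        goto err;
    ok = 1;
 err:
    EVP_CIPHER_CTX_free(c);
    return ok;
}
#endif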

#define aesni_siv_cipher aes_siv_cipher
static int aes_siv_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                          const unsigned char *in, size_t len)
{
    SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, ctx);

    /* A NULL input means EncryptFinal or DecryptFinal */
    if (in == NULL)
        return CRYPTO_siv128_finish(sctx);

    /* A NULL output means we are dealing with associated data */
    if (out == NULL)
        return CRYPTO_siv128_aad(sctx, in, len);

    if (EVP_CIPHER_CTX_encrypting(ctx))
        return CRYPTO_siv128_encrypt(sctx, in, out, len);

    return CRYPTO_siv128_decrypt(sctx, in, out, len);
}

#define aesni_siv_cleanup aes_siv_cleanup
static int aes_siv_cleanup(EVP_CIPHER_CTX *c)
{
    SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, c);

    return CRYPTO_siv128_cleanup(sctx);
}

#define aesni_siv_ctrl aes_siv_ctrl
static int aes_siv_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    SIV128_CONTEXT *sctx = EVP_C_DATA(SIV128_CONTEXT, c);
    SIV128_CONTEXT *sctx_out;

    switch (type) {
    case EVP_CTRL_INIT:
        return CRYPTO_siv128_cleanup(sctx);

    case EVP_CTRL_SET_SPEED:
        return CRYPTO_siv128_speed(sctx, arg);

    case EVP_CTRL_AEAD_SET_TAG:
        /* The tag can only be set when decrypting */
        if (!EVP_CIPHER_CTX_encrypting(c))
            return CRYPTO_siv128_set_tag(sctx, ptr, arg);
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        /* The tag can only be retrieved when encrypting */
        if (!EVP_CIPHER_CTX_encrypting(c))
            return 0;
        return CRYPTO_siv128_get_tag(sctx, ptr, arg);

    case EVP_CTRL_COPY:
        sctx_out = EVP_C_DATA(SIV128_CONTEXT, (EVP_CIPHER_CTX *)ptr);
        return CRYPTO_siv128_copy_ctx(sctx_out, sctx);

    default:
        return -1;
    }
}
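
/*
 * Note (an assumption based on how EVP_CTRL_SET_SPEED is used elsewhere in
 * OpenSSL): the speed control relaxes SIV's one-operation-per-key
 * restriction so that benchmarking tools can reuse a context; see
 * CRYPTO_siv128_speed().
 */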

#define SIV_FLAGS (EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_DEFAULT_ASN1 \
                   | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
                   | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CUSTOM_COPY \
                   | EVP_CIPH_CTRL_INIT)

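/*
 * For SIV the BLOCK_CIPHER_custom() macro doubles the registered EVP key
 * length, so e.g. EVP_aes_128_siv() expects a 32-byte key, which
 * CRYPTO_siv128_init() splits between the S2V (CMAC) and CTR halves as
 * specified by RFC 5297.
 */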
BLOCK_CIPHER_custom(NID_aes, 128, 1, 0, siv, SIV, SIV_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 1, 0, siv, SIV, SIV_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 1, 0, siv, SIV, SIV_FLAGS)
#endif /* OPENSSL_NO_SIV */