/*
 * Copyright 2001-2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file uses the low level AES functions (which are deprecated for
 * non-internal use) in order to implement the EVP AES ciphers.
 */
#include "internal/deprecated.h"

#include <string.h>
#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "crypto/evp.h"
#include "internal/cryptlib.h"
#include "crypto/modes.h"
#include "crypto/siv.h"
#include "crypto/aes_platform.h"
#include "evp_local.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;
    block128_f block;
    union {
        cbc128_f cbc;
        ctr128_f ctr;
    } stream;
} EVP_AES_KEY;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    GCM128_CONTEXT gcm;
    unsigned char *iv;          /* Temporary IV store */
    int ivlen;                  /* IV length */
    int taglen;
    int iv_gen;                 /* It is OK to generate IVs */
    int iv_gen_rand;            /* No IV was specified, so generate a rand IV */
    int tls_aad_len;            /* TLS AAD length */
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
    ctr128_f ctr;
} EVP_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks1, ks2;                 /* AES key schedules to use */
    XTS128_CONTEXT xts;
    void (*stream) (const unsigned char *in,
                    unsigned char *out, size_t length,
                    const AES_KEY *key1, const AES_KEY *key2,
                    const unsigned char iv[16]);
} EVP_AES_XTS_CTX;

#ifdef FIPS_MODULE
static const int allow_insecure_decrypt = 0;
#else
static const int allow_insecure_decrypt = 1;
#endif

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    int tag_set;                /* Set if tag is valid */
    int len_set;                /* Set if message length set */
    int L, M;                   /* L and M parameters from RFC3610 */
    int tls_aad_len;            /* TLS AAD length */
    CCM128_CONTEXT ccm;
    ccm128_f str;
} EVP_AES_CCM_CTX;

#ifndef OPENSSL_NO_OCB
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksenc;                    /* AES key schedule to use for encryption */
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksdec;                    /* AES key schedule to use for decryption */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    OCB128_CONTEXT ocb;
    unsigned char *iv;          /* Temporary IV store */
    unsigned char tag[16];
    unsigned char data_buf[16]; /* Store partial data blocks */
    unsigned char aad_buf[16];  /* Store partial AAD blocks */
    int data_buf_len;
    int aad_buf_len;
    int ivlen;                  /* IV length */
    int taglen;
} EVP_AES_OCB_CTX;
#endif

#define MAXBITCHUNK ((size_t)1<<(sizeof(size_t)*8-4))
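/*
 * Note: MAXBITCHUNK is one sixteenth of the size_t range; the bit-wise CFB1
 * code (further below, outside this excerpt) appears to process input in
 * chunks of at most this many bytes so that the bit count len * 8 cannot
 * overflow a size_t.
 */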

/* increment counter (64-bit int) by 1 */
static void ctr64_inc(unsigned char *counter)
{
    int n = 8;
    unsigned char c;

    do {
        --n;
        c = counter[n];
        ++c;
        counter[n] = c;
        if (c)
            return;
    } while (n);
}
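/*
 * For example, a trailing { ..., 0x00, 0xff } increments to
 * { ..., 0x01, 0x00 }: the counter is big-endian and the loop stops at the
 * first byte that does not wrap to zero, touching at most 8 bytes.
 */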

#if defined(AESNI_CAPABLE)
# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
#  define AES_GCM_ASM2(gctx)      (gctx->gcm.block==(block128_f)aesni_encrypt && \
                                   gctx->gcm.ghash==gcm_ghash_avx)
#  undef AES_GCM_ASM2          /* minor size optimization */
# endif

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                          const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    mode = EVP_CIPHER_CTX_get_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = aesni_set_decrypt_key(key, keylen, &dat->ks.ks);
        dat->block = (block128_f) aesni_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f) aesni_cbc_encrypt : NULL;
    } else {
        ret = aesni_set_encrypt_key(key, keylen, &dat->ks.ks);
        dat->block = (block128_f) aesni_encrypt;
        if (mode == EVP_CIPH_CBC_MODE)
            dat->stream.cbc = (cbc128_f) aesni_cbc_encrypt;
        else if (mode == EVP_CIPH_CTR_MODE)
            dat->stream.ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        else
            dat->stream.cbc = NULL;
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}
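/*
 * Only ECB and CBC ever run the block function in the decrypt direction,
 * which is why aesni_init_key() sets up a decrypt key schedule just for
 * those modes; CFB, OFB and CTR decrypt by running AES forward.
 */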

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    aesni_cbc_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      ctx->iv, EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);

    if (len < bl)
        return 1;

    aesni_ecb_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

# define aesni_ofb_cipher aes_ofb_cipher
static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb_cipher aes_cfb_cipher
static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb8_cipher aes_cfb8_cipher
static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_cfb1_cipher aes_cfb1_cipher
static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_ctr_cipher aes_ctr_cipher
static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX, ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aesni_set_encrypt_key(key, keylen, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
        gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If the key is set, use the IV directly; otherwise save a copy. */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aesni_gcm_cipher aes_gcm_cipher
static int aesni_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key) {
        /* The key is really two half-length keys concatenated */
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        const int bytes = keylen / 2;
        const int bits = bytes * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        /* key_len is two AES keys */
        if (enc) {
            aesni_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_encrypt;
            xctx->stream = aesni_xts_encrypt;
        } else {
            aesni_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_decrypt;
            xctx->stream = aesni_xts_decrypt;
        }

        aesni_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aesni_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}
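/*
 * Background for the CRYPTO_memcmp() check above: if the two key halves are
 * equal, the XTS tweak key equals the data key and the mode's security
 * argument no longer holds (Rogaway's observation); the constant-time
 * comparison also avoids leaking key bytes through timing.
 */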

# define aesni_xts_cipher aes_xts_cipher
static int aesni_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aesni_set_encrypt_key(key, keylen, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aesni_encrypt);
        cctx->str = enc ? (ccm128_f) aesni_ccm64_encrypt_blocks :
            (ccm128_f) aesni_ccm64_decrypt_blocks;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}
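/*
 * In CCM (RFC 3610) the nonce occupies 15 - L bytes of the initial block:
 * L bytes encode the message length, so a larger L means a shorter nonce,
 * which is why only 15 - cctx->L bytes of iv are copied above.
 */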

# define aesni_ccm_cipher aes_ccm_cipher
static int aesni_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aesni_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aesni_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            aesni_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aesni_encrypt,
                                    (block128_f) aesni_decrypt,
                                    enc ? aesni_ocb_encrypt
                                        : aesni_ocb_decrypt))
                return 0;
        }
        while (0);

        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If the key is set, use the IV directly; otherwise save a copy. */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

#  define aesni_ocb_cipher aes_ocb_cipher
static int aesni_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aesni_init_key, \
        aesni_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }
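/*
 * Each BLOCK_CIPHER_generic() use emits an AES-NI and a software EVP_CIPHER
 * table plus the public accessor. For instance, a hypothetical
 * BLOCK_CIPHER_generic(NID_aes, 128, 16, 16, cbc, cbc, CBC, 0) would define
 * EVP_aes_128_cbc(), returning the aesni_128_cbc table when AESNI_CAPABLE
 * is true and aes_128_cbc otherwise.
 */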

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aesni_##mode##_init_key, \
        aesni_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }
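/*
 * The ?: expression in BLOCK_CIPHER_custom() doubles the reported key
 * length for XTS and SIV, which consume two AES keys back to back; an
 * aes-128-xts cipher built this way therefore advertises a 32-byte key.
 */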

#elif defined(SPARC_AES_CAPABLE)

static int aes_t4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                           const unsigned char *iv, int enc)
{
    int ret, mode, bits;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_get_mode(ctx);
    bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
    if (bits <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = 0;
        aes_t4_set_decrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_decrypt;
        switch (bits) {
        case 128:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes128_t4_cbc_decrypt : NULL;
            break;
        case 192:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes192_t4_cbc_decrypt : NULL;
            break;
        case 256:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes256_t4_cbc_decrypt : NULL;
            break;
        default:
            ret = -1;
        }
    } else {
        ret = 0;
        aes_t4_set_encrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_encrypt;
        switch (bits) {
        case 128:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes128_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 192:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes192_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 256:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes256_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        default:
            ret = -1;
        }
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

# define aes_t4_cbc_cipher aes_cbc_cipher
static int aes_t4_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ecb_cipher aes_ecb_cipher
static int aes_t4_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ofb_cipher aes_ofb_cipher
static int aes_t4_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb_cipher aes_cfb_cipher
static int aes_t4_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb8_cipher aes_cfb8_cipher
static int aes_t4_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_cfb1_cipher aes_cfb1_cipher
static int aes_t4_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_ctr_cipher aes_ctr_cipher
static int aes_t4_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;
    if (key) {
        const int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (bits <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aes_t4_set_encrypt_key(key, bits, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
                           (block128_f) aes_t4_encrypt);
        switch (bits) {
        case 128:
            gctx->ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            break;
        case 192:
            gctx->ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            break;
        case 256:
            gctx->ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            break;
        default:
            return 0;
        }
        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If the key is set, use the IV directly; otherwise save a copy. */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aes_t4_gcm_cipher aes_gcm_cipher
static int aes_t4_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is really two half-length keys concatenated */
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        const int bytes = keylen / 2;
        const int bits = bytes * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        xctx->stream = NULL;
        /* key_len is two AES keys */
        if (enc) {
            aes_t4_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_encrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_encrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_encrypt;
                break;
            default:
                return 0;
            }
        } else {
            aes_t4_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_decrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_decrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_decrypt;
                break;
            default:
                return 0;
            }
        }

        aes_t4_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aes_t4_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}

# define aes_t4_xts_cipher aes_xts_cipher
static int aes_t4_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (bits <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aes_t4_set_encrypt_key(key, bits, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aes_t4_encrypt);
        cctx->str = NULL;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ccm_cipher aes_ccm_cipher
static int aes_t4_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aes_t4_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aes_t4_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            aes_t4_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aes_t4_encrypt,
                                    (block128_f) aes_t4_decrypt,
                                    NULL))
                return 0;
        }
        while (0);

        /*
         * If we have an IV, we can set it directly; otherwise use the
         * saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If the key is set, use the IV directly; otherwise save a copy. */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

#  define aes_t4_ocb_cipher aes_ocb_cipher
static int aes_t4_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# ifndef OPENSSL_NO_SIV
#  define aes_t4_siv_init_key aes_siv_init_key
#  define aes_t4_siv_cipher aes_siv_cipher
# endif                        /* OPENSSL_NO_SIV */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_t4_init_key, \
        aes_t4_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_t4_##mode##_init_key, \
        aes_t4_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(S390X_aes_128_CAPABLE)
/* IBM S390X support */
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-06)
         */
        struct {
            unsigned char k[32];
        } param;
        /* KM-AES parameter block - end */
    } km;
    unsigned int fc;
} S390X_AES_ECB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMO-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMO-AES parameter block - end */
    } kmo;
    unsigned int fc;

    int res;
} S390X_AES_OFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMF-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMF-AES parameter block - end */
    } kmf;
    unsigned int fc;

    int res;
} S390X_AES_CFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMA-GCM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-11)
         */
        struct {
            unsigned char reserved[12];
            union {
                unsigned int w;
                unsigned char b[4];
            } cv;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } t;
            unsigned char h[16];
            unsigned long long taadl;
            unsigned long long tpcl;
            union {
                unsigned long long g[2];
                unsigned int w[4];
            } j0;
            unsigned char k[32];
        } param;
        /* KMA-GCM-AES parameter block - end */
    } kma;
    unsigned int fc;
    int key_set;

    unsigned char *iv;
    int ivlen;
    int iv_set;
    int iv_gen;

    int taglen;

    unsigned char ares[16];
    unsigned char mres[16];
    unsigned char kres[16];
    int areslen;
    int mreslen;
    int kreslen;

    int tls_aad_len;
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
} S390X_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
         * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
         * rounds field is used to store the function code and that the key
         * schedule is not stored (if aes hardware support is detected).
         */
        struct {
            unsigned char pad[16];
            AES_KEY k;
        } key;

        struct {
            /*-
             * KMAC-AES parameter block - begin
             * (see z/Architecture Principles of Operation >= SA22-7832-08)
             */
            struct {
                union {
                    unsigned long long g[2];
                    unsigned char b[16];
                } icv;
                unsigned char k[32];
            } kmac_param;
            /* KMAC-AES parameter block - end */

            union {
                unsigned long long g[2];
                unsigned char b[16];
            } nonce;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } buf;

            unsigned long long blocks;
            int l;
            int m;
            int tls_aad_len;
            int iv_set;
            int tag_set;
            int len_set;
            int key_set;

            unsigned char pad[140];
            unsigned int fc;
        } ccm;
    } aes;
} S390X_AES_CCM_CTX;

# define s390x_aes_init_key aes_init_key
static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc);

# define S390X_AES_CBC_CTX EVP_AES_KEY

# define s390x_aes_cbc_init_key aes_init_key

# define s390x_aes_cbc_cipher aes_cbc_cipher
static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->km.param.k, key, keylen);
    return 1;
}

static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);

    s390x_km(in, len, out, cctx->fc, &cctx->km.param);
    return 1;
}

static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    memcpy(cctx->kmo.param.cv, iv, ivlen);
    memcpy(cctx->kmo.param.k, key, keylen);
    cctx->fc = S390X_AES_FC(keylen);
    cctx->res = 0;
    return 1;
}

static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = cctx->res;
    int rem;

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    while (n && len) {
        *out = *in ^ cctx->kmo.param.cv[n];
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
                 cctx->kmo.param.k);

        while (rem--) {
            out[n] = in[n] ^ cctx->kmo.param.cv[n];
            ++n;
        }
    }

    memcpy(iv, cctx->kmo.param.cv, ivlen);
    cctx->res = n;
    return 1;
}

static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 16 << 24;   /* 16 bytes cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    cctx->res = 0;
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = cctx->res;
    int rem;
    unsigned char tmp;

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    while (n && len) {
        tmp = *in;
        *out = cctx->kmf.param.cv[n] ^ tmp;
        cctx->kmf.param.cv[n] = enc ? *out : tmp;
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
                 S390X_AES_FC(keylen), cctx->kmf.param.k);

        while (rem--) {
            tmp = in[n];
            out[n] = cctx->kmf.param.cv[n] ^ tmp;
            cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
            ++n;
        }
    }

    memcpy(iv, cctx->kmf.param.cv, ivlen);
    cctx->res = n;
    return 1;
}

static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *key,
                                   const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 1 << 24;   /* 1 byte cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
    memcpy(iv, cctx->kmf.param.cv, ivlen);
    return 1;
}

# define s390x_aes_cfb1_init_key aes_init_key

# define s390x_aes_cfb1_cipher aes_cfb1_cipher
static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len);

# define S390X_AES_CTR_CTX EVP_AES_KEY

# define s390x_aes_ctr_init_key aes_init_key

# define s390x_aes_ctr_cipher aes_ctr_cipher
static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

/* iv + padding length for iv lengths != 12 */
# define S390X_gcm_ivpadlen(i)  ((((i) + 15) >> 4 << 4) + 16)
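/*
 * This rounds the iv length up to a whole number of 16-byte blocks and adds
 * one more block for the encoded bit length, as the GHASH-style derivation
 * below requires; e.g. S390X_gcm_ivpadlen(13) = 16 + 16 = 32.
 */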

/*-
 * Process additional authenticated data. Returns 0 on success. Code is
 * big-endian.
 */
static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
                             size_t len)
{
    unsigned long long alen;
    int n, rem;

    if (ctx->kma.param.tpcl)
        return -2;

    alen = ctx->kma.param.taadl + len;
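    /*
     * SP 800-38D caps the AAD at 2^64 - 1 bits, i.e. at most 2^61 bytes,
     * which is what the bound below enforces together with an overflow check.
     */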
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->kma.param.taadl = alen;

    n = ctx->areslen;
    if (n) {
        while (n && len) {
            ctx->ares[n] = *aad;
            n = (n + 1) & 0xf;
            ++aad;
            --len;
        }
        /* ctx->ares contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
        }
        ctx->areslen = n;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
        aad += len;
        ctx->fc |= S390X_KMA_HS;
    }

    if (rem) {
        ctx->areslen = rem;

        do {
            --rem;
            ctx->ares[rem] = aad[rem];
        } while (rem);
    }
    return 0;
}

/*-
 * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
 * success. Code is big-endian.
 */
static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len)
{
    const unsigned char *inptr;
    unsigned long long mlen;
    union {
        unsigned int w[4];
        unsigned char b[16];
    } buf;
    size_t inlen;
    int n, rem, i;

    mlen = ctx->kma.param.tpcl + len;
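    /*
     * SP 800-38D limits the plaintext for a single GCM invocation to
     * 2^39 - 256 bits, i.e. 2^36 - 32 bytes, matching the bound below.
     */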
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->kma.param.tpcl = mlen;

    n = ctx->mreslen;
    if (n) {
        inptr = in;
        inlen = len;
        while (n && inlen) {
            ctx->mres[n] = *inptr;
            n = (n + 1) & 0xf;
            ++inptr;
            --inlen;
        }
        /* ctx->mres contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
            ctx->areslen = 0;

            /* previous call already encrypted/decrypted its remainder,
             * see comment below */
            n = ctx->mreslen;
            while (n) {
                *out = buf.b[n];
                n = (n + 1) & 0xf;
                ++out;
                ++in;
                --len;
            }
            ctx->mreslen = 0;
        }
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
        in += len;
        out += len;
        ctx->fc |= S390X_KMA_HS;
        ctx->areslen = 0;
    }

    /*-
     * If there is a remainder, it has to be saved such that it can be
     * processed by kma later. However, we also have to do the for-now
     * unauthenticated encryption/decryption part here and now...
     */
    if (rem) {
        if (!ctx->mreslen) {
            buf.w[0] = ctx->kma.param.j0.w[0];
            buf.w[1] = ctx->kma.param.j0.w[1];
            buf.w[2] = ctx->kma.param.j0.w[2];
            buf.w[3] = ctx->kma.param.cv.w + 1;
            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
        }

        n = ctx->mreslen;
        for (i = 0; i < rem; i++) {
            ctx->mres[n + i] = in[i];
            out[i] = in[i] ^ ctx->kres[n + i];
        }

        ctx->mreslen += rem;
    }
    return 0;
}

/*-
 * Initialize context structure. Code is big-endian.
 */
static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
                                const unsigned char *iv)
{
    ctx->kma.param.t.g[0] = 0;
    ctx->kma.param.t.g[1] = 0;
    ctx->kma.param.tpcl = 0;
    ctx->kma.param.taadl = 0;
    ctx->mreslen = 0;
    ctx->areslen = 0;
    ctx->kreslen = 0;

    if (ctx->ivlen == 12) {
        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
        ctx->kma.param.j0.w[3] = 1;
        ctx->kma.param.cv.w = 1;
    } else {
        /* ctx->iv has the right size and is already padded. */
        memcpy(ctx->iv, iv, ctx->ivlen);
        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
                  ctx->fc, &ctx->kma.param);
        ctx->fc |= S390X_KMA_HS;

        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
        ctx->kma.param.t.g[0] = 0;
        ctx->kma.param.t.g[1] = 0;
    }
}
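/*
 * The above mirrors the GCM spec's pre-counter block derivation: for a
 * 96-bit iv, J0 = IV || 0^31 || 1; for any other length, J0 is the GHASH
 * of the padded, length-encoded iv, computed here via KMA.
 */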

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    S390X_AES_GCM_CTX *gctx_out;
    EVP_CIPHER_CTX *out;
    unsigned char *buf;
    int ivlen, enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        ivlen = EVP_CIPHER_get_iv_length(c->cipher);
        gctx->key_set = 0;
        gctx->iv_set = 0;
        gctx->ivlen = ivlen;
        gctx->iv = c->iv;
        gctx->taglen = -1;
        gctx->iv_gen = 0;
        gctx->tls_aad_len = -1;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = gctx->ivlen;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        if (arg <= 0)
            return 0;

        if (arg != 12) {
            len = S390X_gcm_ivpadlen(arg);

            /* Allocate memory for iv if needed. */
            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
                if (gctx->iv != c->iv)
                    OPENSSL_free(gctx->iv);

                if ((gctx->iv = OPENSSL_malloc(len)) == NULL) {
                    ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
                    return 0;
                }
            }
            /* Add padding. */
            memset(gctx->iv + arg, 0, len - arg - 8);
            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
        }
        gctx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || enc)
            return 0;

        memcpy(buf, ptr, arg);
        gctx->taglen = arg;
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
            return 0;

        memcpy(ptr, gctx->kma.param.t.b, arg);
        return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
        /* Special case: -1 length restores whole iv */
        if (arg == -1) {
            memcpy(gctx->iv, ptr, gctx->ivlen);
            gctx->iv_gen = 1;
            return 1;
        }
        /*
         * Fixed field must be at least 4 bytes and invocation field at least
         * 8.
         */
        if ((arg < 4) || (gctx->ivlen - arg) < 8)
            return 0;

        if (arg)
            memcpy(gctx->iv, ptr, arg);

        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
            return 0;

        gctx->iv_gen = 1;
        return 1;

    case EVP_CTRL_GCM_IV_GEN:
        if (gctx->iv_gen == 0 || gctx->key_set == 0)
            return 0;

        s390x_aes_gcm_setiv(gctx, gctx->iv);

        if (arg <= 0 || arg > gctx->ivlen)
            arg = gctx->ivlen;

        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * The invocation field will be at least 8 bytes in size, so there is
         * no need to check for wraparound or to increment more than the last
         * 8 bytes.
         */
        ctr64_inc(gctx->iv + gctx->ivlen - 8);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
            return 0;

        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
        s390x_aes_gcm_setiv(gctx, gctx->iv);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_AEAD_TLS1_AAD:
        /* Save the aad for later use. */
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        gctx->tls_aad_len = arg;
        gctx->tls_enc_records = 0;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        /* Correct length for explicit iv. */
        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
            return 0;
        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
        /* If decrypting, correct for the tag too. */
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (!enc) {
            if (len < EVP_GCM_TLS_TAG_LEN)
                return 0;
            len -= EVP_GCM_TLS_TAG_LEN;
        }
        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;
        /* Extra padding: tag appended to record. */
        return EVP_GCM_TLS_TAG_LEN;

    case EVP_CTRL_COPY:
        out = ptr;
        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);

        if (gctx->iv == c->iv) {
            gctx_out->iv = out->iv;
        } else {
            len = S390X_gcm_ivpadlen(gctx->ivlen);

            if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL) {
                ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
                return 0;
            }

            memcpy(gctx_out->iv, gctx->iv, len);
        }
        return 1;

    default:
        return -1;
    }
}

/*-
 * Set key and/or iv. Returns 1 on success. Otherwise 0 is returned.
 */
static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }

        memcpy(&gctx->kma.param.k, key, keylen);

        gctx->fc = S390X_AES_FC(keylen);
        if (!enc)
            gctx->fc |= S390X_DECRYPT;

        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;

        if (iv != NULL) {
            s390x_aes_gcm_setiv(gctx, iv);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        if (gctx->key_set)
            s390x_aes_gcm_setiv(gctx, iv);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);

        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    int rv = -1;

    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
        return -1;

    /*
     * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
     * Requirements from SP 800-38D". The requirement is for one party to the
     * communication to fail after 2^64 - 1 keys. We do this on the encrypting
     * side only.
     */
    if (enc && ++gctx->tls_enc_records == 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
        goto err;
    }

    if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
                                     : EVP_CTRL_GCM_SET_IV_INV,
                            EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
        goto err;

    in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;

    gctx->kma.param.taadl = gctx->tls_aad_len << 3;
    gctx->kma.param.tpcl = len << 3;
    s390x_kma(buf, gctx->tls_aad_len, in, len, out,
              gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);

    if (enc) {
        memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
        rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
    } else {
        if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
                          EVP_GCM_TLS_TAG_LEN)) {
            OPENSSL_cleanse(out, len);
            goto err;
        }
        rv = len;
    }
err:
    gctx->iv_set = 0;
    gctx->tls_aad_len = -1;
    return rv;
}

/*-
 * Called from EVP layer to initialize context, process additional
 * authenticated data, en/de-crypt plain/cipher-text and authenticate
 * ciphertext or process a TLS packet, depending on context. Returns bytes
 * written on success. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    unsigned char *buf, tmp[16];
    int enc;

    if (!gctx->key_set)
        return -1;

    if (gctx->tls_aad_len >= 0)
        return s390x_aes_gcm_tls_cipher(ctx, out, in, len);

    if (!gctx->iv_set)
        return -1;

    if (in != NULL) {
        if (out == NULL) {
            if (s390x_aes_gcm_aad(gctx, in, len))
                return -1;
        } else {
            if (s390x_aes_gcm(gctx, in, out, len))
                return -1;
        }
        return len;
    } else {
        gctx->kma.param.taadl <<= 3;
        gctx->kma.param.tpcl <<= 3;
        s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
                  gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
        /* recall that we already did en-/decrypt gctx->mres
         * and returned it to caller... */
        OPENSSL_cleanse(tmp, gctx->mreslen);
        gctx->iv_set = 0;

        enc = EVP_CIPHER_CTX_is_encrypting(ctx);
        if (enc) {
            gctx->taglen = 16;
        } else {
            if (gctx->taglen < 0)
                return -1;

            buf = EVP_CIPHER_CTX_buf_noconst(ctx);
            if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
                return -1;
        }
        return 0;
    }
}

static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);

    if (gctx == NULL)
        return 0;

    if (gctx->iv != c->iv)
        OPENSSL_free(gctx->iv);

    OPENSSL_cleanse(gctx, sizeof(*gctx));
    return 1;
}

# define S390X_AES_XTS_CTX EVP_AES_XTS_CTX

# define s390x_aes_xts_init_key aes_xts_init_key
static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc);
# define s390x_aes_xts_cipher aes_xts_cipher
static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);
# define s390x_aes_xts_ctrl aes_xts_ctrl
static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
# define s390x_aes_xts_cleanup aes_xts_cleanup

/*-
 * Set nonce and length fields. Code is big-endian.
 */
static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
                                       const unsigned char *nonce,
                                       size_t mlen)
{
    ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
    ctx->aes.ccm.nonce.g[1] = mlen;
    memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
}
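/*
 * This lays out the RFC 3610 counter/B0 block: one flags octet, the
 * (15 - L)-byte nonce, then an L-byte message-length field; the 64-bit
 * store to nonce.g[1] writes the length, and the memcpy then overlays
 * the nonce bytes on top of its unused high bytes.
 */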

/*-
 * Process additional authenticated data. Code is big-endian.
 */
static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
                              size_t alen)
{
    unsigned char *ptr;
    int i, rem;

    if (!alen)
        return;

    ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;

    /* Suppress 'type-punned pointer dereference' warning. */
    ptr = ctx->aes.ccm.buf.b;

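    /*
     * The AAD length is encoded as in RFC 3610: two raw bytes when it is
     * below 2^16 - 2^8, the prefix 0xfffe plus four bytes up to 2^32, and
     * the prefix 0xffff plus eight bytes beyond that.
     */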
    if (alen < ((1 << 16) - (1 << 8))) {
        *(uint16_t *)ptr = alen;
        i = 2;
    } else if (sizeof(alen) == 8
               && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
        *(uint16_t *)ptr = 0xffff;
        *(uint64_t *)(ptr + 2) = alen;
        i = 10;
    } else {
        *(uint16_t *)ptr = 0xfffe;
        *(uint32_t *)(ptr + 2) = alen;
        i = 6;
    }

    while (i < 16 && alen) {
        ctx->aes.ccm.buf.b[i] = *aad;
        ++aad;
        --alen;
        ++i;
    }
    while (i < 16) {
        ctx->aes.ccm.buf.b[i] = 0;
        ++i;
    }

    ctx->aes.ccm.kmac_param.icv.g[0] = 0;
    ctx->aes.ccm.kmac_param.icv.g[1] = 0;
    s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
               &ctx->aes.ccm.kmac_param);
    ctx->aes.ccm.blocks += 2;

    rem = alen & 0xf;
    alen &= ~(size_t)0xf;
    if (alen) {
        s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        ctx->aes.ccm.blocks += alen >> 4;
        aad += alen;
    }
    if (rem) {
        for (i = 0; i < rem; i++)
            ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];

        s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                 ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
}

/*-
 * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
 * success.
 */
static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len, int enc)
{
    size_t n, rem;
    unsigned int i, l, num;
    unsigned char flags;

    flags = ctx->aes.ccm.nonce.b[0];
    if (!(flags & S390X_CCM_AAD_FLAG)) {
        s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
                 ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
    l = flags & 0x7;
    ctx->aes.ccm.nonce.b[0] = l;

    /*-
     * Reconstruct length from encoded length field
     * and initialize it with counter value.
     */
    n = 0;
    for (i = 15 - l; i < 15; i++) {
        n |= ctx->aes.ccm.nonce.b[i];
        ctx->aes.ccm.nonce.b[i] = 0;
        n <<= 8;
    }
    n |= ctx->aes.ccm.nonce.b[15];
    ctx->aes.ccm.nonce.b[15] = 1;

    if (n != len)
        return -1;              /* length mismatch */

    if (enc) {
        /* Two operations per block plus one for tag encryption */
        ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
        if (ctx->aes.ccm.blocks > (1ULL << 61))
            return -2;          /* too much data */
    }

    num = 0;
    rem = len & 0xf;
    len &= ~(size_t)0xf;

    if (enc) {
        /* mac-then-encrypt */
        if (len)
            s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }

        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);
    } else {
        /* decrypt-then-mac */
        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);

        if (len)
            s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }
    }
    /* encrypt tag */
    for (i = 15 - l; i < 16; i++)
        ctx->aes.ccm.nonce.b[i] = 0;

    s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
             ctx->aes.ccm.kmac_param.k);
    ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
    ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];

    ctx->aes.ccm.nonce.b[0] = flags;    /* restore flags field */
    return 0;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned.
 */
static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
    unsigned char *ivec = ctx->iv;
    unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);

    if (out != in
            || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
        return -1;

    if (enc) {
        /* Set explicit iv (sequence number). */
        memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
    }

    len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
    /*-
     * Get explicit iv (sequence number). We already have fixed iv
     * (server/client_write_iv) here.
     */
    memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
    s390x_aes_ccm_setiv(cctx, ivec, len);

    /* Process aad (sequence number|type|version|length) */
    s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);

    in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_CCM_TLS_EXPLICIT_IV_LEN;

    if (enc) {
        if (s390x_aes_ccm(cctx, in, out, len, enc))
            return -1;

        memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
        return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
    } else {
        if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
            if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
                               cctx->aes.ccm.m))
                return len;
        }

        OPENSSL_cleanse(out, len);
        return -1;
    }
}

/*-
 * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
 * returned.
 */
static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }

        cctx->aes.ccm.fc = S390X_AES_FC(keylen);
        memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);

        /* Store encoded m and l. */
        cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
                                 | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
2048 memset(cctx->aes.ccm.nonce.b + 1, 0,
2049 sizeof(cctx->aes.ccm.nonce.b) - 1);
2050 cctx->aes.ccm.blocks = 0;
2051
2052 cctx->aes.ccm.key_set = 1;
2053 }
2054
2055 if (iv != NULL) {
2056 memcpy(ctx->iv, iv, 15 - cctx->aes.ccm.l);
2057
2058 cctx->aes.ccm.iv_set = 1;
2059 }
2060
2061 return 1;
2062 }
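/*-
 * Illustrative sketch (not part of the build): the flags byte stored in
 * nonce.b[0] above follows RFC 3610: bits 0-2 encode L - 1, bits 3-5 encode
 * (M - 2) / 2 and bit 6 is the Adata flag (set once AAD is processed). For
 * the defaults L = 8, M = 12 this gives 0x07 | (0x05 << 3) = 0x2f.
 */
#if 0
static unsigned char ccm_b0_flags(int L, int M, int have_aad)
{
    return (unsigned char)(((L - 1) & 0x7)
                           | ((((M - 2) >> 1) & 0x7) << 3)
                           | (have_aad ? 0x40 : 0));
}
#endif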
2063
2064 /*-
2065 * Called from EVP layer to initialize context, process additional
2066 * authenticated data, en/de-crypt plain/cipher-text and authenticate
2067 * plaintext or process a TLS packet, depending on context. Returns bytes
2068 * written on success. Otherwise -1 is returned.
2069 */
2070 static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2071 const unsigned char *in, size_t len)
2072 {
2073 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2074 const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
2075 int rv;
2076 unsigned char *buf;
2077
2078 if (!cctx->aes.ccm.key_set)
2079 return -1;
2080
2081 if (cctx->aes.ccm.tls_aad_len >= 0)
2082 return s390x_aes_ccm_tls_cipher(ctx, out, in, len);
2083
2084 /*-
2085 * Final(): Does not return any data. Recall that CCM is mac-then-encrypt,
2086 * so integrity must already be checked at Update(), i.e. before
2087 * potentially corrupted data is output.
2088 */
2089 if (in == NULL && out != NULL)
2090 return 0;
2091
2092 if (!cctx->aes.ccm.iv_set)
2093 return -1;
2094
2095 if (out == NULL) {
2096 /* Update(): Pass message length. */
2097 if (in == NULL) {
2098 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2099
2100 cctx->aes.ccm.len_set = 1;
2101 return len;
2102 }
2103
2104 /* Update(): Process aad. */
2105 if (!cctx->aes.ccm.len_set && len)
2106 return -1;
2107
2108 s390x_aes_ccm_aad(cctx, in, len);
2109 return len;
2110 }
2111
2112 /* The tag must be set before actually decrypting data */
2113 if (!enc && !cctx->aes.ccm.tag_set)
2114 return -1;
2115
2116 /* Update(): Process message. */
2117
2118 if (!cctx->aes.ccm.len_set) {
2119 /*-
2120 * In case message length was not previously set explicitly via
2121 * Update(), set it now.
2122 */
2123 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2124
2125 cctx->aes.ccm.len_set = 1;
2126 }
2127
2128 if (enc) {
2129 if (s390x_aes_ccm(cctx, in, out, len, enc))
2130 return -1;
2131
2132 cctx->aes.ccm.tag_set = 1;
2133 return len;
2134 } else {
2135 rv = -1;
2136
2137 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2138 buf = EVP_CIPHER_CTX_buf_noconst(ctx);
2139 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
2140 cctx->aes.ccm.m))
2141 rv = len;
2142 }
2143
2144 if (rv == -1)
2145 OPENSSL_cleanse(out, len);
2146
2147 cctx->aes.ccm.iv_set = 0;
2148 cctx->aes.ccm.tag_set = 0;
2149 cctx->aes.ccm.len_set = 0;
2150 return rv;
2151 }
2152 }
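/*-
 * A minimal sketch (not part of the build) of the EVP-level CCM call
 * sequence the function above implements: the total message length must be
 * passed in a NULL/NULL Update() call before any AAD. The buffers key,
 * nonce, aad, msg, out and tag are hypothetical; error checking is omitted
 * for brevity.
 */
#if 0
EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
int outl;

EVP_EncryptInit_ex(c, EVP_aes_128_ccm(), NULL, NULL, NULL);
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL);   /* L = 3 */
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 16, NULL);     /* M = 16 */
EVP_EncryptInit_ex(c, NULL, NULL, key, nonce);
EVP_EncryptUpdate(c, NULL, &outl, NULL, msglen);  /* total length first */
EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen);   /* then the AAD */
EVP_EncryptUpdate(c, out, &outl, msg, msglen);    /* then the message */
EVP_EncryptFinal_ex(c, out + outl, &outl);
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag);
EVP_CIPHER_CTX_free(c);
#endif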
2153
2154 /*-
2155 * Performs various operations on the context structure depending on control
2156 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
2157 * The code assumes a big-endian platform (s390x).
2158 */
2159 static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2160 {
2161 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
2162 unsigned char *buf;
2163 int enc, len;
2164
2165 switch (type) {
2166 case EVP_CTRL_INIT:
2167 cctx->aes.ccm.key_set = 0;
2168 cctx->aes.ccm.iv_set = 0;
2169 cctx->aes.ccm.l = 8;
2170 cctx->aes.ccm.m = 12;
2171 cctx->aes.ccm.tag_set = 0;
2172 cctx->aes.ccm.len_set = 0;
2173 cctx->aes.ccm.tls_aad_len = -1;
2174 return 1;
2175
2176 case EVP_CTRL_GET_IVLEN:
2177 *(int *)ptr = 15 - cctx->aes.ccm.l;
2178 return 1;
2179
2180 case EVP_CTRL_AEAD_TLS1_AAD:
2181 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2182 return 0;
2183
2184 /* Save the aad for later use. */
2185 buf = EVP_CIPHER_CTX_buf_noconst(c);
2186 memcpy(buf, ptr, arg);
2187 cctx->aes.ccm.tls_aad_len = arg;
2188
2189 len = buf[arg - 2] << 8 | buf[arg - 1];
2190 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
2191 return 0;
2192
2193 /* Correct length for explicit iv. */
2194 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
2195
2196 enc = EVP_CIPHER_CTX_is_encrypting(c);
2197 if (!enc) {
2198 if (len < cctx->aes.ccm.m)
2199 return 0;
2200
2201 /* Correct length for tag. */
2202 len -= cctx->aes.ccm.m;
2203 }
2204
2205 buf[arg - 2] = len >> 8;
2206 buf[arg - 1] = len & 0xff;
2207
2208 /* Extra padding: tag appended to record. */
2209 return cctx->aes.ccm.m;
2210
2211 case EVP_CTRL_CCM_SET_IV_FIXED:
2212 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
2213 return 0;
2214
2215 /* Copy to first part of the iv. */
2216 memcpy(c->iv, ptr, arg);
2217 return 1;
2218
2219 case EVP_CTRL_AEAD_SET_IVLEN:
2220 arg = 15 - arg;
2221 /* fall-through */
2222
2223 case EVP_CTRL_CCM_SET_L:
2224 if (arg < 2 || arg > 8)
2225 return 0;
2226
2227 cctx->aes.ccm.l = arg;
2228 return 1;
2229
2230 case EVP_CTRL_AEAD_SET_TAG:
2231 if ((arg & 1) || arg < 4 || arg > 16)
2232 return 0;
2233
2234 enc = EVP_CIPHER_CTX_is_encrypting(c);
2235 if (enc && ptr)
2236 return 0;
2237
2238 if (ptr) {
2239 cctx->aes.ccm.tag_set = 1;
2240 buf = EVP_CIPHER_CTX_buf_noconst(c);
2241 memcpy(buf, ptr, arg);
2242 }
2243
2244 cctx->aes.ccm.m = arg;
2245 return 1;
2246
2247 case EVP_CTRL_AEAD_GET_TAG:
2248 enc = EVP_CIPHER_CTX_is_encrypting(c);
2249 if (!enc || !cctx->aes.ccm.tag_set)
2250 return 0;
2251
2252 if (arg < cctx->aes.ccm.m)
2253 return 0;
2254
2255 memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2256 cctx->aes.ccm.tag_set = 0;
2257 cctx->aes.ccm.iv_set = 0;
2258 cctx->aes.ccm.len_set = 0;
2259 return 1;
2260
2261 case EVP_CTRL_COPY:
2262 return 1;
2263
2264 default:
2265 return -1;
2266 }
2267 }
2268
2269 # define s390x_aes_ccm_cleanup aes_ccm_cleanup
2270
2271 # ifndef OPENSSL_NO_OCB
2272 # define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
2273
2274 # define s390x_aes_ocb_init_key aes_ocb_init_key
2275 static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2276 const unsigned char *iv, int enc);
2277 # define s390x_aes_ocb_cipher aes_ocb_cipher
2278 static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2279 const unsigned char *in, size_t len);
2280 # define s390x_aes_ocb_cleanup aes_ocb_cleanup
2281 static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
2282 # define s390x_aes_ocb_ctrl aes_ocb_ctrl
2283 static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
2284 # endif
2285
2286 # ifndef OPENSSL_NO_SIV
2287 # define S390X_AES_SIV_CTX EVP_AES_SIV_CTX
2288
2289 # define s390x_aes_siv_init_key aes_siv_init_key
2290 # define s390x_aes_siv_cipher aes_siv_cipher
2291 # define s390x_aes_siv_cleanup aes_siv_cleanup
2292 # define s390x_aes_siv_ctrl aes_siv_ctrl
2293 # endif
2294
2295 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
2296 MODE,flags) \
2297 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2298 nid##_##keylen##_##nmode,blocksize, \
2299 keylen / 8, \
2300 ivlen, \
2301 flags | EVP_CIPH_##MODE##_MODE, \
2302 EVP_ORIG_GLOBAL, \
2303 s390x_aes_##mode##_init_key, \
2304 s390x_aes_##mode##_cipher, \
2305 NULL, \
2306 sizeof(S390X_AES_##MODE##_CTX), \
2307 NULL, \
2308 NULL, \
2309 NULL, \
2310 NULL \
2311 }; \
2312 static const EVP_CIPHER aes_##keylen##_##mode = { \
2313 nid##_##keylen##_##nmode, \
2314 blocksize, \
2315 keylen / 8, \
2316 ivlen, \
2317 flags | EVP_CIPH_##MODE##_MODE, \
2318 EVP_ORIG_GLOBAL, \
2319 aes_init_key, \
2320 aes_##mode##_cipher, \
2321 NULL, \
2322 sizeof(EVP_AES_KEY), \
2323 NULL, \
2324 NULL, \
2325 NULL, \
2326 NULL \
2327 }; \
2328 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2329 { \
2330 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2331 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2332 }
2333
2334 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
2335 static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2336 nid##_##keylen##_##mode, \
2337 blocksize, \
2338 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2339 ivlen, \
2340 flags | EVP_CIPH_##MODE##_MODE, \
2341 EVP_ORIG_GLOBAL, \
2342 s390x_aes_##mode##_init_key, \
2343 s390x_aes_##mode##_cipher, \
2344 s390x_aes_##mode##_cleanup, \
2345 sizeof(S390X_AES_##MODE##_CTX), \
2346 NULL, \
2347 NULL, \
2348 s390x_aes_##mode##_ctrl, \
2349 NULL \
2350 }; \
2351 static const EVP_CIPHER aes_##keylen##_##mode = { \
2352 nid##_##keylen##_##mode,blocksize, \
2353 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2354 ivlen, \
2355 flags | EVP_CIPH_##MODE##_MODE, \
2356 EVP_ORIG_GLOBAL, \
2357 aes_##mode##_init_key, \
2358 aes_##mode##_cipher, \
2359 aes_##mode##_cleanup, \
2360 sizeof(EVP_AES_##MODE##_CTX), \
2361 NULL, \
2362 NULL, \
2363 aes_##mode##_ctrl, \
2364 NULL \
2365 }; \
2366 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2367 { \
2368 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2369 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2370 }
2371
2372 #else
2373
2374 # define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
2375 static const EVP_CIPHER aes_##keylen##_##mode = { \
2376 nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
2377 flags|EVP_CIPH_##MODE##_MODE, \
2378 EVP_ORIG_GLOBAL, \
2379 aes_init_key, \
2380 aes_##mode##_cipher, \
2381 NULL, \
2382 sizeof(EVP_AES_KEY), \
2383 NULL,NULL,NULL,NULL }; \
2384 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2385 { return &aes_##keylen##_##mode; }
2386
2387 # define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
2388 static const EVP_CIPHER aes_##keylen##_##mode = { \
2389 nid##_##keylen##_##mode,blocksize, \
2390 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
2391 ivlen, \
2392 flags|EVP_CIPH_##MODE##_MODE, \
2393 EVP_ORIG_GLOBAL, \
2394 aes_##mode##_init_key, \
2395 aes_##mode##_cipher, \
2396 aes_##mode##_cleanup, \
2397 sizeof(EVP_AES_##MODE##_CTX), \
2398 NULL,NULL,aes_##mode##_ctrl,NULL }; \
2399 const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2400 { return &aes_##keylen##_##mode; }
2401
2402 #endif
2403
2404 #define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
2405 BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2406 BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2407 BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2408 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2409 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags) \
2410 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags) \
2411 BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
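/*-
 * For reference, a sketch of what one instantiation expands to in a
 * non-s390x build: BLOCK_CIPHER_generic_pack(NID_aes, 128, 0) defines,
 * among others, a static EVP_CIPHER aes_128_cbc wired to aes_init_key and
 * aes_cbc_cipher, plus the public accessor:
 */
#if 0
const EVP_CIPHER *EVP_aes_128_cbc(void)
{
    return &aes_128_cbc;
}
#endif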
2412
2413 static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2414 const unsigned char *iv, int enc)
2415 {
2416 int ret, mode;
2417 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2418 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
2419
2420 if (keylen <= 0) {
2421 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2422 return 0;
2423 }
2424
2425 mode = EVP_CIPHER_CTX_get_mode(ctx);
2426 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
2427 && !enc) {
2428 #ifdef HWAES_CAPABLE
2429 if (HWAES_CAPABLE) {
2430 ret = HWAES_set_decrypt_key(key, keylen, &dat->ks.ks);
2431 dat->block = (block128_f) HWAES_decrypt;
2432 dat->stream.cbc = NULL;
2433 # ifdef HWAES_cbc_encrypt
2434 if (mode == EVP_CIPH_CBC_MODE)
2435 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2436 # endif
2437 } else
2438 #endif
2439 #ifdef BSAES_CAPABLE
2440 if (BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE) {
2441 ret = AES_set_decrypt_key(key, keylen, &dat->ks.ks);
2442 dat->block = (block128_f) AES_decrypt;
2443 dat->stream.cbc = (cbc128_f) ossl_bsaes_cbc_encrypt;
2444 } else
2445 #endif
2446 #ifdef VPAES_CAPABLE
2447 if (VPAES_CAPABLE) {
2448 ret = vpaes_set_decrypt_key(key, keylen, &dat->ks.ks);
2449 dat->block = (block128_f) vpaes_decrypt;
2450 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2451 (cbc128_f) vpaes_cbc_encrypt : NULL;
2452 } else
2453 #endif
2454 {
2455 ret = AES_set_decrypt_key(key, keylen, &dat->ks.ks);
2456 dat->block = (block128_f) AES_decrypt;
2457 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2458 (cbc128_f) AES_cbc_encrypt : NULL;
2459 }
2460 } else
2461 #ifdef HWAES_CAPABLE
2462 if (HWAES_CAPABLE) {
2463 ret = HWAES_set_encrypt_key(key, keylen, &dat->ks.ks);
2464 dat->block = (block128_f) HWAES_encrypt;
2465 dat->stream.cbc = NULL;
2466 # ifdef HWAES_cbc_encrypt
2467 if (mode == EVP_CIPH_CBC_MODE)
2468 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2469 else
2470 # endif
2471 # ifdef HWAES_ctr32_encrypt_blocks
2472 if (mode == EVP_CIPH_CTR_MODE)
2473 dat->stream.ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2474 else
2475 # endif
2476 (void)0; /* terminate potentially open 'else' */
2477 } else
2478 #endif
2479 #ifdef BSAES_CAPABLE
2480 if (BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE) {
2481 ret = AES_set_encrypt_key(key, keylen, &dat->ks.ks);
2482 dat->block = (block128_f) AES_encrypt;
2483 dat->stream.ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2484 } else
2485 #endif
2486 #ifdef VPAES_CAPABLE
2487 if (VPAES_CAPABLE) {
2488 ret = vpaes_set_encrypt_key(key, keylen, &dat->ks.ks);
2489 dat->block = (block128_f) vpaes_encrypt;
2490 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2491 (cbc128_f) vpaes_cbc_encrypt : NULL;
2492 } else
2493 #endif
2494 {
2495 ret = AES_set_encrypt_key(key, keylen, &dat->ks.ks);
2496 dat->block = (block128_f) AES_encrypt;
2497 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2498 (cbc128_f) AES_cbc_encrypt : NULL;
2499 #ifdef AES_CTR_ASM
2500 if (mode == EVP_CIPH_CTR_MODE)
2501 dat->stream.ctr = (ctr128_f) AES_ctr32_encrypt;
2502 #endif
2503 }
2504
2505 if (ret < 0) {
2506 ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
2507 return 0;
2508 }
2509
2510 return 1;
2511 }
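/*-
 * The function above selects exactly one implementation at run time via a
 * cascade of conditionally compiled if/else blocks. A minimal sketch of the
 * pattern, with hypothetical FOO_CAPABLE/BAR_CAPABLE capability macros:
 */
#if 0
# ifdef FOO_CAPABLE
    if (FOO_CAPABLE) {
        /* use FOO's key setup and block functions */
    } else
# endif
# ifdef BAR_CAPABLE
    if (BAR_CAPABLE) {
        /* use BAR's key setup and block functions */
    } else
# endif
    {
        /* portable C fallback */
    }
#endif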
2512
2513 static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2514 const unsigned char *in, size_t len)
2515 {
2516 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2517
2518 if (dat->stream.cbc)
2519 (*dat->stream.cbc) (in, out, len, &dat->ks, ctx->iv,
2520 EVP_CIPHER_CTX_is_encrypting(ctx));
2521 else if (EVP_CIPHER_CTX_is_encrypting(ctx))
2522 CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv,
2523 dat->block);
2524 else
2525 CRYPTO_cbc128_decrypt(in, out, len, &dat->ks,
2526 ctx->iv, dat->block);
2527
2528 return 1;
2529 }
2530
2531 static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2532 const unsigned char *in, size_t len)
2533 {
2534 size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);
2535 size_t i;
2536 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2537
2538 if (len < bl)
2539 return 1;
2540
2541 for (i = 0, len -= bl; i <= len; i += bl)
2542 (*dat->block) (in + i, out + i, &dat->ks);
2543
2544 return 1;
2545 }
2546
2547 static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2548 const unsigned char *in, size_t len)
2549 {
2550 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2551
2552 int num = EVP_CIPHER_CTX_get_num(ctx);
2553 CRYPTO_ofb128_encrypt(in, out, len, &dat->ks,
2554 ctx->iv, &num, dat->block);
2555 EVP_CIPHER_CTX_set_num(ctx, num);
2556 return 1;
2557 }
2558
2559 static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2560 const unsigned char *in, size_t len)
2561 {
2562 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2563
2564 int num = EVP_CIPHER_CTX_get_num(ctx);
2565 CRYPTO_cfb128_encrypt(in, out, len, &dat->ks,
2566 ctx->iv, &num,
2567 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2568 EVP_CIPHER_CTX_set_num(ctx, num);
2569 return 1;
2570 }
2571
2572 static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2573 const unsigned char *in, size_t len)
2574 {
2575 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2576
2577 int num = EVP_CIPHER_CTX_get_num(ctx);
2578 CRYPTO_cfb128_8_encrypt(in, out, len, &dat->ks,
2579 ctx->iv, &num,
2580 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2581 EVP_CIPHER_CTX_set_num(ctx, num);
2582 return 1;
2583 }
2584
2585 static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2586 const unsigned char *in, size_t len)
2587 {
2588 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2589
2590 if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) {
2591 int num = EVP_CIPHER_CTX_get_num(ctx);
2592 CRYPTO_cfb128_1_encrypt(in, out, len, &dat->ks,
2593 ctx->iv, &num,
2594 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2595 EVP_CIPHER_CTX_set_num(ctx, num);
2596 return 1;
2597 }
2598
2599 while (len >= MAXBITCHUNK) {
2600 int num = EVP_CIPHER_CTX_get_num(ctx);
2601 CRYPTO_cfb128_1_encrypt(in, out, MAXBITCHUNK * 8, &dat->ks,
2602 ctx->iv, &num,
2603 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2604 EVP_CIPHER_CTX_set_num(ctx, num);
2605 len -= MAXBITCHUNK;
2606 out += MAXBITCHUNK;
2607 in += MAXBITCHUNK;
2608 }
2609 if (len) {
2610 int num = EVP_CIPHER_CTX_get_num(ctx);
2611 CRYPTO_cfb128_1_encrypt(in, out, len * 8, &dat->ks,
2612 ctx->iv, &num,
2613 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2614 EVP_CIPHER_CTX_set_num(ctx, num);
2615 }
2616
2617 return 1;
2618 }
2619
2620 static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2621 const unsigned char *in, size_t len)
2622 {
2623 int n = EVP_CIPHER_CTX_get_num(ctx);
2624 unsigned int num;
2625 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2626
2627 if (n < 0)
2628 return 0;
2629 num = (unsigned int)n;
2630
2631 if (dat->stream.ctr)
2632 CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks,
2633 ctx->iv,
2634 EVP_CIPHER_CTX_buf_noconst(ctx),
2635 &num, dat->stream.ctr);
2636 else
2637 CRYPTO_ctr128_encrypt(in, out, len, &dat->ks,
2638 ctx->iv,
2639 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
2640 dat->block);
2641 EVP_CIPHER_CTX_set_num(ctx, num);
2642 return 1;
2643 }
2644
2645 BLOCK_CIPHER_generic_pack(NID_aes, 128, 0)
2646 BLOCK_CIPHER_generic_pack(NID_aes, 192, 0)
2647 BLOCK_CIPHER_generic_pack(NID_aes, 256, 0)
2648
2649 static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
2650 {
2651 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2652 if (gctx == NULL)
2653 return 0;
2654 OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
2655 if (gctx->iv != c->iv)
2656 OPENSSL_free(gctx->iv);
2657 return 1;
2658 }
2659
2660 static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2661 {
2662 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2663 switch (type) {
2664 case EVP_CTRL_INIT:
2665 gctx->key_set = 0;
2666 gctx->iv_set = 0;
2667 gctx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
2668 gctx->iv = c->iv;
2669 gctx->taglen = -1;
2670 gctx->iv_gen = 0;
2671 gctx->tls_aad_len = -1;
2672 return 1;
2673
2674 case EVP_CTRL_GET_IVLEN:
2675 *(int *)ptr = gctx->ivlen;
2676 return 1;
2677
2678 case EVP_CTRL_AEAD_SET_IVLEN:
2679 if (arg <= 0)
2680 return 0;
2681 /* Allocate memory for IV if needed */
2682 if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
2683 if (gctx->iv != c->iv)
2684 OPENSSL_free(gctx->iv);
2685 if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) {
2686 ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
2687 return 0;
2688 }
2689 }
2690 gctx->ivlen = arg;
2691 return 1;
2692
2693 case EVP_CTRL_AEAD_SET_TAG:
2694 if (arg <= 0 || arg > 16 || c->encrypt)
2695 return 0;
2696 memcpy(c->buf, ptr, arg);
2697 gctx->taglen = arg;
2698 return 1;
2699
2700 case EVP_CTRL_AEAD_GET_TAG:
2701 if (arg <= 0 || arg > 16 || !c->encrypt
2702 || gctx->taglen < 0)
2703 return 0;
2704 memcpy(ptr, c->buf, arg);
2705 return 1;
2706
2707 case EVP_CTRL_GCM_SET_IV_FIXED:
2708 /* Special case: -1 length restores whole IV */
2709 if (arg == -1) {
2710 memcpy(gctx->iv, ptr, gctx->ivlen);
2711 gctx->iv_gen = 1;
2712 return 1;
2713 }
2714 /*
2715 * Fixed field must be at least 4 bytes and invocation field at least
2716 * 8.
2717 */
2718 if ((arg < 4) || (gctx->ivlen - arg) < 8)
2719 return 0;
2720 if (arg)
2721 memcpy(gctx->iv, ptr, arg);
2722 if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
2723 return 0;
2724 gctx->iv_gen = 1;
2725 return 1;
2726
2727 case EVP_CTRL_GCM_IV_GEN:
2728 if (gctx->iv_gen == 0 || gctx->key_set == 0)
2729 return 0;
2730 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2731 if (arg <= 0 || arg > gctx->ivlen)
2732 arg = gctx->ivlen;
2733 memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
2734 /*
2735 * The invocation field is at least 8 bytes, so there is no need to
2736 * check for wraparound or to increment more than the last 8 bytes.
2737 */
2738 ctr64_inc(gctx->iv + gctx->ivlen - 8);
2739 gctx->iv_set = 1;
2740 return 1;
2741
2742 case EVP_CTRL_GCM_SET_IV_INV:
2743 if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
2744 return 0;
2745 memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
2746 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2747 gctx->iv_set = 1;
2748 return 1;
2749
2750 case EVP_CTRL_AEAD_TLS1_AAD:
2751 /* Save the AAD for later use */
2752 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2753 return 0;
2754 memcpy(c->buf, ptr, arg);
2755 gctx->tls_aad_len = arg;
2756 gctx->tls_enc_records = 0;
2757 {
2758 unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
2759 /* Correct length for explicit IV */
2760 if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
2761 return 0;
2762 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
2763 /* If decrypting correct for tag too */
2764 if (!c->encrypt) {
2765 if (len < EVP_GCM_TLS_TAG_LEN)
2766 return 0;
2767 len -= EVP_GCM_TLS_TAG_LEN;
2768 }
2769 c->buf[arg - 2] = len >> 8;
2770 c->buf[arg - 1] = len & 0xff;
2771 }
2772 /* Extra padding: tag appended to record */
2773 return EVP_GCM_TLS_TAG_LEN;
2774
2775 case EVP_CTRL_COPY:
2776 {
2777 EVP_CIPHER_CTX *out = ptr;
2778 EVP_AES_GCM_CTX *gctx_out = EVP_C_DATA(EVP_AES_GCM_CTX,out);
2779 if (gctx->gcm.key) {
2780 if (gctx->gcm.key != &gctx->ks)
2781 return 0;
2782 gctx_out->gcm.key = &gctx_out->ks;
2783 }
2784 if (gctx->iv == c->iv)
2785 gctx_out->iv = out->iv;
2786 else {
2787 if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) {
2788 ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
2789 return 0;
2790 }
2791 memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
2792 }
2793 return 1;
2794 }
2795
2796 default:
2797 return -1;
2798
2799 }
2800 }
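/*-
 * A sketch (not part of the build) of how the two TLS IV controls above
 * are used together; salt and seq are hypothetical buffers and error
 * checking is omitted. The 12-byte GCM nonce is split into a 4-byte fixed
 * field from the handshake and an 8-byte invocation field that is written
 * to the wire and incremented per record.
 */
#if 0
unsigned char salt[4];   /* fixed field, e.g. client_write_IV */
unsigned char seq[8];    /* explicit IV emitted with each record */

EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_GCM_SET_IV_FIXED, sizeof(salt), salt);
/* per record, on the encrypting side: */
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_GCM_IV_GEN, sizeof(seq), seq);
#endif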
2801
2802 static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2803 const unsigned char *iv, int enc)
2804 {
2805 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2806
2807 if (iv == NULL && key == NULL)
2808 return 1;
2809
2810 if (key != NULL) {
2811 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
2812
2813 if (keylen <= 0) {
2814 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2815 return 0;
2816 }
2817 do {
2818 #ifdef HWAES_CAPABLE
2819 if (HWAES_CAPABLE) {
2820 HWAES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2821 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2822 (block128_f) HWAES_encrypt);
2823 # ifdef HWAES_ctr32_encrypt_blocks
2824 gctx->ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2825 # else
2826 gctx->ctr = NULL;
2827 # endif
2828 break;
2829 } else
2830 #endif
2831 #ifdef BSAES_CAPABLE
2832 if (BSAES_CAPABLE) {
2833 AES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2834 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2835 (block128_f) AES_encrypt);
2836 gctx->ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2837 break;
2838 } else
2839 #endif
2840 #ifdef VPAES_CAPABLE
2841 if (VPAES_CAPABLE) {
2842 vpaes_set_encrypt_key(key, keylen, &gctx->ks.ks);
2843 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2844 (block128_f) vpaes_encrypt);
2845 gctx->ctr = NULL;
2846 break;
2847 } else
2848 #endif
2849 (void)0; /* terminate potentially open 'else' */
2850
2851 AES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2852 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2853 (block128_f) AES_encrypt);
2854 #ifdef AES_CTR_ASM
2855 gctx->ctr = (ctr128_f) AES_ctr32_encrypt;
2856 #else
2857 gctx->ctr = NULL;
2858 #endif
2859 } while (0);
2860
2861 /*
2862 * If we have an iv we can set it directly, otherwise use the saved IV.
2863 */
2864 if (iv == NULL && gctx->iv_set)
2865 iv = gctx->iv;
2866 if (iv) {
2867 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2868 gctx->iv_set = 1;
2869 }
2870 gctx->key_set = 1;
2871 } else {
2872 /* If key set use IV, otherwise copy */
2873 if (gctx->key_set)
2874 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2875 else
2876 memcpy(gctx->iv, iv, gctx->ivlen);
2877 gctx->iv_set = 1;
2878 gctx->iv_gen = 0;
2879 }
2880 return 1;
2881 }
2882
2883 /*
2884 * Handle TLS GCM packet format. This consists of the last portion of the IV
2885 * followed by the payload and finally the tag. On encrypt generate IV,
2886 * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
2887 * and verify tag.
2888 */
2889
2890 static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2891 const unsigned char *in, size_t len)
2892 {
2893 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2894 int rv = -1;
2895 /* Encrypt/decrypt must be performed in place */
2896 if (out != in
2897 || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
2898 return -1;
2899
2900 /*
2901 * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
2902 * Requirements from SP 800-38D". The requirement is for one party to the
2903 * communication to fail after 2^64 - 1 records. We do this on the encrypting
2904 * side only.
2905 */
2906 if (EVP_CIPHER_CTX_is_encrypting(ctx) && ++gctx->tls_enc_records == 0) {
2907 ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
2908 goto err;
2909 }
2910
2911 /*
2912 * Set IV from start of buffer or generate IV and write to start of
2913 * buffer.
2914 */
2915 if (EVP_CIPHER_CTX_ctrl(ctx,
2916 EVP_CIPHER_CTX_is_encrypting(ctx) ?
2917 EVP_CTRL_GCM_IV_GEN : EVP_CTRL_GCM_SET_IV_INV,
2918 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
2919 goto err;
2920 /* Use saved AAD */
2921 if (CRYPTO_gcm128_aad(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx),
2922 gctx->tls_aad_len))
2923 goto err;
2924 /* Fix buffer and length to point to payload */
2925 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2926 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2927 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2928 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
2929 /* Encrypt payload */
2930 if (gctx->ctr) {
2931 size_t bulk = 0;
2932 #if defined(AES_GCM_ASM)
2933 if (len >= 32 && AES_GCM_ASM(gctx)) {
2934 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2935 return -1;
2936
2937 bulk = AES_gcm_encrypt(in, out, len,
2938 gctx->gcm.key,
2939 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2940 gctx->gcm.len.u[1] += bulk;
2941 }
2942 #endif
2943 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2944 in + bulk,
2945 out + bulk,
2946 len - bulk, gctx->ctr))
2947 goto err;
2948 } else {
2949 size_t bulk = 0;
2950 #if defined(AES_GCM_ASM2)
2951 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2952 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2953 return -1;
2954
2955 bulk = AES_gcm_encrypt(in, out, len,
2956 gctx->gcm.key,
2957 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2958 gctx->gcm.len.u[1] += bulk;
2959 }
2960 #endif
2961 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
2962 in + bulk, out + bulk, len - bulk))
2963 goto err;
2964 }
2965 out += len;
2966 /* Finally write tag */
2967 CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN);
2968 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2969 } else {
2970 /* Decrypt */
2971 if (gctx->ctr) {
2972 size_t bulk = 0;
2973 #if defined(AES_GCM_ASM)
2974 if (len >= 16 && AES_GCM_ASM(gctx)) {
2975 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2976 return -1;
2977
2978 bulk = AES_gcm_decrypt(in, out, len,
2979 gctx->gcm.key,
2980 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2981 gctx->gcm.len.u[1] += bulk;
2982 }
2983 #endif
2984 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
2985 in + bulk,
2986 out + bulk,
2987 len - bulk, gctx->ctr))
2988 goto err;
2989 } else {
2990 size_t bulk = 0;
2991 #if defined(AES_GCM_ASM2)
2992 if (len >= 16 && AES_GCM_ASM2(gctx)) {
2993 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2994 return -1;
2995
2996 bulk = AES_gcm_decrypt(in, out, len,
2997 gctx->gcm.key,
2998 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2999 gctx->gcm.len.u[1] += bulk;
3000 }
3001 #endif
3002 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3003 in + bulk, out + bulk, len - bulk))
3004 goto err;
3005 }
3006 /* Retrieve tag */
3007 CRYPTO_gcm128_tag(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx),
3008 EVP_GCM_TLS_TAG_LEN);
3009 /* If tag mismatch wipe buffer */
3010 if (CRYPTO_memcmp(EVP_CIPHER_CTX_buf_noconst(ctx), in + len,
3011 EVP_GCM_TLS_TAG_LEN)) {
3012 OPENSSL_cleanse(out, len);
3013 goto err;
3014 }
3015 rv = len;
3016 }
3017
3018 err:
3019 gctx->iv_set = 0;
3020 gctx->tls_aad_len = -1;
3021 return rv;
3022 }
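/*-
 * For reference, the in-place record layout assumed above (out == in):
 *
 *   | 8-byte explicit IV | len - 8 - 16 bytes of payload | 16-byte tag |
 *
 * hence the minimum length check of EVP_GCM_TLS_EXPLICIT_IV_LEN +
 * EVP_GCM_TLS_TAG_LEN and, on the encrypt side, a return value of
 * payload length + 8 + 16.
 */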
3023
3024 #ifdef FIPS_MODULE
3025 /*
3026 * See SP 800-38D (GCM) Section 8 "Uniqueness Requirement on IVs and Keys"
3027 *
3028 * See also 8.2.2 RBG-based construction.
3029 * Random construction consists of a free field (which can be NULL) and a
3030 * random field which will use a DRBG that can return at least 96 bits of
3031 * entropy strength. (The DRBG must be seeded by the FIPS module).
3032 */
3033 static int aes_gcm_iv_generate(EVP_AES_GCM_CTX *gctx, int offset)
3034 {
3035 int sz = gctx->ivlen - offset;
3036
3037 /* Must be at least 96 bits */
3038 if (sz <= 0 || gctx->ivlen < 12)
3039 return 0;
3040
3041 /* Use DRBG to generate random iv */
3042 if (RAND_bytes(gctx->iv + offset, sz) <= 0)
3043 return 0;
3044 return 1;
3045 }
3046 #endif /* FIPS_MODULE */
3047
3048 static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3049 const unsigned char *in, size_t len)
3050 {
3051 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
3052
3053 /* If not set up, return error */
3054 if (!gctx->key_set)
3055 return -1;
3056
3057 if (gctx->tls_aad_len >= 0)
3058 return aes_gcm_tls_cipher(ctx, out, in, len);
3059
3060 #ifdef FIPS_MODULE
3061 /*
3062 * FIPS requires generation of AES-GCM IVs inside the FIPS module.
3063 * The IV can still be set externally (the security policy will state that
3064 * this is not FIPS compliant). There are some applications
3065 * where setting the IV externally is the only option available.
3066 */
3067 if (!gctx->iv_set) {
3068 if (!EVP_CIPHER_CTX_is_encrypting(ctx) || !aes_gcm_iv_generate(gctx, 0))
3069 return -1;
3070 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
3071 gctx->iv_set = 1;
3072 gctx->iv_gen_rand = 1;
3073 }
3074 #else
3075 if (!gctx->iv_set)
3076 return -1;
3077 #endif /* FIPS_MODULE */
3078
3079 if (in) {
3080 if (out == NULL) {
3081 if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
3082 return -1;
3083 } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3084 if (gctx->ctr) {
3085 size_t bulk = 0;
3086 #if defined(AES_GCM_ASM)
3087 if (len >= 32 && AES_GCM_ASM(gctx)) {
3088 size_t res = (16 - gctx->gcm.mres) % 16;
3089
3090 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3091 return -1;
3092
3093 bulk = AES_gcm_encrypt(in + res,
3094 out + res, len - res,
3095 gctx->gcm.key, gctx->gcm.Yi.c,
3096 gctx->gcm.Xi.u);
3097 gctx->gcm.len.u[1] += bulk;
3098 bulk += res;
3099 }
3100 #endif
3101 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
3102 in + bulk,
3103 out + bulk,
3104 len - bulk, gctx->ctr))
3105 return -1;
3106 } else {
3107 size_t bulk = 0;
3108 #if defined(AES_GCM_ASM2)
3109 if (len >= 32 && AES_GCM_ASM2(gctx)) {
3110 size_t res = (16 - gctx->gcm.mres) % 16;
3111
3112 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3113 return -1;
3114
3115 bulk = AES_gcm_encrypt(in + res,
3116 out + res, len - res,
3117 gctx->gcm.key, gctx->gcm.Yi.c,
3118 gctx->gcm.Xi.u);
3119 gctx->gcm.len.u[1] += bulk;
3120 bulk += res;
3121 }
3122 #endif
3123 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3124 in + bulk, out + bulk, len - bulk))
3125 return -1;
3126 }
3127 } else {
3128 if (gctx->ctr) {
3129 size_t bulk = 0;
3130 #if defined(AES_GCM_ASM)
3131 if (len >= 16 && AES_GCM_ASM(gctx)) {
3132 size_t res = (16 - gctx->gcm.mres) % 16;
3133
3134 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3135 return -1;
3136
3137 bulk = AES_gcm_decrypt(in + res,
3138 out + res, len - res,
3139 gctx->gcm.key,
3140 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3141 gctx->gcm.len.u[1] += bulk;
3142 bulk += res;
3143 }
3144 #endif
3145 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3146 in + bulk,
3147 out + bulk,
3148 len - bulk, gctx->ctr))
3149 return -1;
3150 } else {
3151 size_t bulk = 0;
3152 #if defined(AES_GCM_ASM2)
3153 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3154 size_t res = (16 - gctx->gcm.mres) % 16;
3155
3156 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3157 return -1;
3158
3159 bulk = AES_gcm_decrypt(in + res,
3160 out + res, len - res,
3161 gctx->gcm.key,
3162 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3163 gctx->gcm.len.u[1] += bulk;
3164 bulk += res;
3165 }
3166 #endif
3167 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3168 in + bulk, out + bulk, len - bulk))
3169 return -1;
3170 }
3171 }
3172 return len;
3173 } else {
3174 if (!EVP_CIPHER_CTX_is_encrypting(ctx)) {
3175 if (gctx->taglen < 0)
3176 return -1;
3177 if (CRYPTO_gcm128_finish(&gctx->gcm,
3178 EVP_CIPHER_CTX_buf_noconst(ctx),
3179 gctx->taglen) != 0)
3180 return -1;
3181 gctx->iv_set = 0;
3182 return 0;
3183 }
3184 CRYPTO_gcm128_tag(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx), 16);
3185 gctx->taglen = 16;
3186 /* Don't reuse the IV */
3187 gctx->iv_set = 0;
3188 return 0;
3189 }
3190
3191 }
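/*-
 * A minimal decrypt-side sketch (not part of the build) of the Update()
 * conventions implemented above: out == NULL passes AAD, and the tag must
 * be supplied before Final(), which performs the verification. The buffers
 * key, iv, aad, ct, pt and tag are hypothetical; error checking is omitted.
 */
#if 0
EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
int outl, ok;

EVP_DecryptInit_ex(c, EVP_aes_256_gcm(), NULL, key, iv);
EVP_DecryptUpdate(c, NULL, &outl, aad, aadlen);      /* AAD pass */
EVP_DecryptUpdate(c, pt, &outl, ct, ctlen);          /* payload */
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 16, tag);
ok = EVP_DecryptFinal_ex(c, pt + outl, &outl);       /* 0 on tag mismatch */
EVP_CIPHER_CTX_free(c);
#endif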
3192
3193 #define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \
3194 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3195 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3196 | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH)
3197
3198 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM,
3199 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3200 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, gcm, GCM,
3201 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3202 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, gcm, GCM,
3203 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3204
3205 static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3206 {
3207 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX, c);
3208
3209 if (type == EVP_CTRL_COPY) {
3210 EVP_CIPHER_CTX *out = ptr;
3211 EVP_AES_XTS_CTX *xctx_out = EVP_C_DATA(EVP_AES_XTS_CTX,out);
3212
3213 if (xctx->xts.key1) {
3214 if (xctx->xts.key1 != &xctx->ks1)
3215 return 0;
3216 xctx_out->xts.key1 = &xctx_out->ks1;
3217 }
3218 if (xctx->xts.key2) {
3219 if (xctx->xts.key2 != &xctx->ks2)
3220 return 0;
3221 xctx_out->xts.key2 = &xctx_out->ks2;
3222 }
3223 return 1;
3224 } else if (type != EVP_CTRL_INIT)
3225 return -1;
3226 /* key1 and key2 are used as an indicator that both key and IV are set */
3227 xctx->xts.key1 = NULL;
3228 xctx->xts.key2 = NULL;
3229 return 1;
3230 }
3231
3232 static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3233 const unsigned char *iv, int enc)
3234 {
3235 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3236
3237 if (iv == NULL && key == NULL)
3238 return 1;
3239
3240 if (key != NULL) {
3241 do {
3242 /* The key is two half length keys in reality */
3243 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
3244 const int bytes = keylen / 2;
3245 const int bits = bytes * 8;
3246
3247 if (keylen <= 0) {
3248 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3249 return 0;
3250 }
3251 /*
3252 * Verify that the two keys are different.
3253 *
3254 * This addresses the vulnerability described in Rogaway's
3255 * September 2004 paper:
3256 *
3257 * "Efficient Instantiations of Tweakable Blockciphers and
3258 * Refinements to Modes OCB and PMAC".
3259 * (http://web.cs.ucdavis.edu/~rogaway/papers/offsets.pdf)
3260 *
3261 * FIPS 140-2 IG A.9 XTS-AES Key Generation Requirements states
3262 * that:
3263 * "The check for Key_1 != Key_2 shall be done at any place
3264 * BEFORE using the keys in the XTS-AES algorithm to process
3265 * data with them."
3266 */
3267 if ((!allow_insecure_decrypt || enc)
3268 && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
3269 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
3270 return 0;
3271 }
3272
3273 #ifdef AES_XTS_ASM
3274 xctx->stream = enc ? AES_xts_encrypt : AES_xts_decrypt;
3275 #else
3276 xctx->stream = NULL;
3277 #endif
3278 /* key_len is two AES keys */
3279 #ifdef HWAES_CAPABLE
3280 if (HWAES_CAPABLE) {
3281 if (enc) {
3282 HWAES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3283 xctx->xts.block1 = (block128_f) HWAES_encrypt;
3284 # ifdef HWAES_xts_encrypt
3285 xctx->stream = HWAES_xts_encrypt;
3286 # endif
3287 } else {
3288 HWAES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3289 xctx->xts.block1 = (block128_f) HWAES_decrypt;
3290 # ifdef HWAES_xts_decrypt
3291 xctx->stream = HWAES_xts_decrypt;
3292 # endif
3293 }
3294
3295 HWAES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3296 xctx->xts.block2 = (block128_f) HWAES_encrypt;
3297
3298 xctx->xts.key1 = &xctx->ks1;
3299 break;
3300 } else
3301 #endif
3302 #ifdef BSAES_CAPABLE
3303 if (BSAES_CAPABLE)
3304 xctx->stream = enc ? ossl_bsaes_xts_encrypt : ossl_bsaes_xts_decrypt;
3305 else
3306 #endif
3307 #ifdef VPAES_CAPABLE
3308 if (VPAES_CAPABLE) {
3309 if (enc) {
3310 vpaes_set_encrypt_key(key, bits, &xctx->ks1.ks);
3311 xctx->xts.block1 = (block128_f) vpaes_encrypt;
3312 } else {
3313 vpaes_set_decrypt_key(key, bits, &xctx->ks1.ks);
3314 xctx->xts.block1 = (block128_f) vpaes_decrypt;
3315 }
3316
3317 vpaes_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3318 xctx->xts.block2 = (block128_f) vpaes_encrypt;
3319
3320 xctx->xts.key1 = &xctx->ks1;
3321 break;
3322 } else
3323 #endif
3324 (void)0; /* terminate potentially open 'else' */
3325
3326 if (enc) {
3327 AES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3328 xctx->xts.block1 = (block128_f) AES_encrypt;
3329 } else {
3330 AES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3331 xctx->xts.block1 = (block128_f) AES_decrypt;
3332 }
3333
3334 AES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3335 xctx->xts.block2 = (block128_f) AES_encrypt;
3336
3337 xctx->xts.key1 = &xctx->ks1;
3338 } while (0);
3339 }
3340
3341 if (iv) {
3342 xctx->xts.key2 = &xctx->ks2;
3343 memcpy(ctx->iv, iv, 16);
3344 }
3345
3346 return 1;
3347 }
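/*-
 * Illustrative sketch (not part of the build) of the key layout checked
 * above: an XTS "key" is two independent AES keys of equal length, and
 * identical halves are rejected before use. The key buffer below is
 * hypothetical.
 */
#if 0
unsigned char key[64];                   /* key1 || key2 for AES-256-XTS */

if (CRYPTO_memcmp(key, key + 32, 32) == 0) {
    /* rejected: FIPS 140-2 IG A.9 requires Key_1 != Key_2 */
}
#endif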
3348
3349 static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3350 const unsigned char *in, size_t len)
3351 {
3352 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3353
3354 if (xctx->xts.key1 == NULL
3355 || xctx->xts.key2 == NULL
3356 || out == NULL
3357 || in == NULL
3358 || len < AES_BLOCK_SIZE)
3359 return 0;
3360
3361 /*
3362 * Impose a limit of 2^20 blocks per data unit as specified by
3363 * IEEE Std 1619-2018. The earlier and obsolete IEEE Std 1619-2007
3364 * indicated that this was a SHOULD NOT rather than a MUST NOT.
3365 * NIST SP 800-38E mandates the same limit.
3366 */
3367 if (len > XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE) {
3368 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DATA_UNIT_IS_TOO_LARGE);
3369 return 0;
3370 }
3371
3372 if (xctx->stream)
3373 (*xctx->stream) (in, out, len,
3374 xctx->xts.key1, xctx->xts.key2,
3375 ctx->iv);
3376 else if (CRYPTO_xts128_encrypt(&xctx->xts, ctx->iv, in, out, len,
3377 EVP_CIPHER_CTX_is_encrypting(ctx)))
3378 return 0;
3379 return 1;
3380 }
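/*-
 * The limit enforced above works out to
 * XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE = 2^20 * 16 = 2^24 bytes,
 * i.e. a single data unit (one tweak/IV) may cover at most 16 MiB.
 */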
3381
3382 #define aes_xts_cleanup NULL
3383
3384 #define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \
3385 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3386 | EVP_CIPH_CUSTOM_COPY)
3387
3388 BLOCK_CIPHER_custom(NID_aes, 128, 1, 16, xts, XTS, XTS_FLAGS)
3389 BLOCK_CIPHER_custom(NID_aes, 256, 1, 16, xts, XTS, XTS_FLAGS)
3390
3391 static int aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3392 {
3393 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,c);
3394 switch (type) {
3395 case EVP_CTRL_INIT:
3396 cctx->key_set = 0;
3397 cctx->iv_set = 0;
3398 cctx->L = 8;
3399 cctx->M = 12;
3400 cctx->tag_set = 0;
3401 cctx->len_set = 0;
3402 cctx->tls_aad_len = -1;
3403 return 1;
3404
3405 case EVP_CTRL_GET_IVLEN:
3406 *(int *)ptr = 15 - cctx->L;
3407 return 1;
3408
3409 case EVP_CTRL_AEAD_TLS1_AAD:
3410 /* Save the AAD for later use */
3411 if (arg != EVP_AEAD_TLS1_AAD_LEN)
3412 return 0;
3413 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3414 cctx->tls_aad_len = arg;
3415 {
3416 uint16_t len =
3417 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
3418 | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
3419 /* Correct length for explicit IV */
3420 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
3421 return 0;
3422 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
3423 /* If decrypting correct for tag too */
3424 if (!EVP_CIPHER_CTX_is_encrypting(c)) {
3425 if (len < cctx->M)
3426 return 0;
3427 len -= cctx->M;
3428 }
3429 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
3430 EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
3431 }
3432 /* Extra padding: tag appended to record */
3433 return cctx->M;
3434
3435 case EVP_CTRL_CCM_SET_IV_FIXED:
3436 /* Sanity check length */
3437 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
3438 return 0;
3439 /* Just copy to first part of IV */
3440 memcpy(c->iv, ptr, arg);
3441 return 1;
3442
3443 case EVP_CTRL_AEAD_SET_IVLEN:
3444 arg = 15 - arg;
3445 /* fall thru */
3446 case EVP_CTRL_CCM_SET_L:
3447 if (arg < 2 || arg > 8)
3448 return 0;
3449 cctx->L = arg;
3450 return 1;
3451
3452 case EVP_CTRL_AEAD_SET_TAG:
3453 if ((arg & 1) || arg < 4 || arg > 16)
3454 return 0;
3455 if (EVP_CIPHER_CTX_is_encrypting(c) && ptr)
3456 return 0;
3457 if (ptr) {
3458 cctx->tag_set = 1;
3459 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3460 }
3461 cctx->M = arg;
3462 return 1;
3463
3464 case EVP_CTRL_AEAD_GET_TAG:
3465 if (!EVP_CIPHER_CTX_is_encrypting(c) || !cctx->tag_set)
3466 return 0;
3467 if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg))
3468 return 0;
3469 cctx->tag_set = 0;
3470 cctx->iv_set = 0;
3471 cctx->len_set = 0;
3472 return 1;
3473
3474 case EVP_CTRL_COPY:
3475 {
3476 EVP_CIPHER_CTX *out = ptr;
3477 EVP_AES_CCM_CTX *cctx_out = EVP_C_DATA(EVP_AES_CCM_CTX,out);
3478 if (cctx->ccm.key) {
3479 if (cctx->ccm.key != &cctx->ks)
3480 return 0;
3481 cctx_out->ccm.key = &cctx_out->ks;
3482 }
3483 return 1;
3484 }
3485
3486 default:
3487 return -1;
3488
3489 }
3490 }
3491
3492 static int aes_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3493 const unsigned char *iv, int enc)
3494 {
3495 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3496
3497 if (iv == NULL && key == NULL)
3498 return 1;
3499
3500 if (key != NULL) {
3501 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3502
3503 if (keylen <= 0) {
3504 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3505 return 0;
3506 }
3507 do {
3508 #ifdef HWAES_CAPABLE
3509 if (HWAES_CAPABLE) {
3510 HWAES_set_encrypt_key(key, keylen, &cctx->ks.ks);
3511
3512 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3513 &cctx->ks, (block128_f) HWAES_encrypt);
3514 cctx->str = NULL;
3515 cctx->key_set = 1;
3516 break;
3517 } else
3518 #endif
3519 #ifdef VPAES_CAPABLE
3520 if (VPAES_CAPABLE) {
3521 vpaes_set_encrypt_key(key, keylen, &cctx->ks.ks);
3522 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3523 &cctx->ks, (block128_f) vpaes_encrypt);
3524 cctx->str = NULL;
3525 cctx->key_set = 1;
3526 break;
3527 }
3528 #endif
3529 AES_set_encrypt_key(key, keylen, &cctx->ks.ks);
3530 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3531 &cctx->ks, (block128_f) AES_encrypt);
3532 cctx->str = NULL;
3533 cctx->key_set = 1;
3534 } while (0);
3535 }
3536 if (iv != NULL) {
3537 memcpy(ctx->iv, iv, 15 - cctx->L);
3538 cctx->iv_set = 1;
3539 }
3540 return 1;
3541 }
3542
3543 static int aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3544 const unsigned char *in, size_t len)
3545 {
3546 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3547 CCM128_CONTEXT *ccm = &cctx->ccm;
3548 /* Encrypt/decrypt must be performed in place */
3549 if (out != in || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->M))
3550 return -1;
3551 /* If encrypting set explicit IV from sequence number (start of AAD) */
3552 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3553 memcpy(out, EVP_CIPHER_CTX_buf_noconst(ctx),
3554 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3555 /* Get rest of IV from explicit IV */
3556 memcpy(ctx->iv + EVP_CCM_TLS_FIXED_IV_LEN, in,
3557 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3558 /* Correct length value */
3559 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3560 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L,
3561 len))
3562 return -1;
3563 /* Use saved AAD */
3564 CRYPTO_ccm128_aad(ccm, EVP_CIPHER_CTX_buf_noconst(ctx),
3565 cctx->tls_aad_len);
3566 /* Fix buffer to point to payload */
3567 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3568 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3569 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3570 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3571 cctx->str) :
3572 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3573 return -1;
3574 if (!CRYPTO_ccm128_tag(ccm, out + len, cctx->M))
3575 return -1;
3576 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3577 } else {
3578 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3579 cctx->str) :
3580 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3581 unsigned char tag[16];
3582 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3583 if (!CRYPTO_memcmp(tag, in + len, cctx->M))
3584 return len;
3585 }
3586 }
3587 OPENSSL_cleanse(out, len);
3588 return -1;
3589 }
3590 }
3591
3592 static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3593 const unsigned char *in, size_t len)
3594 {
3595 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3596 CCM128_CONTEXT *ccm = &cctx->ccm;
3597 /* If not set up, return error */
3598 if (!cctx->key_set)
3599 return -1;
3600
3601 if (cctx->tls_aad_len >= 0)
3602 return aes_ccm_tls_cipher(ctx, out, in, len);
3603
3604 /* EVP_*Final() doesn't return any data */
3605 if (in == NULL && out != NULL)
3606 return 0;
3607
3608 if (!cctx->iv_set)
3609 return -1;
3610
3611 if (!out) {
3612 if (!in) {
3613 if (CRYPTO_ccm128_setiv(ccm, ctx->iv,
3614 15 - cctx->L, len))
3615 return -1;
3616 cctx->len_set = 1;
3617 return len;
3618 }
3619 /* If we have AAD, the message length must have been set first */
3620 if (!cctx->len_set && len)
3621 return -1;
3622 CRYPTO_ccm128_aad(ccm, in, len);
3623 return len;
3624 }
3625
3626 /* The tag must be set before actually decrypting data */
3627 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && !cctx->tag_set)
3628 return -1;
3629
3630 /* If the length has not been set yet, do it now */
3631 if (!cctx->len_set) {
3632 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L, len))
3633 return -1;
3634 cctx->len_set = 1;
3635 }
3636 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3637 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3638 cctx->str) :
3639 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3640 return -1;
3641 cctx->tag_set = 1;
3642 return len;
3643 } else {
3644 int rv = -1;
3645 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3646 cctx->str) :
3647 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3648 unsigned char tag[16];
3649 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3650 if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx),
3651 cctx->M))
3652 rv = len;
3653 }
3654 }
3655 if (rv == -1)
3656 OPENSSL_cleanse(out, len);
3657 cctx->iv_set = 0;
3658 cctx->tag_set = 0;
3659 cctx->len_set = 0;
3660 return rv;
3661 }
3662 }
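/*-
 * A minimal decrypt-side sketch (not part of the build) of the sequence
 * implemented above. Unlike GCM, the expected tag must be set before the
 * payload Update(), which is also where verification happens (there is no
 * Final() data for CCM). Buffers and lengths are hypothetical; error
 * checking is omitted.
 */
#if 0
EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
int outl, ok;

EVP_DecryptInit_ex(c, EVP_aes_128_ccm(), NULL, NULL, NULL);
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL);
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 16, tag);  /* before data */
EVP_DecryptInit_ex(c, NULL, NULL, key, nonce);
EVP_DecryptUpdate(c, NULL, &outl, NULL, ctlen);   /* total length first */
EVP_DecryptUpdate(c, NULL, &outl, aad, aadlen);   /* then the AAD */
ok = EVP_DecryptUpdate(c, pt, &outl, ct, ctlen);  /* tag checked here */
EVP_CIPHER_CTX_free(c);
#endif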
3663
3664 #define aes_ccm_cleanup NULL
3665
3666 BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, ccm, CCM,
3667 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3668 BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, ccm, CCM,
3669 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3670 BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, ccm, CCM,
3671 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3672
3673 typedef struct {
3674 union {
3675 OSSL_UNION_ALIGN;
3676 AES_KEY ks;
3677 } ks;
3678 /* Non-NULL if the IV has been set; points into ctx->iv */
3679 unsigned char *iv;
3680 } EVP_AES_WRAP_CTX;
3681
3682 static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3683 const unsigned char *iv, int enc)
3684 {
3685 int len;
3686 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3687
3688 if (iv == NULL && key == NULL)
3689 return 1;
3690 if (key != NULL) {
3691 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3692
3693 if (keylen <= 0) {
3694 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3695 return 0;
3696 }
3697 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3698 AES_set_encrypt_key(key, keylen, &wctx->ks.ks);
3699 else
3700 AES_set_decrypt_key(key, keylen, &wctx->ks.ks);
3701 if (iv == NULL)
3702 wctx->iv = NULL;
3703 }
3704 if (iv != NULL) {
3705 if ((len = EVP_CIPHER_CTX_get_iv_length(ctx)) < 0)
3706 return 0;
3707 memcpy(ctx->iv, iv, len);
3708 wctx->iv = ctx->iv;
3709 }
3710 return 1;
3711 }
3712
3713 static int aes_wrap_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3714 const unsigned char *in, size_t inlen)
3715 {
3716 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3717 size_t rv;
3718 /* AES wrap with padding has IV length of 4, without padding 8 */
3719 int pad = EVP_CIPHER_CTX_get_iv_length(ctx) == 4;
3720 /* No final operation so always return zero length */
3721 if (!in)
3722 return 0;
3723 /* Input length must always be non-zero */
3724 if (!inlen)
3725 return -1;
3726 /* If decrypting need at least 16 bytes and multiple of 8 */
3727 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && (inlen < 16 || inlen & 0x7))
3728 return -1;
3729 /* If not padding input must be multiple of 8 */
3730 if (!pad && inlen & 0x7)
3731 return -1;
3732 if (ossl_is_partially_overlapping(out, in, inlen)) {
3733 ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
3734 return 0;
3735 }
3736 if (!out) {
3737 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3738 /* If padding round up to multiple of 8 */
3739 if (pad)
3740 inlen = (inlen + 7) / 8 * 8;
3741 /* 8 byte prefix */
3742 return inlen + 8;
3743 } else {
3744 /*
3745 * If not padding, the output will be exactly 8 bytes smaller than
3746 * the input. If padding, it will be at least 8 bytes smaller, but
3747 * we don't know exactly how much.
3748 */
3749 return inlen - 8;
3750 }
3751 }
3752 if (pad) {
3753 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3754 rv = CRYPTO_128_wrap_pad(&wctx->ks.ks, wctx->iv,
3755 out, in, inlen,
3756 (block128_f) AES_encrypt);
3757 else
3758 rv = CRYPTO_128_unwrap_pad(&wctx->ks.ks, wctx->iv,
3759 out, in, inlen,
3760 (block128_f) AES_decrypt);
3761 } else {
3762 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3763 rv = CRYPTO_128_wrap(&wctx->ks.ks, wctx->iv,
3764 out, in, inlen, (block128_f) AES_encrypt);
3765 else
3766 rv = CRYPTO_128_unwrap(&wctx->ks.ks, wctx->iv,
3767 out, in, inlen, (block128_f) AES_decrypt);
3768 }
3769 return rv ? (int)rv : -1;
3770 }
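/*-
 * A minimal key-wrap sketch (not part of the build). Wrap-mode ciphers
 * must be explicitly enabled on the context, a NULL IV selects the default
 * ICV from RFC 3394, and the output is the input plus the 8-byte integrity
 * prefix. kek, cek and wrapped are hypothetical buffers; error checking is
 * omitted.
 */
#if 0
EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
int outl;

EVP_CIPHER_CTX_set_flags(c, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW);
EVP_EncryptInit_ex(c, EVP_aes_256_wrap(), NULL, kek, NULL);
EVP_EncryptUpdate(c, wrapped, &outl, cek, 16);    /* outl == 16 + 8 */
EVP_CIPHER_CTX_free(c);
#endif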
3771
3772 #define WRAP_FLAGS (EVP_CIPH_WRAP_MODE \
3773 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3774 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_FLAG_DEFAULT_ASN1)
3775
3776 static const EVP_CIPHER aes_128_wrap = {
3777 NID_id_aes128_wrap,
3778 8, 16, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3779 aes_wrap_init_key, aes_wrap_cipher,
3780 NULL,
3781 sizeof(EVP_AES_WRAP_CTX),
3782 NULL, NULL, NULL, NULL
3783 };
3784
3785 const EVP_CIPHER *EVP_aes_128_wrap(void)
3786 {
3787 return &aes_128_wrap;
3788 }
3789
3790 static const EVP_CIPHER aes_192_wrap = {
3791 NID_id_aes192_wrap,
3792 8, 24, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3793 aes_wrap_init_key, aes_wrap_cipher,
3794 NULL,
3795 sizeof(EVP_AES_WRAP_CTX),
3796 NULL, NULL, NULL, NULL
3797 };
3798
3799 const EVP_CIPHER *EVP_aes_192_wrap(void)
3800 {
3801 return &aes_192_wrap;
3802 }
3803
3804 static const EVP_CIPHER aes_256_wrap = {
3805 NID_id_aes256_wrap,
3806 8, 32, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3807 aes_wrap_init_key, aes_wrap_cipher,
3808 NULL,
3809 sizeof(EVP_AES_WRAP_CTX),
3810 NULL, NULL, NULL, NULL
3811 };
3812
3813 const EVP_CIPHER *EVP_aes_256_wrap(void)
3814 {
3815 return &aes_256_wrap;
3816 }
3817
3818 static const EVP_CIPHER aes_128_wrap_pad = {
3819 NID_id_aes128_wrap_pad,
3820 8, 16, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3821 aes_wrap_init_key, aes_wrap_cipher,
3822 NULL,
3823 sizeof(EVP_AES_WRAP_CTX),
3824 NULL, NULL, NULL, NULL
3825 };
3826
3827 const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
3828 {
3829 return &aes_128_wrap_pad;
3830 }
3831
3832 static const EVP_CIPHER aes_192_wrap_pad = {
3833 NID_id_aes192_wrap_pad,
3834 8, 24, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3835 aes_wrap_init_key, aes_wrap_cipher,
3836 NULL,
3837 sizeof(EVP_AES_WRAP_CTX),
3838 NULL, NULL, NULL, NULL
3839 };
3840
3841 const EVP_CIPHER *EVP_aes_192_wrap_pad(void)
3842 {
3843 return &aes_192_wrap_pad;
3844 }
3845
3846 static const EVP_CIPHER aes_256_wrap_pad = {
3847 NID_id_aes256_wrap_pad,
3848 8, 32, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3849 aes_wrap_init_key, aes_wrap_cipher,
3850 NULL,
3851 sizeof(EVP_AES_WRAP_CTX),
3852 NULL, NULL, NULL, NULL
3853 };
3854
3855 const EVP_CIPHER *EVP_aes_256_wrap_pad(void)
3856 {
3857 return &aes_256_wrap_pad;
3858 }
3859
3860 #ifndef OPENSSL_NO_OCB
3861 static int aes_ocb_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3862 {
3863 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
3864 EVP_CIPHER_CTX *newc;
3865 EVP_AES_OCB_CTX *new_octx;
3866
3867 switch (type) {
3868 case EVP_CTRL_INIT:
3869 octx->key_set = 0;
3870 octx->iv_set = 0;
3871 octx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
3872 octx->iv = c->iv;
3873 octx->taglen = 16;
3874 octx->data_buf_len = 0;
3875 octx->aad_buf_len = 0;
3876 return 1;
3877
3878 case EVP_CTRL_GET_IVLEN:
3879 *(int *)ptr = octx->ivlen;
3880 return 1;
3881
3882 case EVP_CTRL_AEAD_SET_IVLEN:
3883 /* IV len must be 1 to 15 */
3884 if (arg <= 0 || arg > 15)
3885 return 0;
3886
3887 octx->ivlen = arg;
3888 return 1;
3889
3890 case EVP_CTRL_AEAD_SET_TAG:
3891 if (ptr == NULL) {
3892 /* Tag len must be 0 to 16 */
3893 if (arg < 0 || arg > 16)
3894 return 0;
3895
3896 octx->taglen = arg;
3897 return 1;
3898 }
3899 if (arg != octx->taglen || EVP_CIPHER_CTX_is_encrypting(c))
3900 return 0;
3901 memcpy(octx->tag, ptr, arg);
3902 return 1;
3903
3904 case EVP_CTRL_AEAD_GET_TAG:
3905 if (arg != octx->taglen || !EVP_CIPHER_CTX_is_encrypting(c))
3906 return 0;
3907
3908 memcpy(ptr, octx->tag, arg);
3909 return 1;
3910
3911 case EVP_CTRL_COPY:
3912 newc = (EVP_CIPHER_CTX *)ptr;
3913 new_octx = EVP_C_DATA(EVP_AES_OCB_CTX,newc);
3914 return CRYPTO_ocb128_copy_ctx(&new_octx->ocb, &octx->ocb,
3915 &new_octx->ksenc.ks,
3916 &new_octx->ksdec.ks);
3917
3918 default:
3919 return -1;
3920
3921 }
3922 }
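/*-
 * A sketch (not part of the build) of the OCB tag controls above: with a
 * NULL pointer, SET_TAG sets the tag length; with a pointer, it supplies
 * the expected tag on the decrypt side, and arg must then match the
 * configured length. tag is a hypothetical buffer.
 */
#if 0
/* encrypt side: choose a 12-byte tag, fetch it after Final() */
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 12, NULL);
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 12, tag);

/* decrypt side: set the length first, then the expected tag */
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 12, NULL);
EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 12, tag);
#endif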

static int aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                            const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to avoid setting the
             * decrypt key for an encryption-only operation.
             */
# ifdef HWAES_CAPABLE
            if (HWAES_CAPABLE) {
                HWAES_set_encrypt_key(key, keylen, &octx->ksenc.ks);
                HWAES_set_decrypt_key(key, keylen, &octx->ksdec.ks);
                if (!CRYPTO_ocb128_init(&octx->ocb,
                                        &octx->ksenc.ks, &octx->ksdec.ks,
                                        (block128_f) HWAES_encrypt,
                                        (block128_f) HWAES_decrypt,
                                        enc ? HWAES_ocb_encrypt
                                            : HWAES_ocb_decrypt))
                    return 0;
                break;
            }
# endif
# ifdef VPAES_CAPABLE
            if (VPAES_CAPABLE) {
                vpaes_set_encrypt_key(key, keylen, &octx->ksenc.ks);
                vpaes_set_decrypt_key(key, keylen, &octx->ksdec.ks);
                if (!CRYPTO_ocb128_init(&octx->ocb,
                                        &octx->ksenc.ks, &octx->ksdec.ks,
                                        (block128_f) vpaes_encrypt,
                                        (block128_f) vpaes_decrypt,
                                        NULL))
                    return 0;
                break;
            }
# endif
            AES_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            AES_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) AES_encrypt,
                                    (block128_f) AES_decrypt,
                                    NULL))
                return 0;
        } while (0);

        /*
         * If we have an IV we can set it directly, otherwise use the
         * saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If the key is set, apply the IV now; otherwise save a copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}
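
/*
 * A minimal sketch of the two-step initialisation the function above
 * supports (the function name is an illustrative assumption): key and
 * nonce may arrive in separate EVP_EncryptInit_ex calls, with the nonce
 * stashed in octx->iv until the key schedule exists. A non-default nonce
 * length must be set between the two calls, before any IV is supplied.
 */
static int example_ocb_init(EVP_CIPHER_CTX *ctx,
                            const unsigned char key[16],
                            const unsigned char nonce[12])
{
    /* Select the cipher first, with no key or IV yet */
    return EVP_EncryptInit_ex(ctx, EVP_aes_128_ocb(), NULL, NULL, NULL)
        /* 12 bytes is already the default; shown for placement only */
        && EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL)
        /* Now supply key and nonce together (or in two further calls) */
        && EVP_EncryptInit_ex(ctx, NULL, NULL, key, nonce);
}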

static int aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                          const unsigned char *in, size_t len)
{
    unsigned char *buf;
    int *buf_len;
    int written_len = 0;
    size_t trailing_len;
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    /* If the IV or key is not set then return an error */
    if (!octx->iv_set)
        return -1;

    if (!octx->key_set)
        return -1;

    if (in != NULL) {
        /*
         * Need to ensure we are only passing full blocks to the low level
         * OCB routines. We do it here rather than in EVP_EncryptUpdate/
         * EVP_DecryptUpdate because we need to pass full blocks of AAD too
         * and those routines don't support that.
         */

        /* Are we dealing with AAD or normal data here? */
        if (out == NULL) {
            buf = octx->aad_buf;
            buf_len = &(octx->aad_buf_len);
        } else {
            buf = octx->data_buf;
            buf_len = &(octx->data_buf_len);

            if (ossl_is_partially_overlapping(out + *buf_len, in, len)) {
                ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
                return 0;
            }
        }

        /*
         * If we've got a partially filled buffer from a previous call then
         * use that data first
         */
        if (*buf_len > 0) {
            unsigned int remaining;

            remaining = AES_BLOCK_SIZE - (*buf_len);
            if (remaining > len) {
                memcpy(buf + (*buf_len), in, len);
                *(buf_len) += len;
                return 0;
            }
            memcpy(buf + (*buf_len), in, remaining);

            /*
             * If we get here we've filled the buffer, so process it
             */
            len -= remaining;
            in += remaining;
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, buf, AES_BLOCK_SIZE))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            }
            written_len = AES_BLOCK_SIZE;
            *buf_len = 0;
            if (out != NULL)
                out += AES_BLOCK_SIZE;
        }

        /* Do we have a partial block to handle at the end? */
        trailing_len = len % AES_BLOCK_SIZE;

        /*
         * If we've got some full blocks to handle, then process these first
         */
        if (len != trailing_len) {
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, in, len - trailing_len))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt
                    (&octx->ocb, in, out, len - trailing_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt
                    (&octx->ocb, in, out, len - trailing_len))
                    return -1;
            }
            written_len += len - trailing_len;
            in += len - trailing_len;
        }

        /* Handle any trailing partial block */
        if (trailing_len > 0) {
            memcpy(buf, in, trailing_len);
            *buf_len = trailing_len;
        }

        return written_len;
    } else {
        /*
         * First empty the buffers of any partial block we may have been
         * left holding - both for data and AAD
         */
        if (octx->data_buf_len > 0) {
            if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            }
            written_len = octx->data_buf_len;
            octx->data_buf_len = 0;
        }
        if (octx->aad_buf_len > 0) {
            if (!CRYPTO_ocb128_aad
                (&octx->ocb, octx->aad_buf, octx->aad_buf_len))
                return -1;
            octx->aad_buf_len = 0;
        }
        /* If decrypting then verify the tag */
        if (!EVP_CIPHER_CTX_is_encrypting(ctx)) {
            if (octx->taglen < 0)
                return -1;
            if (CRYPTO_ocb128_finish(&octx->ocb,
                                     octx->tag, octx->taglen) != 0)
                return -1;
            octx->iv_set = 0;
            return written_len;
        }
        /* If encrypting then just get the tag */
        if (CRYPTO_ocb128_tag(&octx->ocb, octx->tag, 16) != 1)
            return -1;
        /* Don't reuse the IV */
        octx->iv_set = 0;
        return written_len;
    }
}
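
/*
 * A minimal end-to-end sketch of the update/final flow the function
 * above implements (the function name and fixed 16-byte tag length are
 * illustrative assumptions). AAD is fed through the same entry point
 * with out == NULL, partial blocks are buffered transparently across
 * calls, and the tag is collected after EVP_EncryptFinal_ex.
 */
static int example_ocb_encrypt(EVP_CIPHER_CTX *ctx,
                               const unsigned char *aad, int aadlen,
                               const unsigned char *pt, int ptlen,
                               unsigned char *ct, int *ctlen,
                               unsigned char tag[16])
{
    int len;

    /* AAD pass: out == NULL selects the aad_buf path above */
    if (aadlen > 0 && !EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen))
        return 0;
    /* Data pass: may write less than ptlen if a partial block is held */
    if (!EVP_EncryptUpdate(ctx, ct, ctlen, pt, ptlen))
        return 0;
    /* Final call flushes the held partial block and computes the tag */
    if (!EVP_EncryptFinal_ex(ctx, ct + *ctlen, &len))
        return 0;
    *ctlen += len;
    return EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
}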

static int aes_ocb_cleanup(EVP_CIPHER_CTX *c)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
    CRYPTO_ocb128_cleanup(&octx->ocb);
    return 1;
}

BLOCK_CIPHER_custom(NID_aes, 128, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
#endif /* OPENSSL_NO_OCB */