/*
 * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <stdio.h>
#include <assert.h>
#include "internal/cryptlib.h"
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/rand.h>
#include <openssl/rand_drbg.h>
#include <openssl/engine.h>
#include "internal/evp_int.h"
#include "evp_locl.h"

int EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *c)
{
    if (c == NULL)
        return 1;
    if (c->cipher != NULL) {
        if (c->cipher->cleanup && !c->cipher->cleanup(c))
            return 0;
        /* Cleanse cipher context data */
        if (c->cipher_data && c->cipher->ctx_size)
            OPENSSL_cleanse(c->cipher_data, c->cipher->ctx_size);
    }
    OPENSSL_free(c->cipher_data);
#ifndef OPENSSL_NO_ENGINE
    ENGINE_finish(c->engine);
#endif
    memset(c, 0, sizeof(*c));
    return 1;
}

EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void)
{
    return OPENSSL_zalloc(sizeof(EVP_CIPHER_CTX));
}

void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx)
{
    EVP_CIPHER_CTX_reset(ctx);
    OPENSSL_free(ctx);
}

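/*-
 * Usage sketch (illustrative): the intended lifecycle of an EVP_CIPHER_CTX
 * is allocate, use, optionally reset for reuse, and finally free.
 * EVP_CIPHER_CTX_free(NULL) is a no-op, and EVP_CIPHER_CTX_reset() lets the
 * same allocation be reused for a fresh operation.
 *
 *     EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *
 *     if (ctx == NULL)
 *         return 0;                  // allocation failure
 *     ... first encrypt or decrypt operation ...
 *     EVP_CIPHER_CTX_reset(ctx);     // wipe state, keep the allocation
 *     ... second operation ...
 *     EVP_CIPHER_CTX_free(ctx);      // implies a final reset
 */
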
int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                   const unsigned char *key, const unsigned char *iv, int enc)
{
    if (cipher != NULL)
        EVP_CIPHER_CTX_reset(ctx);
    return EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, enc);
}

int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                      ENGINE *impl, const unsigned char *key,
                      const unsigned char *iv, int enc)
{
    if (enc == -1)
        enc = ctx->encrypt;
    else {
        if (enc)
            enc = 1;
        ctx->encrypt = enc;
    }
#ifndef OPENSSL_NO_ENGINE
    /*
     * Whether it's nice or not, "Inits" can be used on "Final"'d contexts so
     * this context may already have an ENGINE! Try to avoid releasing the
     * previous handle, re-querying for an ENGINE, and having a
     * reinitialisation, when it may all be unnecessary.
     */
    if (ctx->engine && ctx->cipher
        && (cipher == NULL || cipher->nid == ctx->cipher->nid))
        goto skip_to_init;
#endif
    if (cipher) {
        /*
         * Ensure a context left lying around from last time is cleared (the
         * previous check attempted to avoid this if the same ENGINE and
         * EVP_CIPHER could be used).
         */
        if (ctx->cipher) {
            unsigned long flags = ctx->flags;
            EVP_CIPHER_CTX_reset(ctx);
            /* Restore encrypt and flags */
            ctx->encrypt = enc;
            ctx->flags = flags;
        }
#ifndef OPENSSL_NO_ENGINE
        if (impl) {
            if (!ENGINE_init(impl)) {
                EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_INITIALIZATION_ERROR);
                return 0;
            }
        } else
            /* Ask if an ENGINE is reserved for this job */
            impl = ENGINE_get_cipher_engine(cipher->nid);
        if (impl) {
            /* There's an ENGINE for this job ... (apparently) */
            const EVP_CIPHER *c = ENGINE_get_cipher(impl, cipher->nid);
            if (!c) {
                /*
                 * One positive side-effect of US's export control history,
                 * is that we should at least be able to avoid using US
                 * misspellings of "initialisation"?
                 */
                EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_INITIALIZATION_ERROR);
                return 0;
            }
            /* We'll use the ENGINE's private cipher definition */
            cipher = c;
            /*
             * Store the ENGINE functional reference so we know 'cipher' came
             * from an ENGINE and we need to release it when done.
             */
            ctx->engine = impl;
        } else
            ctx->engine = NULL;
#endif

        ctx->cipher = cipher;
        if (ctx->cipher->ctx_size) {
            ctx->cipher_data = OPENSSL_zalloc(ctx->cipher->ctx_size);
            if (ctx->cipher_data == NULL) {
                ctx->cipher = NULL;
                EVPerr(EVP_F_EVP_CIPHERINIT_EX, ERR_R_MALLOC_FAILURE);
                return 0;
            }
        } else {
            ctx->cipher_data = NULL;
        }
        ctx->key_len = cipher->key_len;
        /* Preserve wrap enable flag, zero everything else */
        ctx->flags &= EVP_CIPHER_CTX_FLAG_WRAP_ALLOW;
        if (ctx->cipher->flags & EVP_CIPH_CTRL_INIT) {
            if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_INIT, 0, NULL)) {
                ctx->cipher = NULL;
                EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_INITIALIZATION_ERROR);
                return 0;
            }
        }
    } else if (!ctx->cipher) {
        EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_NO_CIPHER_SET);
        return 0;
    }
#ifndef OPENSSL_NO_ENGINE
 skip_to_init:
#endif
    /* we assume block size is a power of 2 in *cryptUpdate */
    OPENSSL_assert(ctx->cipher->block_size == 1
                   || ctx->cipher->block_size == 8
                   || ctx->cipher->block_size == 16);

    if (!(ctx->flags & EVP_CIPHER_CTX_FLAG_WRAP_ALLOW)
        && EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_WRAP_MODE) {
        EVPerr(EVP_F_EVP_CIPHERINIT_EX, EVP_R_WRAP_MODE_NOT_ALLOWED);
        return 0;
    }

    if (!(EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(ctx)) & EVP_CIPH_CUSTOM_IV)) {
        switch (EVP_CIPHER_CTX_mode(ctx)) {

        case EVP_CIPH_STREAM_CIPHER:
        case EVP_CIPH_ECB_MODE:
            break;

        case EVP_CIPH_CFB_MODE:
        case EVP_CIPH_OFB_MODE:

            ctx->num = 0;
            /* fall-through */

        case EVP_CIPH_CBC_MODE:

            OPENSSL_assert(EVP_CIPHER_CTX_iv_length(ctx) <=
                           (int)sizeof(ctx->iv));
            if (iv)
                memcpy(ctx->oiv, iv, EVP_CIPHER_CTX_iv_length(ctx));
            memcpy(ctx->iv, ctx->oiv, EVP_CIPHER_CTX_iv_length(ctx));
            break;

        case EVP_CIPH_CTR_MODE:
            ctx->num = 0;
            /* Don't reuse IV for CTR mode */
            if (iv)
                memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx));
            break;

        default:
            return 0;
        }
    }

    if (key || (ctx->cipher->flags & EVP_CIPH_ALWAYS_CALL_INIT)) {
        if (!ctx->cipher->init(ctx, key, iv, enc))
            return 0;
    }
    ctx->buf_len = 0;
    ctx->final_used = 0;
    ctx->block_mask = ctx->cipher->block_size - 1;
    return 1;
}

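/*-
 * Usage sketch (illustrative): EVP_CipherInit_ex() supports a split
 * initialisation, which the logic above is written to allow.  A first call
 * selects the cipher (and optionally an ENGINE) without a key; a later call
 * supplies key and IV with cipher == NULL so the existing cipher and
 * cipher_data are kept.  Passing enc == -1 preserves the previously set
 * direction.
 *
 *     EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *
 *     EVP_CipherInit_ex(ctx, EVP_aes_256_cbc(), NULL, NULL, NULL, 1);
 *     ... e.g. query EVP_CIPHER_CTX_key_length(ctx), issue ctrls ...
 *     EVP_CipherInit_ex(ctx, NULL, NULL, key, iv, -1);
 */
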
int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl,
                     const unsigned char *in, int inl)
{
    if (ctx->encrypt)
        return EVP_EncryptUpdate(ctx, out, outl, in, inl);
    else
        return EVP_DecryptUpdate(ctx, out, outl, in, inl);
}

int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl)
{
    if (ctx->encrypt)
        return EVP_EncryptFinal_ex(ctx, out, outl);
    else
        return EVP_DecryptFinal_ex(ctx, out, outl);
}

int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl)
{
    if (ctx->encrypt)
        return EVP_EncryptFinal(ctx, out, outl);
    else
        return EVP_DecryptFinal(ctx, out, outl);
}

int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                    const unsigned char *key, const unsigned char *iv)
{
    return EVP_CipherInit(ctx, cipher, key, iv, 1);
}

int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                       ENGINE *impl, const unsigned char *key,
                       const unsigned char *iv)
{
    return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 1);
}

int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                    const unsigned char *key, const unsigned char *iv)
{
    return EVP_CipherInit(ctx, cipher, key, iv, 0);
}

int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                       ENGINE *impl, const unsigned char *key,
                       const unsigned char *iv)
{
    return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 0);
}

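/*-
 * Usage sketch (illustrative): a complete one-shot encryption using the
 * wrappers above.  The key and IV lengths are assumed to match the cipher
 * (32 and 16 bytes for AES-256-CBC), and the output buffer must be at least
 * inl + block_size bytes because EVP_EncryptFinal_ex() appends a padding
 * block.
 *
 *     EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *     int outl = 0, tmplen = 0;
 *
 *     if (ctx == NULL
 *         || !EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, key, iv)
 *         || !EVP_EncryptUpdate(ctx, out, &outl, in, inl)
 *         || !EVP_EncryptFinal_ex(ctx, out + outl, &tmplen))
 *         goto err;
 *     outl += tmplen;                // total ciphertext length
 *     EVP_CIPHER_CTX_free(ctx);
 */
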
/*
 * According to the letter of the standard, the difference between two
 * pointers is defined only when both point into the same object.  This
 * makes it formally challenging to determine with standard pointer
 * arithmetic whether the input and output buffers partially overlap.
 */
#ifdef PTRDIFF_T
# undef PTRDIFF_T
#endif
#if defined(OPENSSL_SYS_VMS) && __INITIAL_POINTER_SIZE==64
/*
 * Then we have VMS, which distinguishes itself by adhering to
 * sizeof(size_t)==4 even in 64-bit builds, which means that the
 * difference between two pointers might be truncated to 32 bits.
 * In that context one can even wonder how comparison for equality
 * is implemented.  To be on the safe side we adhere to PTRDIFF_T
 * even for comparison for equality.
 */
# define PTRDIFF_T uint64_t
#else
# define PTRDIFF_T size_t
#endif

int is_partially_overlapping(const void *ptr1, const void *ptr2, int len)
{
    PTRDIFF_T diff = (PTRDIFF_T)ptr1-(PTRDIFF_T)ptr2;
    /*
     * Check for partially overlapping buffers. [Binary logical
     * operations are used instead of boolean to minimize number
     * of conditional branches.]
     */
    int overlapped = (len > 0) & (diff != 0) & ((diff < (PTRDIFF_T)len) |
                                                (diff > (0 - (PTRDIFF_T)len)));

    return overlapped;
}

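/*-
 * Worked example (illustrative): with unsigned arithmetic the single
 * subtraction covers both directions of overlap.  Take len == 16 and a
 * W-bit PTRDIFF_T:
 *
 *     ptr1 == ptr2          diff == 0                        -> 0
 *                           (exact aliasing is handled in place)
 *     ptr1 == ptr2 + 5      diff == 5, 5 < 16                -> 1
 *                           (output starts inside the input)
 *     ptr1 == ptr2 - 3      diff == 2^W - 3 > 2^W - 16       -> 1
 *                           (input starts inside the output)
 *     ptr1 == ptr2 + 16     diff == 16                       -> 0
 *                           (buffers merely adjacent)
 *
 * i.e. the check is equivalent to 0 < |ptr1 - ptr2| < len, without a
 * signed subtraction that could overflow.
 */
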
static int evp_EncryptDecryptUpdate(EVP_CIPHER_CTX *ctx,
                                    unsigned char *out, int *outl,
                                    const unsigned char *in, int inl)
{
    int i, j, bl, cmpl = inl;

    /* With EVP_CIPH_FLAG_LENGTH_BITS the length is given in bits, not bytes */
    if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS))
        cmpl = (cmpl + 7) / 8;

    bl = ctx->cipher->block_size;

    if (inl <= 0) {
        *outl = 0;
        return inl == 0;
    }

    if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
        /* If block size > 1 then the cipher will have to do this check */
        if (bl == 1 && is_partially_overlapping(out, in, cmpl)) {
            EVPerr(EVP_F_EVP_ENCRYPTDECRYPTUPDATE, EVP_R_PARTIALLY_OVERLAPPING);
            return 0;
        }

        i = ctx->cipher->do_cipher(ctx, out, in, inl);
        if (i < 0)
            return 0;
        else
            *outl = i;
        return 1;
    }

    if (is_partially_overlapping(out + ctx->buf_len, in, cmpl)) {
        EVPerr(EVP_F_EVP_ENCRYPTDECRYPTUPDATE, EVP_R_PARTIALLY_OVERLAPPING);
        return 0;
    }

    /* Fast path: nothing buffered and the input is a whole number of blocks */
    if (ctx->buf_len == 0 && (inl & (ctx->block_mask)) == 0) {
        if (ctx->cipher->do_cipher(ctx, out, in, inl)) {
            *outl = inl;
            return 1;
        } else {
            *outl = 0;
            return 0;
        }
    }
    i = ctx->buf_len;
    OPENSSL_assert(bl <= (int)sizeof(ctx->buf));
    if (i != 0) {
        /* Top up the partial block left over from the previous call */
        if (bl - i > inl) {
            memcpy(&(ctx->buf[i]), in, inl);
            ctx->buf_len += inl;
            *outl = 0;
            return 1;
        } else {
            j = bl - i;
            memcpy(&(ctx->buf[i]), in, j);
            inl -= j;
            in += j;
            if (!ctx->cipher->do_cipher(ctx, out, ctx->buf, bl))
                return 0;
            out += bl;
            *outl = bl;
        }
    } else
        *outl = 0;
    /* Process the remaining whole blocks, then stash any trailing bytes */
    i = inl & (bl - 1);
    inl -= i;
    if (inl > 0) {
        if (!ctx->cipher->do_cipher(ctx, out, in, inl))
            return 0;
        *outl += inl;
    }

    if (i != 0)
        memcpy(ctx->buf, &(in[inl]), i);
    ctx->buf_len = i;
    return 1;
}

int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl,
                      const unsigned char *in, int inl)
{
    /* Prevent accidental use of decryption context when encrypting */
    if (!ctx->encrypt) {
        EVPerr(EVP_F_EVP_ENCRYPTUPDATE, EVP_R_INVALID_OPERATION);
        return 0;
    }

    return evp_EncryptDecryptUpdate(ctx, out, outl, in, inl);
}

int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl)
{
    int ret;
    ret = EVP_EncryptFinal_ex(ctx, out, outl);
    return ret;
}

int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl)
{
    int n, ret;
    unsigned int i, b, bl;

    /* Prevent accidental use of decryption context when encrypting */
    if (!ctx->encrypt) {
        EVPerr(EVP_F_EVP_ENCRYPTFINAL_EX, EVP_R_INVALID_OPERATION);
        return 0;
    }

    if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
        ret = ctx->cipher->do_cipher(ctx, out, NULL, 0);
        if (ret < 0)
            return 0;
        else
            *outl = ret;
        return 1;
    }

    b = ctx->cipher->block_size;
    OPENSSL_assert(b <= sizeof(ctx->buf));
    if (b == 1) {
        *outl = 0;
        return 1;
    }
    bl = ctx->buf_len;
    if (ctx->flags & EVP_CIPH_NO_PADDING) {
        if (bl) {
            EVPerr(EVP_F_EVP_ENCRYPTFINAL_EX,
                   EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
            return 0;
        }
        *outl = 0;
        return 1;
    }

    /* Standard block padding: fill the block with the number of pad bytes */
    n = b - bl;
    for (i = bl; i < b; i++)
        ctx->buf[i] = n;
    ret = ctx->cipher->do_cipher(ctx, out, ctx->buf, b);

    if (ret)
        *outl = b;

    return ret;
}

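/*-
 * Worked example (illustrative): the padding written by
 * EVP_EncryptFinal_ex() is the PKCS#7 scheme.  With a 16-byte block and 11
 * buffered plaintext bytes, the final block encrypted is
 *
 *     p0 p1 ... p10 05 05 05 05 05
 *
 * and when the plaintext is already a multiple of the block size an entire
 * extra block of sixteen 0x10 bytes is encrypted, so the ciphertext is
 * always longer than the plaintext.
 */
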
int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl,
                      const unsigned char *in, int inl)
{
    int fix_len, cmpl = inl;
    unsigned int b;

    /* Prevent accidental use of encryption context when decrypting */
    if (ctx->encrypt) {
        EVPerr(EVP_F_EVP_DECRYPTUPDATE, EVP_R_INVALID_OPERATION);
        return 0;
    }

    b = ctx->cipher->block_size;

    if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS))
        cmpl = (cmpl + 7) / 8;

    if (inl <= 0) {
        *outl = 0;
        return inl == 0;
    }

    if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
        if (b == 1 && is_partially_overlapping(out, in, cmpl)) {
            EVPerr(EVP_F_EVP_DECRYPTUPDATE, EVP_R_PARTIALLY_OVERLAPPING);
            return 0;
        }

        fix_len = ctx->cipher->do_cipher(ctx, out, in, inl);
        if (fix_len < 0) {
            *outl = 0;
            return 0;
        } else
            *outl = fix_len;
        return 1;
    }

    if (ctx->flags & EVP_CIPH_NO_PADDING)
        return evp_EncryptDecryptUpdate(ctx, out, outl, in, inl);

    OPENSSL_assert(b <= sizeof(ctx->final));

    if (ctx->final_used) {
        /* see comment about PTRDIFF_T comparison above */
        if (((PTRDIFF_T)out == (PTRDIFF_T)in)
            || is_partially_overlapping(out, in, b)) {
            EVPerr(EVP_F_EVP_DECRYPTUPDATE, EVP_R_PARTIALLY_OVERLAPPING);
            return 0;
        }
        memcpy(out, ctx->final, b);
        out += b;
        fix_len = 1;
    } else
        fix_len = 0;

    if (!evp_EncryptDecryptUpdate(ctx, out, outl, in, inl))
        return 0;

    /*
     * if we have 'decrypted' a multiple of block size, make sure we have a
     * copy of this last block
     */
    if (b > 1 && !ctx->buf_len) {
        *outl -= b;
        ctx->final_used = 1;
        memcpy(ctx->final, &out[*outl], b);
    } else
        ctx->final_used = 0;

    if (fix_len)
        *outl += b;

    return 1;
}

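/*-
 * Behavioural note with a small example (illustrative): when padding is
 * enabled, EVP_DecryptUpdate() deliberately withholds the last full block
 * because it cannot yet know how much of it is padding.  Decrypting 32
 * bytes of AES-CBC ciphertext in a single call therefore reports
 * *outl == 16; the withheld block is re-emitted by a later call (via
 * ctx->final) or resolved by EVP_DecryptFinal_ex(), which strips the
 * padding and returns the remaining 0..15 plaintext bytes.
 */
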
int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl)
{
    int ret;
    ret = EVP_DecryptFinal_ex(ctx, out, outl);
    return ret;
}

int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl)
{
    int i, n;
    unsigned int b;

    /* Prevent accidental use of encryption context when decrypting */
    if (ctx->encrypt) {
        EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, EVP_R_INVALID_OPERATION);
        return 0;
    }

    *outl = 0;

    if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
        i = ctx->cipher->do_cipher(ctx, out, NULL, 0);
        if (i < 0)
            return 0;
        else
            *outl = i;
        return 1;
    }

    b = ctx->cipher->block_size;
    if (ctx->flags & EVP_CIPH_NO_PADDING) {
        if (ctx->buf_len) {
            EVPerr(EVP_F_EVP_DECRYPTFINAL_EX,
                   EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
            return 0;
        }
        *outl = 0;
        return 1;
    }
    if (b > 1) {
        if (ctx->buf_len || !ctx->final_used) {
            EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, EVP_R_WRONG_FINAL_BLOCK_LENGTH);
            return 0;
        }
        OPENSSL_assert(b <= sizeof(ctx->final));

        /*
         * The following assumes that the ciphertext has been authenticated.
         * Otherwise it provides a padding oracle.
         */
        n = ctx->final[b - 1];
        if (n == 0 || n > (int)b) {
            EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, EVP_R_BAD_DECRYPT);
            return 0;
        }
        for (i = 0; i < n; i++) {
            if (ctx->final[--b] != n) {
                EVPerr(EVP_F_EVP_DECRYPTFINAL_EX, EVP_R_BAD_DECRYPT);
                return 0;
            }
        }
        n = ctx->cipher->block_size - n;
        for (i = 0; i < n; i++)
            out[i] = ctx->final[i];
        *outl = n;
    } else
        *outl = 0;
    return 1;
}

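/*-
 * Usage sketch (illustrative): a one-shot decryption.  A zero return from
 * EVP_DecryptFinal_ex() covers both a wrong key and corrupt padding, and,
 * as noted in the comment above, the result should only be trusted when
 * the ciphertext has been authenticated beforehand (or an AEAD mode is
 * used instead).
 *
 *     EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
 *     int outl = 0, tmplen = 0;
 *
 *     if (ctx == NULL
 *         || !EVP_DecryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, key, iv)
 *         || !EVP_DecryptUpdate(ctx, out, &outl, in, inl)
 *         || !EVP_DecryptFinal_ex(ctx, out + outl, &tmplen))
 *         goto err;                  // bad key, bad padding, ...
 *     outl += tmplen;                // total plaintext length
 *     EVP_CIPHER_CTX_free(ctx);
 */
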
int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *c, int keylen)
{
    if (c->cipher->flags & EVP_CIPH_CUSTOM_KEY_LENGTH)
        return EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_SET_KEY_LENGTH, keylen, NULL);
    if (c->key_len == keylen)
        return 1;
    if ((keylen > 0) && (c->cipher->flags & EVP_CIPH_VARIABLE_LENGTH)) {
        c->key_len = keylen;
        return 1;
    }
    EVPerr(EVP_F_EVP_CIPHER_CTX_SET_KEY_LENGTH, EVP_R_INVALID_KEY_LENGTH);
    return 0;
}

int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad)
{
    if (pad)
        ctx->flags &= ~EVP_CIPH_NO_PADDING;
    else
        ctx->flags |= EVP_CIPH_NO_PADDING;
    return 1;
}

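/*-
 * Usage sketch (illustrative): padding is on by default and is disabled per
 * context.  Because EVP_CipherInit_ex() clears most flags when a cipher is
 * (re)selected, the call belongs after the cipher has been set; with
 * padding off, the total input must be an exact multiple of the block size
 * or the *Final functions fail with
 * EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH.
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_128_cbc(), NULL, key, iv);
 *     EVP_CIPHER_CTX_set_padding(ctx, 0);
 *     EVP_EncryptUpdate(ctx, out, &outl, in, 32);     // 32 = 2 full blocks
 *     EVP_EncryptFinal_ex(ctx, out + outl, &tmplen);  // tmplen == 0 here
 */
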
int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr)
{
    int ret;

    if (!ctx->cipher) {
        EVPerr(EVP_F_EVP_CIPHER_CTX_CTRL, EVP_R_NO_CIPHER_SET);
        return 0;
    }

    if (!ctx->cipher->ctrl) {
        EVPerr(EVP_F_EVP_CIPHER_CTX_CTRL, EVP_R_CTRL_NOT_IMPLEMENTED);
        return 0;
    }

    ret = ctx->cipher->ctrl(ctx, type, arg, ptr);
    if (ret == -1) {
        EVPerr(EVP_F_EVP_CIPHER_CTX_CTRL,
               EVP_R_CTRL_OPERATION_NOT_IMPLEMENTED);
        return 0;
    }
    return ret;
}

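/*-
 * Usage sketch (illustrative): the ctrl interface carries mode-specific
 * parameters, and the supported operations depend entirely on the selected
 * cipher.  For an AEAD cipher such as AES-GCM a typical sequence is to set
 * the IV length before the key/IV are supplied and to fetch the tag after
 * EVP_EncryptFinal_ex():
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_256_gcm(), NULL, NULL, NULL);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL);
 *     EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv);
 *     ... EVP_EncryptUpdate() / EVP_EncryptFinal_ex() ...
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 */
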
int EVP_CIPHER_CTX_rand_key(EVP_CIPHER_CTX *ctx, unsigned char *key)
{
    if (ctx->cipher->flags & EVP_CIPH_RAND_KEY)
        return EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_RAND_KEY, 0, key);
    if (RAND_priv_bytes(key, ctx->key_len) <= 0)
        return 0;
    return 1;
}

int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in)
{
    if ((in == NULL) || (in->cipher == NULL)) {
        EVPerr(EVP_F_EVP_CIPHER_CTX_COPY, EVP_R_INPUT_NOT_INITIALIZED);
        return 0;
    }
#ifndef OPENSSL_NO_ENGINE
    /* Make sure it's safe to copy a cipher context using an ENGINE */
    if (in->engine && !ENGINE_init(in->engine)) {
        EVPerr(EVP_F_EVP_CIPHER_CTX_COPY, ERR_R_ENGINE_LIB);
        return 0;
    }
#endif

    EVP_CIPHER_CTX_reset(out);
    memcpy(out, in, sizeof(*out));

    if (in->cipher_data && in->cipher->ctx_size) {
        out->cipher_data = OPENSSL_malloc(in->cipher->ctx_size);
        if (out->cipher_data == NULL) {
            out->cipher = NULL;
            EVPerr(EVP_F_EVP_CIPHER_CTX_COPY, ERR_R_MALLOC_FAILURE);
            return 0;
        }
        memcpy(out->cipher_data, in->cipher_data, in->cipher->ctx_size);
    }

    if (in->cipher->flags & EVP_CIPH_CUSTOM_COPY)
        if (!in->cipher->ctrl((EVP_CIPHER_CTX *)in, EVP_CTRL_COPY, 0, out)) {
            out->cipher = NULL;
            EVPerr(EVP_F_EVP_CIPHER_CTX_COPY, EVP_R_INITIALIZATION_ERROR);
            return 0;
        }
    return 1;
}
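
/*-
 * Usage sketch (illustrative): EVP_CIPHER_CTX_copy() duplicates a fully
 * initialised context, including the cipher-private state, so the common
 * setup (cipher, key, IV, ctrls) can be done once and then branched.
 *
 *     EVP_CIPHER_CTX *dup = EVP_CIPHER_CTX_new();
 *
 *     if (dup == NULL || !EVP_CIPHER_CTX_copy(dup, ctx))
 *         goto err;
 *     ... use ctx and dup independently from this point on ...
 *     EVP_CIPHER_CTX_free(dup);
 */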