/*
 * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * RSA low level APIs are deprecated for public use, but still ok for
 * internal use.
 */
14 #include "internal/deprecated.h"
16 #include "internal/cryptlib.h"
17 #include "crypto/bn.h"
18 #include "rsa_local.h"
19 #include "internal/constant_time.h"
20 #include <openssl/evp.h>
21 #include <openssl/sha.h>
22 #include <openssl/hmac.h>
/*
 * Forward declarations for the static RSA_METHOD implementation below.
 * NOTE(review): extraction artifact — the leading decimal tokens are the
 * original file's line numbers fused into the text, and the
 * rsa_ossl_mod_exp / rsa_ossl_s390x_mod_exp prototypes are truncated
 * (their trailing BN_CTX parameter and ");" were lost). Confirm against
 * the upstream source before compiling.
 */
24 static int rsa_ossl_public_encrypt(int flen, const unsigned char *from,
25 unsigned char *to, RSA *rsa, int padding);
26 static int rsa_ossl_private_encrypt(int flen, const unsigned char *from,
27 unsigned char *to, RSA *rsa, int padding);
28 static int rsa_ossl_public_decrypt(int flen, const unsigned char *from,
29 unsigned char *to, RSA *rsa, int padding);
30 static int rsa_ossl_private_decrypt(int flen, const unsigned char *from,
31 unsigned char *to, RSA *rsa, int padding);
/* truncated prototype — trailing parameters lost in extraction */
32 static int rsa_ossl_mod_exp(BIGNUM *r0, const BIGNUM *i, RSA *rsa,
34 static int rsa_ossl_init(RSA *rsa);
35 static int rsa_ossl_finish(RSA *rsa);
/* truncated prototype — trailing parameters lost in extraction */
37 static int rsa_ossl_s390x_mod_exp(BIGNUM *r0, const BIGNUM *i, RSA *rsa,
/*
 * Default RSA_METHOD dispatch table, variant wiring in the s390x
 * accelerated CRT mod_exp (rsa_ossl_s390x_mod_exp).
 * NOTE(review): extraction damage — the enclosing preprocessor guard,
 * several slots (name, bn_mod_exp, init/finish, sign/verify) and the
 * closing "};" are missing; there is a second initializer for the same
 * identifier below, which cannot both survive in one translation unit.
 */
39 static RSA_METHOD rsa_pkcs1_ossl_meth = {
41 rsa_ossl_public_encrypt,
42 rsa_ossl_public_decrypt, /* signature verification */
43 rsa_ossl_private_encrypt, /* signing */
44 rsa_ossl_private_decrypt,
45 rsa_ossl_s390x_mod_exp,
49 RSA_FLAG_FIPS_METHOD, /* flags */
53 NULL, /* rsa_keygen */
54 NULL /* rsa_multi_prime_keygen */
/*
 * Default RSA_METHOD dispatch table (generic build: plain
 * BN_mod_exp_mont for the public exponentiation).
 * NOTE(review): extraction damage — this duplicates the identifier of
 * the initializer above (the distinguishing #ifdef/#else lines were
 * lost), and several slots plus the closing "};" are missing.
 */
57 static RSA_METHOD rsa_pkcs1_ossl_meth = {
59 rsa_ossl_public_encrypt,
60 rsa_ossl_public_decrypt, /* signature verification */
61 rsa_ossl_private_encrypt, /* signing */
62 rsa_ossl_private_decrypt,
64 BN_mod_exp_mont, /* XXX probably we should not use Montgomery
                      * (comment truncated in extraction) */
68 RSA_FLAG_FIPS_METHOD, /* flags */
72 NULL, /* rsa_keygen */
73 NULL /* rsa_multi_prime_keygen */
/* Currently selected default method; points at the table above until
 * RSA_set_default_method() replaces it. */
77 static const RSA_METHOD *default_RSA_meth = &rsa_pkcs1_ossl_meth;
/*
 * Install |meth| as the process-wide default RSA_METHOD.
 * NOTE(review): the function's braces were lost in extraction.
 */
79 void RSA_set_default_method(const RSA_METHOD *meth)
81 default_RSA_meth = meth;
/*
 * Return the currently installed default RSA_METHOD.
 * NOTE(review): braces lost in extraction.
 */
84 const RSA_METHOD *RSA_get_default_method(void)
86 return default_RSA_meth;
/*
 * Return the built-in OpenSSL PKCS#1 RSA_METHOD table.
 * NOTE(review): braces lost in extraction.
 */
89 const RSA_METHOD *RSA_PKCS1_OpenSSL(void)
91 return &rsa_pkcs1_ossl_meth;
/*
 * NOTE(review): only the signature survived extraction — the entire body
 * (presumably returning a null/placeholder method) is missing; recover it
 * from the upstream file.
 */
94 const RSA_METHOD *RSA_null_method(void)
/*
 * Public-key encryption (RSAEP): sanity-check the key, apply the requested
 * padding to |from| into |buf|, convert to a BIGNUM and exponentiate with
 * rsa->e mod rsa->n, then serialize the result into |to| zero-padded to
 * the modulus length. Returns the output length via BN_bn2binpad, -1 on
 * error (as initialized).
 * NOTE(review): extraction damage — the declarations of f/ret/ctx, the
 * switch statement, goto targets, returns and most braces are missing;
 * the stray leading decimal tokens are fused original line numbers. Text
 * below is token-preserving and re-flowed only.
 */
99 static int rsa_ossl_public_encrypt(int flen, const unsigned char *from,
100 unsigned char *to, RSA *rsa, int padding)
103 int i, num = 0, r = -1;
104 unsigned char *buf = NULL;
/* reject oversized moduli before doing any work */
107 if (BN_num_bits(rsa->n) > OPENSSL_RSA_MAX_MODULUS_BITS) {
108 ERR_raise(ERR_LIB_RSA, RSA_R_MODULUS_TOO_LARGE);
/* e must be smaller than n */
112 if (BN_ucmp(rsa->n, rsa->e) <= 0) {
113 ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
117 /* for large moduli, enforce exponent limit */
118 if (BN_num_bits(rsa->n) > OPENSSL_RSA_SMALL_MODULUS_BITS) {
119 if (BN_num_bits(rsa->e) > OPENSSL_RSA_MAX_PUBEXP_BITS) {
120 ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
125 if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
129 ret = BN_CTX_get(ctx);
130 num = BN_num_bytes(rsa->n);
131 buf = OPENSSL_malloc(num);
132 if (ret == NULL || buf == NULL)
/* pad the input according to the caller's padding mode */
136 case RSA_PKCS1_PADDING:
137 i = ossl_rsa_padding_add_PKCS1_type_2_ex(rsa->libctx, buf, num,
140 case RSA_PKCS1_OAEP_PADDING:
141 i = ossl_rsa_padding_add_PKCS1_OAEP_mgf1_ex(rsa->libctx, buf, num,
146 i = RSA_padding_add_none(buf, num, from, flen);
149 ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
155 if (BN_bin2bn(buf, num, f) == NULL)
/*
 * 160 * See SP800-56Br2, section 7.1.1.1
 * 161 * RSAEP: 1 < f < (n - 1).
 * 162 * (where f is the plaintext).
 */
164 if (padding == RSA_NO_PADDING) {
165 BIGNUM *nminus1 = BN_CTX_get(ctx);
167 if (BN_ucmp(f, BN_value_one()) <= 0) {
168 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_SMALL);
172 || BN_copy(nminus1, rsa->n) == NULL
173 || !BN_sub_word(nminus1, 1))
175 if (BN_ucmp(f, nminus1) >= 0) {
176 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
182 if (BN_ucmp(f, rsa->n) >= 0) {
183 /* usually the padding functions would catch this */
184 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
/* cache the Montgomery context for n when permitted */
189 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
190 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
194 if (!rsa->meth->bn_mod_exp(ret, f, rsa->e, rsa->n, ctx,
/*
 * 199 * BN_bn2binpad puts in leading 0 bytes if the number is less than
 * 200 * the length of the modulus.
 */
202 r = BN_bn2binpad(ret, to, num);
206 OPENSSL_clear_free(buf, num);
/*
 * Fetch (creating on first use) the blinding state for |rsa|, preferring
 * the per-thread rsa->blinding and falling back to the shared
 * rsa->mt_blinding. Uses a read lock upgraded to a write lock only when
 * a structure must be created.
 * NOTE(review): extraction damage — the *local out-parameter assignments,
 * early returns, err label and braces are missing; re-flowed tokens only.
 */
210 static BN_BLINDING *rsa_get_blinding(RSA *rsa, int *local, BN_CTX *ctx)
214 if (!CRYPTO_THREAD_read_lock(rsa->lock))
217 if (rsa->blinding == NULL) {
/*
 * 219 * This dance with upgrading the lock from read to write will be
 * 220 * slower in cases of a single use RSA object, but should be
 * 221 * significantly better in multi-thread cases (e.g. servers). It's
 */
224 CRYPTO_THREAD_unlock(rsa->lock);
225 if (!CRYPTO_THREAD_write_lock(rsa->lock))
/* re-check under the write lock before creating */
227 if (rsa->blinding == NULL)
228 rsa->blinding = RSA_setup_blinding(rsa, ctx);
235 if (BN_BLINDING_is_current_thread(ret)) {
236 /* rsa->blinding is ours! */
240 /* resort to rsa->mt_blinding instead */
/*
 * 243 * instructs rsa_blinding_convert(), rsa_blinding_invert() that the
 * 244 * BN_BLINDING is shared, meaning that accesses require locks, and
 * 245 * that the blinding factor must be stored outside the BN_BLINDING
 */
249 if (rsa->mt_blinding == NULL) {
250 CRYPTO_THREAD_unlock(rsa->lock);
251 if (!CRYPTO_THREAD_write_lock(rsa->lock))
253 if (rsa->mt_blinding == NULL)
254 rsa->mt_blinding = RSA_setup_blinding(rsa, ctx);
256 ret = rsa->mt_blinding;
260 CRYPTO_THREAD_unlock(rsa->lock);
/*
 * Apply the blinding factor to |f|. With |unblind| == NULL the blinding
 * is thread-local and the unblinding factor stays inside |b|; otherwise
 * |b| is shared, so the conversion is done under the BN_BLINDING lock
 * and the unblinding factor is written to |unblind|.
 * NOTE(review): trailing parameter(s) of the signature and some braces
 * were lost in extraction.
 */
264 static int rsa_blinding_convert(BN_BLINDING *b, BIGNUM *f, BIGNUM *unblind,
267 if (unblind == NULL) {
/*
 * 269 * Local blinding: store the unblinding factor in BN_BLINDING.
 */
271 return BN_BLINDING_convert_ex(f, NULL, b, ctx);
/*
 * 274 * Shared blinding: store the unblinding factor outside BN_BLINDING.
 */
278 if (!BN_BLINDING_lock(b))
281 ret = BN_BLINDING_convert_ex(f, unblind, b, ctx);
282 BN_BLINDING_unlock(b);
/*
 * Remove the blinding factor from |f| after the private-key operation.
 * NOTE(review): the signature's trailing parameter(s) were lost in
 * extraction.
 */
288 static int rsa_blinding_invert(BN_BLINDING *b, BIGNUM *f, BIGNUM *unblind,
/*
 * 292 * For local blinding, unblind is set to NULL, and BN_BLINDING_invert_ex
 * 293 * will use the unblinding factor stored in BN_BLINDING. If BN_BLINDING
 * 294 * is shared between threads, unblind must be non-null:
 * 295 * BN_BLINDING_invert_ex will then use the local unblinding factor, and
 * 296 * will only read the modulus from BN_BLINDING. In both cases it's safe
 * 297 * to access the blinding without a lock.
 */
299 BN_set_flags(f, BN_FLG_CONSTTIME);
300 return BN_BLINDING_invert_ex(f, unblind, b, ctx);
/*
 * Private-key encryption (signing path, RSASP1): pad |from|, blind the
 * value, exponentiate with the private key (CRT via rsa_mod_exp when the
 * CRT components are present, otherwise plain d mod n with
 * BN_FLG_CONSTTIME), unblind, optionally take the X9.31 minimum
 * representative, and serialize into |to|.
 * NOTE(review): extraction damage — the switch statement, goto labels,
 * returns, the "res =" selection and most braces are missing; stray
 * leading decimal tokens are fused original line numbers. Re-flowed only.
 */
304 static int rsa_ossl_private_encrypt(int flen, const unsigned char *from,
305 unsigned char *to, RSA *rsa, int padding)
307 BIGNUM *f, *ret, *res;
308 int i, num = 0, r = -1;
309 unsigned char *buf = NULL;
311 int local_blinding = 0;
/*
 * 313 * Used only if the blinding structure is shared. A non-NULL unblind
 * 314 * instructs rsa_blinding_convert() and rsa_blinding_invert() to store
 * 315 * the unblinding factor outside the blinding structure.
 */
317 BIGNUM *unblind = NULL;
318 BN_BLINDING *blinding = NULL;
320 if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
324 ret = BN_CTX_get(ctx);
325 num = BN_num_bytes(rsa->n);
326 buf = OPENSSL_malloc(num);
327 if (ret == NULL || buf == NULL)
/* apply the signing padding */
331 case RSA_PKCS1_PADDING:
332 i = RSA_padding_add_PKCS1_type_1(buf, num, from, flen);
334 case RSA_X931_PADDING:
335 i = RSA_padding_add_X931(buf, num, from, flen);
338 i = RSA_padding_add_none(buf, num, from, flen);
341 ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
347 if (BN_bin2bn(buf, num, f) == NULL)
350 if (BN_ucmp(f, rsa->n) >= 0) {
351 /* usually the padding functions would catch this */
352 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
356 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
357 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
/* set up blinding unless explicitly disabled */
361 if (!(rsa->flags & RSA_FLAG_NO_BLINDING)) {
362 blinding = rsa_get_blinding(rsa, &local_blinding, ctx);
363 if (blinding == NULL) {
364 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
369 if (blinding != NULL) {
370 if (!local_blinding && ((unblind = BN_CTX_get(ctx)) == NULL)) {
371 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
374 if (!rsa_blinding_convert(blinding, f, unblind, ctx))
/* prefer the CRT path when all CRT components are available */
378 if ((rsa->flags & RSA_FLAG_EXT_PKEY) ||
379 (rsa->version == RSA_ASN1_VERSION_MULTI) ||
382 (rsa->dmp1 != NULL) && (rsa->dmq1 != NULL) && (rsa->iqmp != NULL))) {
383 if (!rsa->meth->rsa_mod_exp(ret, f, rsa, ctx))
386 BIGNUM *d = BN_new();
388 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
391 if (rsa->d == NULL) {
392 ERR_raise(ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY);
/* constant-time handle onto rsa->d */
396 BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
398 if (!rsa->meth->bn_mod_exp(ret, f, d, rsa->n, ctx,
399 rsa->_method_mod_n)) {
403 /* We MUST free d before any further use of rsa->d */
408 if (!rsa_blinding_invert(blinding, ret, unblind, ctx))
/* X9.31: emit min(ret, n - ret) */
411 if (padding == RSA_X931_PADDING) {
412 if (!BN_sub(f, rsa->n, ret))
414 if (BN_cmp(ret, f) > 0)
/*
 * 423 * BN_bn2binpad puts in leading 0 bytes if the number is less than
 * 424 * the length of the modulus.
 */
426 r = BN_bn2binpad(res, to, num);
430 OPENSSL_clear_free(buf, num);
/*
 * Derive the Key Derivation Key used for PKCS#1 v1.5 implicit rejection:
 * kdk = HMAC-SHA256(key = SHA256(d as fixed-width bytes),
 *                   data = ciphertext left-padded with zeros to |num|).
 * The hash is hardcoded to SHA-256 so different OpenSSL versions return
 * identical synthetic messages (no cross-version Bleichenbacher oracle).
 * NOTE(review): extraction damage — NULL checks' bodies, goto labels,
 * cleanup and return statements are missing; re-flowed tokens only.
 */
434 static int derive_kdk(int flen, const unsigned char *from, RSA *rsa,
435 unsigned char *buf, int num, unsigned char *kdk)
438 HMAC_CTX *hmac = NULL;
440 unsigned int md_len = SHA256_DIGEST_LENGTH;
441 unsigned char d_hash[SHA256_DIGEST_LENGTH] = {0};
/*
 * 443 * because we use d as a handle to rsa->d we need to keep it local and
 * 444 * free before any further use of rsa->d
 */
446 BIGNUM *d = BN_new();
449 ERR_raise(ERR_LIB_RSA, ERR_R_CRYPTO_LIB);
452 if (rsa->d == NULL) {
453 ERR_raise(ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY);
457 BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
/* serialize d into buf at modulus width */
458 if (BN_bn2binpad(d, buf, num) < 0) {
459 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
/*
 * 466 * we use hardcoded hash so that migrating between versions that use
 * 467 * different hash doesn't provide a Bleichenbacher oracle:
 * 468 * if the attacker can see that different versions return different
 * 469 * messages for the same ciphertext, they'll know that the message is
 * 470 * synthetically generated, which means that the padding check failed
 */
472 md = EVP_MD_fetch(rsa->libctx, "sha256", NULL);
474 ERR_raise(ERR_LIB_RSA, ERR_R_FETCH_FAILED);
478 if (EVP_Digest(buf, num, d_hash, NULL, md, NULL) <= 0) {
479 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
483 hmac = HMAC_CTX_new();
485 ERR_raise(ERR_LIB_RSA, ERR_R_CRYPTO_LIB);
489 if (HMAC_Init_ex(hmac, d_hash, sizeof(d_hash), md, NULL) <= 0) {
490 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
/* feed num-flen zero bytes so the MAC covers a fixed-width ciphertext */
495 memset(buf, 0, num - flen);
496 if (HMAC_Update(hmac, buf, num - flen) <= 0) {
497 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
501 if (HMAC_Update(hmac, from, flen) <= 0) {
502 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
506 md_len = SHA256_DIGEST_LENGTH;
507 if (HMAC_Final(hmac, kdk, &md_len) <= 0) {
508 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
/*
 * Private-key decryption (RSADP): blind, exponentiate with the private
 * key, unblind, then run the constant-time padding check. For PKCS#1
 * v1.5 the implicit-rejection variant is used (kdk from derive_kdk)
 * unless the key is an external pkey, in which case it falls back to the
 * classic check. The final err_clear_last_constant_time() hides whether
 * the padding error was raised.
 * NOTE(review): extraction damage — declarations of f/ret/ctx, length
 * checks' conditions, switch/goto scaffolding and braces are missing;
 * stray leading decimal tokens are fused original line numbers.
 */
519 static int rsa_ossl_private_decrypt(int flen, const unsigned char *from,
520 unsigned char *to, RSA *rsa, int padding)
523 int j, num = 0, r = -1;
524 unsigned char *buf = NULL;
525 unsigned char kdk[SHA256_DIGEST_LENGTH] = {0};
527 int local_blinding = 0;
/*
 * 529 * Used only if the blinding structure is shared. A non-NULL unblind
 * 530 * instructs rsa_blinding_convert() and rsa_blinding_invert() to store
 * 531 * the unblinding factor outside the blinding structure.
 */
533 BIGNUM *unblind = NULL;
534 BN_BLINDING *blinding = NULL;
/*
 * 537 * we need the value of the private exponent to perform implicit rejection
 */
539 if ((rsa->flags & RSA_FLAG_EXT_PKEY) && (padding == RSA_PKCS1_PADDING))
540 padding = RSA_PKCS1_NO_IMPLICIT_REJECT_PADDING;
542 if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
546 ret = BN_CTX_get(ctx);
548 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
551 num = BN_num_bytes(rsa->n);
552 buf = OPENSSL_malloc(num);
/*
 * 557 * This check was for equality but PGP does evil things and chops off the
 * (comment truncated in extraction)
 */
561 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_GREATER_THAN_MOD_LEN);
566 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_SMALL);
570 /* make data into a big number */
571 if (BN_bin2bn(from, (int)flen, f) == NULL)
/*
 * 576 * See SP800-56Br2, section 7.1.2.1
 * 577 * RSADP: 1 < f < (n - 1)
 * 578 * (where f is the ciphertext).
 */
580 if (padding == RSA_NO_PADDING) {
581 BIGNUM *nminus1 = BN_CTX_get(ctx);
583 if (BN_ucmp(f, BN_value_one()) <= 0) {
584 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_SMALL);
588 || BN_copy(nminus1, rsa->n) == NULL
589 || !BN_sub_word(nminus1, 1))
591 if (BN_ucmp(f, nminus1) >= 0) {
592 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
598 if (BN_ucmp(f, rsa->n) >= 0) {
599 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
603 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
604 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
608 if (!(rsa->flags & RSA_FLAG_NO_BLINDING)) {
609 blinding = rsa_get_blinding(rsa, &local_blinding, ctx);
610 if (blinding == NULL) {
611 ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
616 if (blinding != NULL) {
617 if (!local_blinding && ((unblind = BN_CTX_get(ctx)) == NULL)) {
618 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
621 if (!rsa_blinding_convert(blinding, f, unblind, ctx))
/* CRT path when all CRT components are available */
626 if ((rsa->flags & RSA_FLAG_EXT_PKEY) ||
627 (rsa->version == RSA_ASN1_VERSION_MULTI) ||
630 (rsa->dmp1 != NULL) && (rsa->dmq1 != NULL) && (rsa->iqmp != NULL))) {
631 if (!rsa->meth->rsa_mod_exp(ret, f, rsa, ctx))
634 BIGNUM *d = BN_new();
636 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
639 if (rsa->d == NULL) {
640 ERR_raise(ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY);
644 BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
645 if (!rsa->meth->bn_mod_exp(ret, f, d, rsa->n, ctx,
646 rsa->_method_mod_n)) {
650 /* We MUST free d before any further use of rsa->d */
655 if (!rsa_blinding_invert(blinding, ret, unblind, ctx))
/*
 * 659 * derive the Key Derivation Key from private exponent and public
 * (comment truncated in extraction)
 */
662 if (padding == RSA_PKCS1_PADDING) {
663 if (derive_kdk(flen, from, rsa, buf, num, kdk) == 0)
667 j = BN_bn2binpad(ret, buf, num);
/* constant-time padding checks */
672 case RSA_PKCS1_NO_IMPLICIT_REJECT_PADDING:
673 r = RSA_padding_check_PKCS1_type_2(to, num, buf, j, num);
675 case RSA_PKCS1_PADDING:
676 r = ossl_rsa_padding_check_PKCS1_type_2(rsa->libctx, to, num, buf, j, num, kdk);
678 case RSA_PKCS1_OAEP_PADDING:
679 r = RSA_padding_check_PKCS1_OAEP(to, num, buf, j, num, NULL, 0);
682 memcpy(to, buf, (r = j));
685 ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
/*
 * 690 * This trick doesn't work in the FIPS provider because libcrypto manages
 * 691 * the error stack. Instead we opt not to put an error on the stack at all
 * 692 * in case of padding failure in the FIPS provider.
 */
694 ERR_raise(ERR_LIB_RSA, RSA_R_PADDING_CHECK_FAILED);
695 err_clear_last_constant_time(1 & ~constant_time_msb(r));
701 OPENSSL_clear_free(buf, num);
705 /* signature verification */
/*
 * Public-key decryption (RSAVP1): sanity-check the key, exponentiate the
 * signature with rsa->e mod rsa->n, undo the X9.31 negation when needed,
 * then run the padding check for the selected mode.
 * NOTE(review): extraction damage — declarations of f/ret/ctx, the
 * switch/goto scaffolding, returns and braces are missing; stray leading
 * decimal tokens are fused original line numbers. Re-flowed only.
 */
706 static int rsa_ossl_public_decrypt(int flen, const unsigned char *from,
707 unsigned char *to, RSA *rsa, int padding)
710 int i, num = 0, r = -1;
711 unsigned char *buf = NULL;
714 if (BN_num_bits(rsa->n) > OPENSSL_RSA_MAX_MODULUS_BITS) {
715 ERR_raise(ERR_LIB_RSA, RSA_R_MODULUS_TOO_LARGE);
719 if (BN_ucmp(rsa->n, rsa->e) <= 0) {
720 ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
724 /* for large moduli, enforce exponent limit */
725 if (BN_num_bits(rsa->n) > OPENSSL_RSA_SMALL_MODULUS_BITS) {
726 if (BN_num_bits(rsa->e) > OPENSSL_RSA_MAX_PUBEXP_BITS) {
727 ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
732 if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
736 ret = BN_CTX_get(ctx);
738 ERR_raise(ERR_LIB_RSA, ERR_R_BN_LIB);
741 num = BN_num_bytes(rsa->n);
742 buf = OPENSSL_malloc(num);
/*
 * 747 * This check was for equality but PGP does evil things and chops off the
 * (comment truncated in extraction)
 */
751 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_GREATER_THAN_MOD_LEN);
755 if (BN_bin2bn(from, flen, f) == NULL)
758 if (BN_ucmp(f, rsa->n) >= 0) {
759 ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
763 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
764 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
768 if (!rsa->meth->bn_mod_exp(ret, f, rsa->e, rsa->n, ctx,
/* X9.31: result must end in 0xc (12); otherwise use n - ret */
772 if ((padding == RSA_X931_PADDING) && ((bn_get_words(ret)[0] & 0xf) != 12))
773 if (!BN_sub(ret, rsa->n, ret))
776 i = BN_bn2binpad(ret, buf, num);
781 case RSA_PKCS1_PADDING:
782 r = RSA_padding_check_PKCS1_type_1(to, num, buf, i, num);
784 case RSA_X931_PADDING:
785 r = RSA_padding_check_X931(to, num, buf, i, num);
788 memcpy(to, buf, (r = i));
791 ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
795 ERR_raise(ERR_LIB_RSA, RSA_R_PADDING_CHECK_FAILED);
800 OPENSSL_clear_free(buf, num);
/*
 * CRT private-key exponentiation: r0 = I^d mod n computed via the two
 * (or more, multi-prime) prime factors, with a "smooth" fixed-top
 * constant-time path when the method uses BN_mod_exp_mont and the cached
 * Montgomery contexts are available, and a BN_mod/BN_mod_exp fallback
 * otherwise. Ends with a verify step (r0^e ?= I mod n) that falls back
 * to a plain d-mod-n exponentiation if the CRT result fails congruence.
 * NOTE(review): extraction damage throughout — NULL checks, goto/err
 * scaffolding, several conditions and closing braces are missing; stray
 * leading decimal tokens are fused original line numbers. Re-flowed only.
 */
804 static int rsa_ossl_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx)
806 BIGNUM *r1, *m1, *vrfy;
807 int ret = 0, smooth = 0;
809 BIGNUM *r2, *m[RSA_MAX_PRIME_NUM - 2];
810 int i, ex_primes = 0;
811 RSA_PRIME_INFO *pinfo;
816 r1 = BN_CTX_get(ctx);
818 r2 = BN_CTX_get(ctx);
820 m1 = BN_CTX_get(ctx);
821 vrfy = BN_CTX_get(ctx);
/* multi-prime keys must carry a sane number of extra primes */
826 if (rsa->version == RSA_ASN1_VERSION_MULTI
827 && ((ex_primes = sk_RSA_PRIME_INFO_num(rsa->prime_infos)) <= 0
828 || ex_primes > RSA_MAX_PRIME_NUM - 2))
832 if (rsa->flags & RSA_FLAG_CACHE_PRIVATE) {
833 BIGNUM *factor = BN_new();
/*
 * 839 * Make sure BN_mod_inverse in Montgomery initialization uses the
 * 840 * BN_FLG_CONSTTIME flag
 */
842 if (!(BN_with_flags(factor, rsa->p, BN_FLG_CONSTTIME),
843 BN_MONT_CTX_set_locked(&rsa->_method_mod_p, rsa->lock,
845 || !(BN_with_flags(factor, rsa->q, BN_FLG_CONSTTIME),
846 BN_MONT_CTX_set_locked(&rsa->_method_mod_q, rsa->lock,
852 for (i = 0; i < ex_primes; i++) {
853 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
854 BN_with_flags(factor, pinfo->r, BN_FLG_CONSTTIME);
855 if (!BN_MONT_CTX_set_locked(&pinfo->m, rsa->lock, factor, ctx)) {
/*
 * 862 * We MUST free |factor| before any further use of the prime factors
 */
866 smooth = (rsa->meth->bn_mod_exp == BN_mod_exp_mont)
870 && (BN_num_bits(rsa->q) == BN_num_bits(rsa->p));
873 if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
874 if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
/*
 * 880 * Conversion from Montgomery domain, a.k.a. Montgomery reduction,
 * 881 * accepts values in [0-m*2^w) range. w is m's bit width rounded up
 * 882 * to limb width. So that at the very least if |I| is fully reduced,
 * 883 * i.e. less than p*q, we can count on from-to round to perform
 * 884 * below modulo operations on |I|. Unlike BN_mod it's constant time.
 */
886 if (/* m1 = I moq q */
887 !bn_from_mont_fixed_top(m1, I, rsa->_method_mod_q, ctx)
888 || !bn_to_mont_fixed_top(m1, m1, rsa->_method_mod_q, ctx)
890 || !bn_from_mont_fixed_top(r1, I, rsa->_method_mod_p, ctx)
891 || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx)
/*
 * 893 * Use parallel exponentiations optimization if possible,
 * 894 * otherwise fallback to two sequential exponentiations:
 */
898 || !BN_mod_exp_mont_consttime_x2(m1, m1, rsa->dmq1, rsa->q,
900 r1, r1, rsa->dmp1, rsa->p,
903 /* r1 = (r1 - m1) mod p */
/*
 * 905 * bn_mod_sub_fixed_top is not regular modular subtraction,
 * 906 * it can tolerate subtrahend to be larger than modulus, but
 * 907 * not bit-wise wider. This makes up for uncommon q>p case,
 * 908 * when |m1| can be larger than |rsa->p|.
 */
910 || !bn_mod_sub_fixed_top(r1, r1, m1, rsa->p)
912 /* r1 = r1 * iqmp mod p */
913 || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx)
914 || !bn_mul_mont_fixed_top(r1, r1, rsa->iqmp, rsa->_method_mod_p,
916 /* r0 = r1 * q + m1 */
917 || !bn_mul_fixed_top(r0, r1, rsa->q, ctx)
918 || !bn_mod_add_fixed_top(r0, r0, m1, rsa->n))
924 /* compute I mod q */
926 BIGNUM *c = BN_new();
929 BN_with_flags(c, I, BN_FLG_CONSTTIME);
931 if (!BN_mod(r1, c, rsa->q, ctx)) {
937 BIGNUM *dmq1 = BN_new();
942 BN_with_flags(dmq1, rsa->dmq1, BN_FLG_CONSTTIME);
944 /* compute r1^dmq1 mod q */
945 if (!rsa->meth->bn_mod_exp(m1, r1, dmq1, rsa->q, ctx,
946 rsa->_method_mod_q)) {
951 /* We MUST free dmq1 before any further use of rsa->dmq1 */
955 /* compute I mod p */
956 if (!BN_mod(r1, c, rsa->p, ctx)) {
960 /* We MUST free c before any further use of I */
965 BIGNUM *dmp1 = BN_new();
968 BN_with_flags(dmp1, rsa->dmp1, BN_FLG_CONSTTIME);
970 /* compute r1^dmp1 mod p */
971 if (!rsa->meth->bn_mod_exp(r0, r1, dmp1, rsa->p, ctx,
972 rsa->_method_mod_p)) {
976 /* We MUST free dmp1 before any further use of rsa->dmp1 */
982 BIGNUM *di = BN_new(), *cc = BN_new();
984 if (cc == NULL || di == NULL) {
990 for (i = 0; i < ex_primes; i++) {
992 if ((m[i] = BN_CTX_get(ctx)) == NULL) {
998 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
1000 /* prepare c and d_i */
1001 BN_with_flags(cc, I, BN_FLG_CONSTTIME);
1002 BN_with_flags(di, pinfo->d, BN_FLG_CONSTTIME);
1004 if (!BN_mod(r1, cc, pinfo->r, ctx)) {
1009 /* compute r1 ^ d_i mod r_i */
1010 if (!rsa->meth->bn_mod_exp(m[i], r1, di, pinfo->r, ctx, pinfo->m)) {
1022 if (!BN_sub(r0, r0, m1))
/*
 * 1025 * This will help stop the size of r0 increasing, which does affect the
 * 1026 * multiply if it optimised for a power of 2 size
 */
1028 if (BN_is_negative(r0))
1029 if (!BN_add(r0, r0, rsa->p))
1032 if (!BN_mul(r1, r0, rsa->iqmp, ctx))
1036 BIGNUM *pr1 = BN_new();
1039 BN_with_flags(pr1, r1, BN_FLG_CONSTTIME);
1041 if (!BN_mod(r0, pr1, rsa->p, ctx)) {
1045 /* We MUST free pr1 before any further use of r1 */
/*
 * 1050 * If p < q it is occasionally possible for the correction of adding 'p'
 * 1051 * if r0 is negative above to leave the result still negative. This can
 * 1052 * break the private key operations: the following second correction
 * 1053 * should *always* correct this rare occurrence. This will *never* happen
 * 1054 * with OpenSSL generated keys because they ensure p > q [steve]
 */
1056 if (BN_is_negative(r0))
1057 if (!BN_add(r0, r0, rsa->p))
1059 if (!BN_mul(r1, r0, rsa->q, ctx))
1061 if (!BN_add(r0, r1, m1))
1065 /* add m_i to m in multi-prime case */
1066 if (ex_primes > 0) {
1067 BIGNUM *pr2 = BN_new();
1072 for (i = 0; i < ex_primes; i++) {
1073 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
1074 if (!BN_sub(r1, m[i], r0)) {
1079 if (!BN_mul(r2, r1, pinfo->t, ctx)) {
1084 BN_with_flags(pr2, r2, BN_FLG_CONSTTIME);
1086 if (!BN_mod(r1, pr2, pinfo->r, ctx)) {
1091 if (BN_is_negative(r1))
1092 if (!BN_add(r1, r1, pinfo->r)) {
1096 if (!BN_mul(r1, r1, pinfo->pp, ctx)) {
1100 if (!BN_add(r0, r0, r1)) {
/* verify the CRT result against the public key when available */
1110 if (rsa->e && rsa->n) {
1111 if (rsa->meth->bn_mod_exp == BN_mod_exp_mont) {
1112 if (!BN_mod_exp_mont(vrfy, r0, rsa->e, rsa->n, ctx,
1113 rsa->_method_mod_n))
1117 if (!rsa->meth->bn_mod_exp(vrfy, r0, rsa->e, rsa->n, ctx,
1118 rsa->_method_mod_n))
/*
 * 1122 * If 'I' was greater than (or equal to) rsa->n, the operation will
 * 1123 * be equivalent to using 'I mod n'. However, the result of the
 * 1124 * verify will *always* be less than 'n' so we don't check for
 * 1125 * absolute equality, just congruency.
 */
1127 if (!BN_sub(vrfy, vrfy, I))
1129 if (BN_is_zero(vrfy)) {
1132 goto err; /* not actually error */
1134 if (!BN_mod(vrfy, vrfy, rsa->n, ctx))
1136 if (BN_is_negative(vrfy))
1137 if (!BN_add(vrfy, vrfy, rsa->n))
1139 if (!BN_is_zero(vrfy)) {
/*
 * 1141 * 'I' and 'vrfy' aren't congruent mod n. Don't leak
 * 1142 * miscalculated CRT output, just do a raw (slower) mod_exp and
 * 1143 * return that instead.
 */
1146 BIGNUM *d = BN_new();
1149 BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);
1151 if (!rsa->meth->bn_mod_exp(r0, I, d, rsa->n, ctx,
1152 rsa->_method_mod_n)) {
1156 /* We MUST free d before any further use of rsa->d */
/*
 * 1161 * It's unfortunate that we have to bn_correct_top(r0). What hopefully
 * 1162 * saves the day is that correction is highly unlike, and private key
 * 1163 * operations are customarily performed on blinded message. Which means
 * 1164 * that attacker won't observe correlation with chosen plaintext.
 * 1165 * Secondly, remaining code would still handle it in same computational
 * 1166 * time and even conceal memory access pattern around corrected top.
 */
/*
 * Method init hook: enable caching of the public and private Montgomery
 * contexts on this key.
 * NOTE(review): braces and the return statement were lost in extraction.
 */
1175 static int rsa_ossl_init(RSA *rsa)
1177 rsa->flags |= RSA_FLAG_CACHE_PUBLIC | RSA_FLAG_CACHE_PRIVATE;
/*
 * Method finish hook: free the cached Montgomery contexts for n, p, q
 * and for each multi-prime factor.
 * NOTE(review): the loop-variable declaration, braces and return were
 * lost in extraction.
 */
1181 static int rsa_ossl_finish(RSA *rsa)
1185 RSA_PRIME_INFO *pinfo;
1187 for (i = 0; i < sk_RSA_PRIME_INFO_num(rsa->prime_infos); i++) {
1188 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
1189 BN_MONT_CTX_free(pinfo->m);
1193 BN_MONT_CTX_free(rsa->_method_mod_n);
1194 BN_MONT_CTX_free(rsa->_method_mod_p);
1195 BN_MONT_CTX_free(rsa->_method_mod_q);
1199 #ifdef S390X_MOD_EXP
/*
 * s390x-accelerated CRT: try the hardware s390x_crt primitive for
 * two-prime keys and fall back to the generic rsa_ossl_mod_exp on
 * failure or for multi-prime keys.
 * NOTE(review): the signature's trailing BN_CTX parameter, a success
 * return, braces and the closing #endif were lost in extraction.
 */
1200 static int rsa_ossl_s390x_mod_exp(BIGNUM *r0, const BIGNUM *i, RSA *rsa,
1203 if (rsa->version != RSA_ASN1_VERSION_MULTI) {
1204 if (s390x_crt(r0, i, rsa->p, rsa->q, rsa->dmp1, rsa->dmq1, rsa->iqmp) == 1)
1207 return rsa_ossl_mod_exp(r0, i, rsa, ctx);