git.ipfire.org Git - thirdparty/openssl.git/commitdiff
Take RSAZ modules into build loop, add glue and engage.
author     Andy Polyakov <appro@openssl.org>
           Fri, 5 Jul 2013 19:39:47 +0000 (21:39 +0200)
committer  Andy Polyakov <appro@openssl.org>
           Fri, 5 Jul 2013 19:39:47 +0000 (21:39 +0200)
RT: 2582, 2850

Configure
TABLE
crypto/bn/Makefile
crypto/bn/bn_exp.c
crypto/bn/rsaz_exp.c [new file with mode: 0644]
crypto/bn/rsaz_exp.h [new file with mode: 0644]

diff --git a/Configure b/Configure
index 601f8931a637d2a60ab249ff160ba9c86eb1d1eb..dbb403b74c14002b08ae9eca089c4c28d188ff03 100755 (executable)
--- a/Configure
+++ b/Configure
@@ -128,7 +128,7 @@ my $x86_asm="x86cpuid.o:bn-586.o co-586.o x86-mont.o x86-gf2m.o:des-586.o crypt5
 
 my $x86_elf_asm="$x86_asm:elf";
 
-my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o::aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o rc4-md5-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o:ghash-x86_64.o aesni-gcm-x86_64.o:e_padlock-x86_64.o";
+my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o::aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o rc4-md5-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o:ghash-x86_64.o aesni-gcm-x86_64.o:e_padlock-x86_64.o";
 my $ia64_asm="ia64cpuid.o:bn-ia64.o ia64-mont.o::aes_core.o aes_cbc.o aes-ia64.o::md5-ia64.o:sha1-ia64.o sha256-ia64.o sha512-ia64.o::rc4-ia64.o rc4_skey.o:::::ghash-ia64.o::void";
 my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o:des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o:aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o::md5-sparcv9.o:sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o::::::camellia.o cmll_misc.o cmll_cbc.o cmllt4-sparcv9.o:ghash-sparcv9.o::void";
 my $sparcv8_asm=":sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::::::void";
diff --git a/TABLE b/TABLE
index 1e90ee37d5c0e962de62a31e46b076ae8ece5980..6e0b602b9ad07c69f8d5ab704fe4fd53f3a41cbf 100644 (file)
--- a/TABLE
+++ b/TABLE
@@ -306,7 +306,7 @@ $sys_id       =
 $lflags       = 
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -801,7 +801,7 @@ $sys_id       = WIN64A
 $lflags       = 
 $bn_ops       = SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = bn_asm.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = bn_asm.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -1494,7 +1494,7 @@ $sys_id       = MACOSX
 $lflags       = -Wl,-search_paths_first%
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -1659,7 +1659,7 @@ $sys_id       = WIN64A
 $lflags       = 
 $bn_ops       = SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = bn_asm.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = bn_asm.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -1758,7 +1758,7 @@ $sys_id       = MACOSX
 $lflags       = -Wl,-search_paths_first%
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -1824,7 +1824,7 @@ $sys_id       =
 $lflags       = 
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -2022,7 +2022,7 @@ $sys_id       =
 $lflags       = -ldl
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -2550,7 +2550,7 @@ $sys_id       =
 $lflags       = -ldl
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -2748,7 +2748,7 @@ $sys_id       =
 $lflags       = -ldl
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -2814,7 +2814,7 @@ $sys_id       =
 $lflags       = -ldl
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -4464,7 +4464,7 @@ $sys_id       =
 $lflags       = -ldl
 $bn_ops       = SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -4497,7 +4497,7 @@ $sys_id       =
 $lflags       = -ldl
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -4530,7 +4530,7 @@ $sys_id       =
 $lflags       = -ldl -no_cpprt
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -4728,7 +4728,7 @@ $sys_id       = MINGW64
 $lflags       = -lws2_32 -lgdi32 -lcrypt32
 $bn_ops       = SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -5718,7 +5718,7 @@ $sys_id       =
 $lflags       = -lsocket -lnsl -ldl
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
@@ -5751,7 +5751,7 @@ $sys_id       =
 $lflags       = -lsocket -lnsl -ldl
 $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
-$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o
+$bn_obj       = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o rsaz_exp.o rsaz-x86_64.o rsaz-avx2.o
 $des_obj      = 
 $aes_obj      = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o
 $bf_obj       = 
diff --git a/crypto/bn/Makefile b/crypto/bn/Makefile
index 6267ef4bb94f7b42645ec03afdaff165fd88cec3..30cc210feb133c333b3d504ac35640e4964f1cde 100644 (file)
--- a/crypto/bn/Makefile
+++ b/crypto/bn/Makefile
@@ -110,6 +110,10 @@ x86_64-gf2m.s:     asm/x86_64-gf2m.pl
        $(PERL) asm/x86_64-gf2m.pl $(PERLASM_SCHEME) > $@
 modexp512-x86_64.s:    asm/modexp512-x86_64.pl
        $(PERL) asm/modexp512-x86_64.pl $(PERLASM_SCHEME) > $@
+rsaz-x86_64.s: asm/rsaz-x86_64.pl
+       $(PERL) asm/rsaz-x86_64.pl $(PERLASM_SCHEME) > $@
+rsaz-avx2.s:   asm/rsaz-avx2.pl 
+       $(PERL) asm/rsaz-avx2.pl $(PERLASM_SCHEME) > $@
 
 bn-ia64.s:     asm/ia64.S
        $(CC) $(CFLAGS) -E asm/ia64.S > $@
diff --git a/crypto/bn/bn_exp.c b/crypto/bn/bn_exp.c
index b17b5694fcb8ac102f62c1759be955bc533b97df..cfbaf2b66fd10f0c6a9c0831eadc25b81543b953 100644 (file)
--- a/crypto/bn/bn_exp.c
+++ b/crypto/bn/bn_exp.c
 # include <alloca.h>
 #endif
 
+#undef RSAZ_ENABLED
+#if defined(OPENSSL_BN_ASM_MONT) && \
+       (defined(__x86_64) || defined(__x86_64__) || \
+        defined(_M_AMD64) || defined(_M_X64))
+# include "rsaz_exp.h"
+# define RSAZ_ENABLED
+#endif
+
 #undef SPARC_T4_MONT
 #if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
 # include "sparc_arch.h"
@@ -677,6 +685,35 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
                if (!BN_MONT_CTX_set(mont,m,ctx)) goto err;
                }
 
+#ifdef RSAZ_ENABLED
+       /*
+        * If the size of the operands allow it, perform the optimized
+        * RSAZ exponentiation. For further information see
+        * crypto/bn/rsaz_exp.c and accompanying assembly modules.
+        */
+       if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024)
+           && rsaz_avx2_eligible())
+               {
+               if (NULL == bn_wexpand(rr, 16)) goto err;
+               RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d, mont->n0[0]);
+               rr->top = 16;
+               rr->neg = 0;
+               bn_correct_top(rr);
+               ret = 1;
+               goto err;
+               }
+       else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512))
+               {
+               if (NULL == bn_wexpand(rr,8)) goto err;
+               RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d);
+               rr->top = 8;
+               rr->neg = 0;
+               bn_correct_top(rr);
+               ret = 1;
+               goto err;
+               }
+#endif
+
        /* Get the window size to use with size of p. */
        window = BN_window_bits_for_ctime_exponent_size(bits);
 #if defined(SPARC_T4_MONT)
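The new block above is reached transparently through the public BN API: BN_mod_exp_mont_consttime() takes the RSAZ path when it sees 16-word operands with a 1024-bit modulus (and rsaz_avx2_eligible() reports AVX2 support), or 8-word operands with a 512-bit modulus; otherwise it falls through to the existing constant-time window code. A minimal caller-side sketch, not part of the commit (the helper name demo_mod_exp_1024 is illustrative only and error handling is abbreviated):

#include <openssl/bn.h>

/* Exercise the 1024-bit RSAZ path through the ordinary BN interface. */
int demo_mod_exp_1024(BIGNUM *r)
{
	BN_CTX *ctx = BN_CTX_new();
	BIGNUM *a = BN_new(), *p = BN_new(), *m = BN_new();
	int ok = 0;

	if (ctx == NULL || a == NULL || p == NULL || m == NULL)
		goto done;

	/* 1024-bit odd modulus, 16-word base and exponent: on x86_64 with
	 * AVX2 this dispatches to RSAZ_1024_mod_exp_avx2; elsewhere the
	 * generic window code below the new #ifdef block runs instead. */
	if (!BN_rand(m, 1024, 0, 1)) goto done;
	if (!BN_rand(a, 1023, 0, 0)) goto done;
	if (!BN_rand(p, 1024, 0, 0)) goto done;

	ok = BN_mod_exp_mont_consttime(r, a, p, m, ctx, NULL);
done:
	BN_free(a); BN_free(p); BN_free(m);
	BN_CTX_free(ctx);
	return ok;
}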
diff --git a/crypto/bn/rsaz_exp.c b/crypto/bn/rsaz_exp.c
new file mode 100644 (file)
index 0000000..57591b8
--- /dev/null
+++ b/crypto/bn/rsaz_exp.c
@@ -0,0 +1,306 @@
+/******************************************************************************\r
+* Copyright(c) 2012, Intel Corp.                                             \r
+* Developers and authors:                                                    \r
+* Shay Gueron (1, 2), and Vlad Krasnov (1)                                   \r
+* (1) Intel Corporation, Israel Development Center, Haifa, Israel                               \r
+* (2) University of Haifa, Israel                                              \r
+******************************************************************************\r
+* LICENSE:                                                                \r
+* This submission to OpenSSL is to be made available under the OpenSSL  \r
+* license, and only to the OpenSSL project, in order to allow integration    \r
+* into the publicly distributed code. \r
+* The use of this code, or portions of this code, or concepts embedded in\r
+* this code, or modification of this code and/or algorithm(s) in it, or the\r
+* use of this code for any other purpose than stated above, requires special\r
+* licensing.                                                                  \r
+******************************************************************************\r
+* DISCLAIMER:                                                                \r
+* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS AND THE COPYRIGHT OWNERS     \r
+* ``AS IS''. ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED \r
+* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \r
+* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS OR THE COPYRIGHT\r
+* OWNERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, \r
+* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF    \r
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS   \r
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN    \r
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)    \r
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE \r
+* POSSIBILITY OF SUCH DAMAGE.                                                \r
+******************************************************************************/\r
+\r
+#include "rsaz_exp.h"\r
+\r
+/*\r
+ * See crypto/bn/asm/rsaz-avx2.pl for further details.\r
+ */\r
+void rsaz_1024_norm2red_avx2(void *red,const void *norm);\r
+void rsaz_1024_mul_avx2(void *ret,const void *a,const void *b,const void *n,unsigned long k);\r
+void rsaz_1024_sqr_avx2(void *ret,const void *a,const void *n,unsigned long k,int cnt);\r
+void rsaz_1024_scatter5_avx2(void *tbl,const void *val,int i);\r
+void rsaz_1024_gather5_avx2(void *val,const void *tbl,int i);\r
+void rsaz_1024_red2norm_avx2(void *norm,const void *red);\r
+\r
+#if defined(__GNUC__)\r
+# define ALIGN64       __attribute__((aligned(64)))\r
+#elif defined(_MSC_VER)\r
+# define ALIGN64       __declspec(align(64))\r
+#elif defined(__SUNPRO_C)\r
+# define ALIGN64\r
+# pragma align 64(one,two80)\r
+#else\r
+# define ALIGN64       /* not fatal, might hurt performance a little */\r
+#endif\r
+\r
+ALIGN64 static const unsigned long one[40] =\r
+       {1,0,0,    0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};\r
+ALIGN64 static const unsigned long two80[40] =\r
+       {0,0,1<<22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};\r
+\r
+void RSAZ_1024_mod_exp_avx2(BN_ULONG result_norm[16],\r
+       const BN_ULONG base_norm[16], const BN_ULONG exponent[16],\r
+       const BN_ULONG m_norm[16], const BN_ULONG RR[16], BN_ULONG k0)\r
+{\r
+       unsigned char    storage[320*3+32*9*16+64];     /* 5.5KB */\r
+       unsigned char   *p_str = storage + (64-((size_t)storage%64));\r
+       unsigned char   *a_inv, *m, *result,\r
+                       *table_s = p_str+320*3,\r
+                       *R2      = table_s;     /* borrow */\r
+       int index;\r
+       int wvalue;\r
+\r
+       if ((((size_t)p_str&4095)+320)>>12) {\r
+               result = p_str;\r
+               a_inv = p_str + 320;\r
+               m = p_str + 320*2;      /* should not cross page */\r
+       } else {\r
+               m = p_str;              /* should not cross page */\r
+               result = p_str + 320;\r
+               a_inv = p_str + 320*2;\r
+       }\r
+\r
+       rsaz_1024_norm2red_avx2(m, m_norm);\r
+       rsaz_1024_norm2red_avx2(a_inv, base_norm);\r
+       rsaz_1024_norm2red_avx2(R2, RR);\r
+\r
+       rsaz_1024_mul_avx2(R2, R2, R2, m, k0);\r
+       rsaz_1024_mul_avx2(R2, R2, two80, m, k0);\r
+\r
+       /* table[0] = 1 */\r
+       rsaz_1024_mul_avx2(result, R2, one, m, k0);\r
+       /* table[1] = a_inv^1 */\r
+       rsaz_1024_mul_avx2(a_inv, a_inv, R2, m, k0);\r
+\r
+       rsaz_1024_scatter5_avx2(table_s,result,0);\r
+       rsaz_1024_scatter5_avx2(table_s,a_inv,1);\r
+\r
+       /* table[2] = a_inv^2 */\r
+       rsaz_1024_sqr_avx2(result, a_inv, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,2);\r
+#if 0\r
+       /* this is almost 2x smaller and less than 1% slower */\r
+       for (index=3; index<32; index++) {\r
+               rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+               rsaz_1024_scatter5_avx2(table_s,result,index);\r
+       }\r
+#else\r
+       /* table[4] = a_inv^4 */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,4);\r
+       /* table[8] = a_inv^8 */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,8);\r
+       /* table[16] = a_inv^16 */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,16);\r
+       /* table[17] = a_inv^17 */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,17);\r
+\r
+       /* table[3] */\r
+       rsaz_1024_gather5_avx2(result,table_s,2);\r
+       rsaz_1024_mul_avx2(result,result,a_inv,m,k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,3);\r
+       /* table[6] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,6);\r
+       /* table[12] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,12);\r
+       /* table[24] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,24);\r
+       /* table[25] */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,25);\r
+\r
+       /* table[5] */\r
+       rsaz_1024_gather5_avx2(result,table_s,4);\r
+       rsaz_1024_mul_avx2(result,result,a_inv,m,k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,5);\r
+       /* table[10] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,10);\r
+       /* table[20] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,20);\r
+       /* table[21] */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,21);\r
+\r
+       /* table[7] */\r
+       rsaz_1024_gather5_avx2(result,table_s,6);\r
+       rsaz_1024_mul_avx2(result,result,a_inv,m,k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,7);\r
+       /* table[14] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,14);\r
+       /* table[28] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,28);\r
+       /* table[29] */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,29);\r
+\r
+       /* table[9] */\r
+       rsaz_1024_gather5_avx2(result,table_s,8);\r
+       rsaz_1024_mul_avx2(result,result,a_inv,m,k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,9);\r
+       /* table[18] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,18);\r
+       /* table[19] */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,19);\r
+\r
+       /* table[11] */\r
+       rsaz_1024_gather5_avx2(result,table_s,10);\r
+       rsaz_1024_mul_avx2(result,result,a_inv,m,k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,11);\r
+       /* table[22] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,22);\r
+       /* table[23] */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,23);\r
+\r
+       /* table[13] */\r
+       rsaz_1024_gather5_avx2(result,table_s,12);\r
+       rsaz_1024_mul_avx2(result,result,a_inv,m,k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,13);\r
+       /* table[26] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,26);\r
+       /* table[27] */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,27);\r
+\r
+       /* table[15] */\r
+       rsaz_1024_gather5_avx2(result,table_s,14);\r
+       rsaz_1024_mul_avx2(result,result,a_inv,m,k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,15);\r
+       /* table[30] */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 1);\r
+       rsaz_1024_scatter5_avx2(table_s,result,30);\r
+       /* table[31] */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       rsaz_1024_scatter5_avx2(table_s,result,31);\r
+#endif\r
+\r
+       /* load first window */\r
+       p_str = (unsigned char*)exponent;\r
+       wvalue = p_str[127] >> 3;\r
+       rsaz_1024_gather5_avx2(result,table_s,wvalue);\r
+\r
+       index = 1014;\r
+\r
+       while(index > -1) {     /* loop for the remaining 203 five-bit windows */\r
+\r
+               rsaz_1024_sqr_avx2(result, result, m, k0, 5);\r
+\r
+               wvalue = *((unsigned short*)&p_str[index/8]);\r
+               wvalue = (wvalue>> (index%8)) & 31;\r
+               index-=5;\r
+\r
+               rsaz_1024_gather5_avx2(a_inv,table_s,wvalue);   /* borrow a_inv */\r
+               rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+       }\r
+\r
+       /* square four times */\r
+       rsaz_1024_sqr_avx2(result, result, m, k0, 4);\r
+\r
+       wvalue = p_str[0] & 15;\r
+\r
+       rsaz_1024_gather5_avx2(a_inv,table_s,wvalue);   /* borrow a_inv */\r
+       rsaz_1024_mul_avx2(result, result, a_inv, m, k0);\r
+\r
+       /* from Montgomery */\r
+       rsaz_1024_mul_avx2(result, result, one, m, k0);\r
+\r
+       rsaz_1024_red2norm_avx2(result_norm, result);\r
+\r
+       OPENSSL_cleanse(storage,sizeof(storage));\r
+}\r
+\r
+/*\r
+ * See crypto/bn/rsaz-x86_64.pl for further details.\r
+ */\r
+void rsaz_512_mul(void *ret,const void *a,const void *b,const void *n,unsigned long k);\r
+void rsaz_512_mul_scatter4(void *ret,const void *a,const void *n,unsigned long k,const void *tbl,unsigned int power);\r
+void rsaz_512_mul_gather4(void *ret,const void *a,const void *tbl,const void *n,unsigned long k,unsigned int power);\r
+void rsaz_512_mul_by_one(void *ret,const void *a,const void *n,unsigned long k);\r
+void rsaz_512_sqr(void *ret,const void *a,const void *n,unsigned long k,int cnt);\r
+void rsaz_512_scatter4(void *tbl, const unsigned long *val, int power);\r
+void rsaz_512_gather4(unsigned long *val, const void *tbl, int power);\r
+\r
+void RSAZ_512_mod_exp(BN_ULONG result[8],\r
+       const BN_ULONG base[8], const BN_ULONG exponent[8],\r
+       const BN_ULONG m[8], BN_ULONG k0, const BN_ULONG RR[8])\r
+{\r
+       unsigned char    storage[16*8*8+64*2+64];       /* 1.2KB */\r
+       unsigned char   *table = storage + (64-((size_t)storage%64));\r
+       unsigned long   *a_inv = (unsigned long *)(table+16*8*8),\r
+                       *temp  = (unsigned long *)(table+16*8*8+8*8);\r
+       unsigned char   *p_str = (unsigned char*)exponent;\r
+       int index;\r
+       unsigned int wvalue;\r
+\r
+       /* table[0] = 1_inv */\r
+       temp[0] = 0-m[0];       temp[1] = ~m[1];\r
+       temp[2] = ~m[2];        temp[3] = ~m[3];\r
+       temp[4] = ~m[4];        temp[5] = ~m[5];\r
+       temp[6] = ~m[6];        temp[7] = ~m[7];\r
+       rsaz_512_scatter4(table, temp, 0);\r
+\r
+       /* table [1] = a_inv^1 */\r
+       rsaz_512_mul(a_inv, base, RR, m, k0);\r
+       rsaz_512_scatter4(table, a_inv, 1);\r
+\r
+       /* table [2] = a_inv^2 */\r
+       rsaz_512_sqr(temp, a_inv, m, k0, 1);\r
+       rsaz_512_scatter4(table, temp, 2);\r
+\r
+       for (index=3; index<16; index++)\r
+               rsaz_512_mul_scatter4(temp, a_inv, m, k0, table, index);\r
+\r
+       /* load first window */\r
+       wvalue = p_str[63];\r
+\r
+       rsaz_512_gather4(temp, table, wvalue>>4);\r
+       rsaz_512_sqr(temp, temp, m, k0, 4);\r
+       rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue&0xf);\r
+\r
+       for (index=62; index>=0; index--) {\r
+               wvalue = p_str[index];\r
+\r
+               rsaz_512_sqr(temp, temp, m, k0, 4);\r
+               rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue>>4);\r
+\r
+               rsaz_512_sqr(temp, temp, m, k0, 4);\r
+               rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue&0x0f);\r
+       }\r
+\r
+       /* from Montgomery */\r
+       rsaz_512_mul_by_one(result, temp, m, k0);\r
+\r
+       OPENSSL_cleanse(storage,sizeof(storage));\r
+}\r
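The exponent scan in RSAZ_1024_mod_exp_avx2 above may be easier to follow outside the diff: the exponent is treated as a little-endian byte string, the head window is the top 5 bits (p_str[127] >> 3), the main loop then pulls 5-bit windows at bit offsets 1014, 1009, ..., 4, and a final 4-bit window (p_str[0] & 15) closes the scan, 5 + 203*5 + 4 = 1024 bits in total. A standalone sketch of just that scan, not part of the commit; get_window5 is an illustrative helper that replaces the unaligned 16-bit load with plain byte reads:

#include <stdio.h>

/* Read the 5-bit window starting at bit position bitpos of a
 * little-endian byte string (byte-wise equivalent of the unsigned
 * short load used in RSAZ_1024_mod_exp_avx2). */
static unsigned get_window5(const unsigned char *p, int bitpos)
{
	unsigned v = p[bitpos/8] | ((unsigned)p[bitpos/8 + 1] << 8);
	return (v >> (bitpos%8)) & 31;
}

int main(void)
{
	unsigned char exponent[128] = {0};	/* 1024-bit, little-endian */
	int index, windows = 0;

	exponent[127] = 0x80;			/* set only the top bit */

	printf("head window (bits 1019..1023): %u\n", exponent[127] >> 3);
	for (index = 1014; index > -1; index -= 5) {
		unsigned w = get_window5(exponent, index);
		if (windows < 2)		/* show the first couple */
			printf("bits %d..%d: %u\n", index, index + 4, w);
		windows++;
	}
	printf("main-loop windows: %d\n", windows);
	printf("tail window (bits 0..3): %u\n", exponent[0] & 15);
	return 0;
}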
diff --git a/crypto/bn/rsaz_exp.h b/crypto/bn/rsaz_exp.h
new file mode 100644 (file)
index 0000000..4241a1f
--- /dev/null
+++ b/crypto/bn/rsaz_exp.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+* Copyright(c) 2012, Intel Corp.                                             
+* Developers and authors:                                                    
+* Shay Gueron (1, 2), and Vlad Krasnov (1)                                   
+* (1) Intel Corporation, Israel Development Center, Haifa, Israel                               
+* (2) University of Haifa, Israel                                              
+******************************************************************************
+* LICENSE:                                                                
+* This submission to OpenSSL is to be made available under the OpenSSL  
+* license, and only to the OpenSSL project, in order to allow integration    
+* into the publicly distributed code. 
+* The use of this code, or portions of this code, or concepts embedded in
+* this code, or modification of this code and/or algorithm(s) in it, or the
+* use of this code for any other purpose than stated above, requires special
+* licensing.                                                                  
+******************************************************************************
+* DISCLAIMER:                                                                
+* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS AND THE COPYRIGHT OWNERS     
+* ``AS IS''. ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
+* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
+* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS OR THE COPYRIGHT
+* OWNERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 
+* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF    
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS   
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN    
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)    
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+* POSSIBILITY OF SUCH DAMAGE.                                                
+******************************************************************************/
+
+#ifndef RSAZ_EXP_H
+#define RSAZ_EXP_H
+
+#include <openssl/bn.h>
+
+void RSAZ_1024_mod_exp_avx2(BN_ULONG result[16],
+       const BN_ULONG base_norm[16], const BN_ULONG exponent[16],
+       const BN_ULONG m_norm[16], const BN_ULONG RR[16], BN_ULONG k0);
+int rsaz_avx2_eligible(void);
+
+void RSAZ_512_mod_exp(BN_ULONG result[8],
+       const BN_ULONG base_norm[8], const BN_ULONG exponent[8],
+       const BN_ULONG m_norm[8], BN_ULONG k0, const BN_ULONG RR[8]);
+#endif