2 # Copyright 2012-2021 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the Apache License 2.0 (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
19 # SPARCv9 VIS3 Montgomery multiplication procedure suitable for T3 and
20 # onward. There are three new instructions used here: umulxhi,
21 # addxc[cc] and initializing store. On T3 RSA private key operations
22 # are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key
23 # lengths. This is without dedicated squaring procedure. On T4
24 # corresponding coefficients are 1.47/2.10/2.80/2.90x, which is mostly
25 # for reference purposes, because T4 has dedicated Montgomery
26 # multiplication and squaring *instructions* that deliver even more.
# Pop the output filename (the last command-line argument) and, when one
# was supplied, redirect STDOUT to it so the generated assembly is written
# there. Use the three-argument form of open (the mode is never taken from
# the filename) and fail loudly instead of silently writing to the old
# STDOUT when the file cannot be created.
if ($output = pop) {
    open STDOUT, '>', $output
        or die "can't open $output: $!";
}
30 $frame = "STACK_FRAME";
35 # define __ASSEMBLER__ 1
37 #include "crypto/sparc_arch.h"
40 .register
%g2,#scratch
41 .register
%g3,#scratch
44 .section
".text",#alloc,#execinstr
47 ($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
48 (map("%g$_",(1..5)),map("%o$_",(0..5,7)));
51 $rp="%o0"; # BN_ULONG *rp,
52 $ap="%o1"; # const BN_ULONG *ap,
53 $bp="%o2"; # const BN_ULONG *bp,
54 $np="%o3"; # const BN_ULONG *np,
55 $n0p="%o4"; # const BN_ULONG *n0,
56 $num="%o5"; # int num); # caller ensures that num is even
59 .globl bn_mul_mont_vis3
62 add
%sp, $bias, %g4 ! real top of stack
63 sll
$num, 2, $num ! size
in bytes
65 andn
%g5, 63, %g5 ! buffer size rounded up to
64 bytes
67 add
%g5, %g1, %g1 ! 3*buffer size
69 andn
%g1, 63, %g1 ! align at
64 byte
70 sub %g1, $frame, %g1 ! new top of stack
76 # +-------------------------------+<----- %sp
78 # +-------------------------------+<----- aligned at 64 bytes
80 # +-------------------------------+
83 # +-------------------------------+<----- aligned at 64 bytes
84 # | __int64 ap[1..0] | converted ap[]
85 # +-------------------------------+
86 # | __int64 np[1..0] | converted np[]
87 # +-------------------------------+
88 # | __int64 ap[3..2] |
91 # +-------------------------------+
92 ($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
93 ($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$anp)=map("%l$_",(0..7));
96 ld
[$n0p+0], $t0 ! pull n0
[0..1] value
97 add
%sp, $bias+$frame, $tp
100 ld
[$bp+0], $t2 ! m0
=bp
[0]
106 ld
[$ap+0], $t0 ! ap
[0]
111 ld
[$ap+8], $t2 ! ap
[1]
116 stx
$aj, [$anp] ! converted ap
[0]
118 mulx
$aj, $m0, $lo0 ! ap
[0]*bp
[0]
119 umulxhi
$aj, $m0, $hi0
121 ld
[$np+0], $t0 ! np
[0]
126 ld
[$np+8], $t2 ! np
[1]
131 stx
$nj, [$anp+8] ! converted np
[0]
133 mulx
$lo0, $n0, $m1 ! "tp[0]"*n0
134 stx
$aj, [$anp+16] ! converted ap
[1]
136 mulx
$aj, $m0, $alo ! ap
[1]*bp
[0]
137 umulxhi
$aj, $m0, $aj ! ahi
=aj
139 mulx
$nj, $m1, $lo1 ! np
[0]*m1
140 umulxhi
$nj, $m1, $hi1
144 stx
$nj, [$anp+24] ! converted np
[1]
147 addcc
$lo0, $lo1, $lo1
148 addxc
%g0, $hi1, $hi1
150 mulx
$nj, $m1, $nlo ! np
[1]*m1
151 umulxhi
$nj, $m1, $nj ! nhi
=nj
154 sub $num, 24, $cnt ! cnt
=num
-3
158 ld
[$ap+0], $t0 ! ap
[j
]
159 addcc
$alo, $hi0, $lo0
166 stx
$aj, [$anp] ! converted ap
[j
]
168 ld
[$np+0], $t2 ! np
[j
]
169 addcc
$nlo, $hi1, $lo1
171 addxc
$nj, %g0, $hi1 ! nhi
=nj
175 mulx
$aj, $m0, $alo ! ap
[j
]*bp
[0]
177 umulxhi
$aj, $m0, $aj ! ahi
=aj
178 stx
$nj, [$anp+8] ! converted np
[j
]
179 add
$anp, 16, $anp ! anp
++
181 mulx
$nj, $m1, $nlo ! np
[j
]*m1
182 addcc
$lo0, $lo1, $lo1 ! np
[j
]*m1
+ap
[j
]*bp
[0]
183 umulxhi
$nj, $m1, $nj ! nhi
=nj
184 addxc
%g0, $hi1, $hi1
185 stx
$lo1, [$tp] ! tp
[j
-1]
186 add
$tp, 8, $tp ! tp
++
189 sub $cnt, 8, $cnt ! j
--
191 addcc
$alo, $hi0, $lo0
192 addxc
$aj, %g0, $hi0 ! ahi
=aj
194 addcc
$nlo, $hi1, $lo1
196 addcc
$lo0, $lo1, $lo1 ! np
[j
]*m1
+ap
[j
]*bp
[0]
197 addxc
%g0, $hi1, $hi1
198 stx
$lo1, [$tp] ! tp
[j
-1]
201 addcc
$hi0, $hi1, $hi1
202 addxc
%g0, %g0, $ovf ! upmost overflow bit
207 sub $num, 16, $i ! i
=num
-2
211 ld
[$bp+0], $t2 ! m0
=bp
[i
]
214 sub $anp, $num, $anp ! rewind
220 ldx
[$anp+0], $aj ! ap
[0]
222 ldx
[$anp+8], $nj ! np
[0]
224 mulx
$aj, $m0, $lo0 ! ap
[0]*bp
[i
]
225 ldx
[$tp], $tj ! tp
[0]
226 umulxhi
$aj, $m0, $hi0
227 ldx
[$anp+16], $aj ! ap
[1]
228 addcc
$lo0, $tj, $lo0 ! ap
[0]*bp
[i
]+tp
[0]
229 mulx
$aj, $m0, $alo ! ap
[1]*bp
[i
]
230 addxc
%g0, $hi0, $hi0
231 mulx
$lo0, $n0, $m1 ! tp
[0]*n0
232 umulxhi
$aj, $m0, $aj ! ahi
=aj
233 mulx
$nj, $m1, $lo1 ! np
[0]*m1
234 umulxhi
$nj, $m1, $hi1
235 ldx
[$anp+24], $nj ! np
[1]
237 addcc
$lo1, $lo0, $lo1
238 mulx
$nj, $m1, $nlo ! np
[1]*m1
239 addxc
%g0, $hi1, $hi1
240 umulxhi
$nj, $m1, $nj ! nhi
=nj
243 sub $num, 24, $cnt ! cnt
=num
-3
246 addcc
$alo, $hi0, $lo0
247 ldx
[$tp+8], $tj ! tp
[j
]
248 addxc
$aj, %g0, $hi0 ! ahi
=aj
249 ldx
[$anp+0], $aj ! ap
[j
]
250 addcc
$nlo, $hi1, $lo1
251 mulx
$aj, $m0, $alo ! ap
[j
]*bp
[i
]
252 addxc
$nj, %g0, $hi1 ! nhi
=nj
253 ldx
[$anp+8], $nj ! np
[j
]
255 umulxhi
$aj, $m0, $aj ! ahi
=aj
256 addcc
$lo0, $tj, $lo0 ! ap
[j
]*bp
[i
]+tp
[j
]
257 mulx
$nj, $m1, $nlo ! np
[j
]*m1
258 addxc
%g0, $hi0, $hi0
259 umulxhi
$nj, $m1, $nj ! nhi
=nj
260 addcc
$lo1, $lo0, $lo1 ! np
[j
]*m1
+ap
[j
]*bp
[i
]+tp
[j
]
261 addxc
%g0, $hi1, $hi1
262 stx
$lo1, [$tp] ! tp
[j
-1]
264 brnz
,pt
$cnt, .Linner
267 ldx
[$tp+8], $tj ! tp
[j
]
268 addcc
$alo, $hi0, $lo0
269 addxc
$aj, %g0, $hi0 ! ahi
=aj
270 addcc
$lo0, $tj, $lo0 ! ap
[j
]*bp
[i
]+tp
[j
]
271 addxc
%g0, $hi0, $hi0
273 addcc
$nlo, $hi1, $lo1
274 addxc
$nj, %g0, $hi1 ! nhi
=nj
275 addcc
$lo1, $lo0, $lo1 ! np
[j
]*m1
+ap
[j
]*bp
[i
]+tp
[j
]
276 addxc
%g0, $hi1, $hi1
277 stx
$lo1, [$tp] ! tp
[j
-1]
279 subcc
%g0, $ovf, %g0 ! move upmost overflow to CCR
.xcc
280 addxccc
$hi1, $hi0, $hi1
288 sub $anp, $num, $anp ! rewind
292 subcc
$num, 8, $cnt ! cnt
=num
-1 and clear CCR
.xcc
300 subccc
$tj, $nj, $t2 ! tp
[j
]-np
[j
]
305 st
$t2, [$rp-4] ! reverse order
310 sub $anp, $num, $anp ! rewind
315 subccc
$ovf, %g0, $ovf ! handle upmost overflow bit
320 .Lcopy
: ! conditional copy
327 stx
%g0, [$anp] ! zap
332 st
$t3, [$rp+0] ! flip order
341 .type bn_mul_mont_vis3
, #function
342 .size bn_mul_mont_vis3
, .-bn_mul_mont_vis3
343 .asciz
"Montgomery Multiplication for SPARCv9 VIS3, CRYPTOGAMS by <appro\@openssl.org>"
347 # Purpose of these subroutines is to explicitly encode VIS instructions,
348 # so that one can compile the module without having to specify VIS
349 # extensions on compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
350 # Idea is to reserve for option to produce "universal" binary and let
351 # programmer detect if current CPU is VIS capable at run-time.
353 my ($mnemonic,$rs1,$rs2,$rd)=@_;
354 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
356 my %visopf = ( "addxc" => 0x011,
358 "umulxhi" => 0x016 );
360 $ref = "$mnemonic\t$rs1,$rs2,$rd";
362 if ($opf=$visopf{$mnemonic}) {
363 foreach ($rs1,$rs2,$rd) {
364 return $ref if (!/%([goli])([0-9])/);
368 return sprintf ".word\t0x%08x !%s",
369 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
376 foreach (split("\n",$code)) {
377 s/\`([^\`]*)\`/eval $1/ge;
379 s
/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
# Buffered write errors only surface when the handle is flushed, so a
# failed close on STDOUT must be treated as a fatal error.
unless (close STDOUT) {
    die "error closing STDOUT: $!";
}