#! /usr/bin/env perl
# Copyright 2012-2021 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2012.
#
# SPARCv9 VIS3 Montgomery multiplication procedure suitable for T3 and
# onward. There are three new instructions used here: umulxhi,
# addxc[cc] and initializing store. On T3 RSA private key operations
# are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key
# lengths. This is without a dedicated squaring procedure. On T4 the
# corresponding coefficients are 1.47/2.10/2.80/2.90x, which is mostly
# for reference purposes, because T4 has dedicated Montgomery
# multiplication and squaring *instructions* that deliver even more.

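# Informal semantics of the two new arithmetic instructions, added here
# as an illustration only (the carry referred to is CCR.xcc.c, and the
# usage below is consistent with these readings):
#
#	umulxhi	rs1,rs2,rd	! rd = high 64 bits of unsigned rs1*rs2
#	addxc	rs1,rs2,rd	! rd = rs1 + rs2 + carry, flags untouched
#	addxccc	rs1,rs2,rd	! rd = rs1 + rs2 + carry, carry updated
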
$output = pop and open STDOUT,">$output";

$frame = "STACK_FRAME";
$bias  = "STACK_BIAS";

$code.=<<___;
#ifndef __ASSEMBLER__
# define __ASSEMBLER__ 1
#endif
#include "crypto/sparc_arch.h"

#ifdef __arch64__
.register	%g2,#scratch
.register	%g3,#scratch
#endif

.section	".text",#alloc,#execinstr
___

($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
	(map("%g$_",(1..5)),map("%o$_",(0..5,7)));

# int bn_mul_mont(
$rp="%o0";	# BN_ULONG *rp,
$ap="%o1";	# const BN_ULONG *ap,
$bp="%o2";	# const BN_ULONG *bp,
$np="%o3";	# const BN_ULONG *np,
$n0p="%o4";	# const BN_ULONG *n0,
$num="%o5";	# int num);	# caller ensures that num is even
		#		# and >=6
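
# A reference model of what the code below computes, added for
# illustration only and never called by the generator. The assembly
# interleaves operand conversion, multiplication and reduction, but the
# result matches this multiply-then-reduce form, assuming the usual
# convention that n0 = -np^-1 mod 2^64. Operands are Math::BigInt
# values; $words is the number of 64-bit words, i.e. num/2 above.
use Math::BigInt;
sub mont_mul_ref {
	my ($ap, $bp, $np, $n0, $words) = @_;
	my $mask = Math::BigInt->new(2)->bpow(64)->bsub(1);
	my $t = $ap->copy()->bmul($bp);			# t = a*b
	for (1 .. $words) {				# one word per step
		my $m = $t->copy()->band($mask)->bmul($n0)->band($mask);
		$t->badd($m->bmul($np))->brsft(64);	# low word cancels
	}
	$t->bsub($np) if $t->bcmp($np) >= 0;		# final subtraction
	return $t;	# == a*b*R^-1 mod np, with R = 2^(64*$words)
}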
$code.=<<___;
.globl	bn_mul_mont_vis3
.align	32
bn_mul_mont_vis3:
	add	%sp, $bias, %g4	! real top of stack
	sll	$num, 2, $num	! size in bytes
	add	$num, 63, %g5
	andn	%g5, 63, %g5	! buffer size rounded up to 64 bytes
	add	%g5, %g5, %g1
	add	%g5, %g1, %g1	! 3*buffer size
	sub	%g4, %g1, %g1
	andn	%g1, 63, %g1	! align at 64 bytes
	sub	%g1, $frame, %g1	! new top of stack
	sub	%g1, %g4, %g1

	save	%sp, %g1, %sp
___

# +-------------------------------+<-----	%sp
# .                               .
# +-------------------------------+<-----	aligned at 64 bytes
# | __int64 tmp[0]                |
# +-------------------------------+
# .                               .
# .                               .
# +-------------------------------+<-----	aligned at 64 bytes
# | __int64 ap[1..0]              |	converted ap[]
# +-------------------------------+
# | __int64 np[1..0]              |	converted np[]
# +-------------------------------+
# | __int64 ap[3..2]              |
# .                               .
# .                               .
# +-------------------------------+
($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$anp)=map("%l$_",(0..7));
($ovf,$i)=($t0,$t1);
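# BN_ULONG is 32 bits here, so the code below glues each pair of 32-bit
# loads into a 64-bit word, $word = ($hi << 32) | $lo with $hi taken
# from the higher-addressed half, before feeding it to the 64-bit
# multiplier ($hi/$lo are illustrative names, not variables used here).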
$code.=<<___;
	ld	[$n0p+0], $t0	! pull n0[0..1] value
	add	%sp, $bias+$frame, $tp
	ld	[$n0p+4], $t1
	add	$tp, %g5, $anp
	ld	[$bp+0], $t2	! m0=bp[0]
	sllx	$t1, 32, $n0
	ld	[$bp+4], $t3
	or	$t0, $n0, $n0
	add	$bp, 8, $bp

	ld	[$ap+0], $t0	! ap[0]
	sllx	$t3, 32, $m0
	ld	[$ap+4], $t1
	or	$t2, $m0, $m0

	ld	[$ap+8], $t2	! ap[1]
	sllx	$t1, 32, $aj
	ld	[$ap+12], $t3
	or	$t0, $aj, $aj
	add	$ap, 16, $ap
	stx	$aj, [$anp]	! converted ap[0]

	mulx	$aj, $m0, $lo0	! ap[0]*bp[0]
	umulxhi	$aj, $m0, $hi0

	ld	[$np+0], $t0	! np[0]
	sllx	$t3, 32, $aj
	ld	[$np+4], $t1
	or	$t2, $aj, $aj

	ld	[$np+8], $t2	! np[1]
	sllx	$t1, 32, $nj
	ld	[$np+12], $t3
	or	$t0, $nj, $nj
	add	$np, 16, $np
	stx	$nj, [$anp+8]	! converted np[0]

	mulx	$lo0, $n0, $m1	! "tp[0]"*n0
	stx	$aj, [$anp+16]	! converted ap[1]

	mulx	$aj, $m0, $alo	! ap[1]*bp[0]
	umulxhi	$aj, $m0, $aj	! ahi=aj

	mulx	$nj, $m1, $lo1	! np[0]*m1
	umulxhi	$nj, $m1, $hi1

	sllx	$t3, 32, $nj
	or	$t2, $nj, $nj
	stx	$nj, [$anp+24]	! converted np[1]
	add	$anp, 32, $anp

	addcc	$lo0, $lo1, $lo1
	addxc	%g0, $hi1, $hi1

	mulx	$nj, $m1, $nlo	! np[1]*m1
	umulxhi	$nj, $m1, $nj	! nhi=nj

	ba	.L1st
	sub	$num, 24, $cnt	! cnt=num-3

.align	16
.L1st:
	ld	[$ap+0], $t0	! ap[j]
	addcc	$alo, $hi0, $lo0
	ld	[$ap+4], $t1
	addxc	$aj, %g0, $hi0

	sllx	$t1, 32, $aj
	add	$ap, 8, $ap
	or	$t0, $aj, $aj
	stx	$aj, [$anp]	! converted ap[j]

	ld	[$np+0], $t2	! np[j]
	addcc	$nlo, $hi1, $lo1
	ld	[$np+4], $t3
	addxc	$nj, %g0, $hi1	! nhi=nj

	sllx	$t3, 32, $nj
	add	$np, 8, $np
	mulx	$aj, $m0, $alo	! ap[j]*bp[0]
	or	$t2, $nj, $nj
	umulxhi	$aj, $m0, $aj	! ahi=aj
	stx	$nj, [$anp+8]	! converted np[j]
	add	$anp, 16, $anp	! anp++

	mulx	$nj, $m1, $nlo	! np[j]*m1
	addcc	$lo0, $lo1, $lo1	! np[j]*m1+ap[j]*bp[0]
	umulxhi	$nj, $m1, $nj	! nhi=nj
	addxc	%g0, $hi1, $hi1
	stx	$lo1, [$tp]	! tp[j-1]
	add	$tp, 8, $tp	! tp++

	brnz,pt	$cnt, .L1st
	sub	$cnt, 8, $cnt	! j--
!.L1st
	addcc	$alo, $hi0, $lo0
	addxc	$aj, %g0, $hi0	! ahi=aj

	addcc	$nlo, $hi1, $lo1
	addxc	$nj, %g0, $hi1
	addcc	$lo0, $lo1, $lo1	! np[j]*m1+ap[j]*bp[0]
	addxc	%g0, $hi1, $hi1
	stx	$lo1, [$tp]	! tp[j-1]
	add	$tp, 8, $tp

	addcc	$hi0, $hi1, $hi1
	addxc	%g0, %g0, $ovf	! upmost overflow bit
	stx	$hi1, [$tp]
	add	$tp, 8, $tp

	ba	.Louter
	sub	$num, 16, $i	! i=num-2

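! Outer loop: for each subsequent bp[i], accumulate ap[]*bp[i] on top
! of tp[] and add m1*np[] so that the lowest word cancels,
! tp[] = (tp[] + ap[]*bp[i] + m1*np[]) / 2^64; ap[] and np[] are
! reread from the converted area.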
.align	16
.Louter:
	ld	[$bp+0], $t2	! m0=bp[i]
	ld	[$bp+4], $t3

	sub	$anp, $num, $anp	! rewind
	sub	$tp, $num, $tp
	sub	$anp, $num, $anp

	add	$bp, 8, $bp
	sllx	$t3, 32, $m0
	ldx	[$anp+0], $aj	! ap[0]
	or	$t2, $m0, $m0
	ldx	[$anp+8], $nj	! np[0]

	mulx	$aj, $m0, $lo0	! ap[0]*bp[i]
	ldx	[$tp], $tj	! tp[0]
	umulxhi	$aj, $m0, $hi0
	ldx	[$anp+16], $aj	! ap[1]
	addcc	$lo0, $tj, $lo0	! ap[0]*bp[i]+tp[0]
	mulx	$aj, $m0, $alo	! ap[1]*bp[i]
	addxc	%g0, $hi0, $hi0
	mulx	$lo0, $n0, $m1	! tp[0]*n0
	umulxhi	$aj, $m0, $aj	! ahi=aj
	mulx	$nj, $m1, $lo1	! np[0]*m1
	umulxhi	$nj, $m1, $hi1
	ldx	[$anp+24], $nj	! np[1]
	add	$anp, 32, $anp
	addcc	$lo1, $lo0, $lo1
	mulx	$nj, $m1, $nlo	! np[1]*m1
	addxc	%g0, $hi1, $hi1
	umulxhi	$nj, $m1, $nj	! nhi=nj

	ba	.Linner
	sub	$num, 24, $cnt	! cnt=num-3
.align	16
.Linner:
	addcc	$alo, $hi0, $lo0
	ldx	[$tp+8], $tj	! tp[j]
	addxc	$aj, %g0, $hi0	! ahi=aj
	ldx	[$anp+0], $aj	! ap[j]
	addcc	$nlo, $hi1, $lo1
	mulx	$aj, $m0, $alo	! ap[j]*bp[i]
	addxc	$nj, %g0, $hi1	! nhi=nj
	ldx	[$anp+8], $nj	! np[j]
	add	$anp, 16, $anp
	umulxhi	$aj, $m0, $aj	! ahi=aj
	addcc	$lo0, $tj, $lo0	! ap[j]*bp[i]+tp[j]
	mulx	$nj, $m1, $nlo	! np[j]*m1
	addxc	%g0, $hi0, $hi0
	umulxhi	$nj, $m1, $nj	! nhi=nj
	addcc	$lo1, $lo0, $lo1	! np[j]*m1+ap[j]*bp[i]+tp[j]
	addxc	%g0, $hi1, $hi1
	stx	$lo1, [$tp]	! tp[j-1]
	add	$tp, 8, $tp
	brnz,pt	$cnt, .Linner
	sub	$cnt, 8, $cnt
!.Linner
	ldx	[$tp+8], $tj	! tp[j]
	addcc	$alo, $hi0, $lo0
	addxc	$aj, %g0, $hi0	! ahi=aj
	addcc	$lo0, $tj, $lo0	! ap[j]*bp[i]+tp[j]
	addxc	%g0, $hi0, $hi0

	addcc	$nlo, $hi1, $lo1
	addxc	$nj, %g0, $hi1	! nhi=nj
	addcc	$lo1, $lo0, $lo1	! np[j]*m1+ap[j]*bp[i]+tp[j]
	addxc	%g0, $hi1, $hi1
	stx	$lo1, [$tp]	! tp[j-1]

	subcc	%g0, $ovf, %g0	! move upmost overflow to CCR.xcc
	addxccc	$hi1, $hi0, $hi1
	addxc	%g0, %g0, $ovf
	stx	$hi1, [$tp+8]
	add	$tp, 16, $tp

	brnz,pt	$i, .Louter
	sub	$i, 8, $i

	sub	$anp, $num, $anp	! rewind
	sub	$tp, $num, $tp
	sub	$anp, $num, $anp
	ba	.Lsub
	subcc	$num, 8, $cnt	! cnt=num-1 and clear CCR.xcc

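! tp[] now holds the Montgomery product, which may still exceed np[].
! Compute rp[] = tp[] - np[]; the final borrow together with the
! overflow bit decides below whether the subtraction is kept.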
.align	16
.Lsub:
	ldx	[$tp], $tj
	add	$tp, 8, $tp
	ldx	[$anp+8], $nj
	add	$anp, 16, $anp
	subccc	$tj, $nj, $t2	! tp[j]-np[j]
	srlx	$tj, 32, $tj
	srlx	$nj, 32, $nj
	subccc	$tj, $nj, $t3
	add	$rp, 8, $rp
	st	$t2, [$rp-4]	! reverse order
	st	$t3, [$rp-8]
	brnz,pt	$cnt, .Lsub
	sub	$cnt, 8, $cnt

	sub	$anp, $num, $anp	! rewind
	sub	$tp, $num, $tp
	sub	$anp, $num, $anp
	sub	$rp, $num, $rp

	subccc	$ovf, %g0, $ovf	! handle upmost overflow bit
	ba	.Lcopy
	sub	$num, 8, $cnt

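! Conditional copy: on borrow (carry set) the result is tp[] itself,
! otherwise tp[]-np[] already stored in rp[]; tp[] and the converted
! ap[]/np[] copies are zapped on the way out.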
.align	16
.Lcopy:					! conditional copy
	ld	[$tp+0], $t0
	ld	[$tp+4], $t1
	ld	[$rp+0], $t2
	ld	[$rp+4], $t3
	stx	%g0, [$tp]		! zap
	add	$tp, 8, $tp
	stx	%g0, [$anp]		! zap
	stx	%g0, [$anp+8]
	add	$anp, 16, $anp
	movcs	%icc, $t0, $t2
	movcs	%icc, $t1, $t3
	st	$t3, [$rp+0]		! flip order
	st	$t2, [$rp+4]
	add	$rp, 8, $rp
	brnz	$cnt, .Lcopy
	sub	$cnt, 8, $cnt

	mov	1, %o0
	ret
	restore
.type	bn_mul_mont_vis3, #function
.size	bn_mul_mont_vis3, .-bn_mul_mont_vis3
.asciz	"Montgomery Multiplication for SPARCv9 VIS3, CRYPTOGAMS by <appro\@openssl.org>"
.align	4
___

# The purpose of this subroutine is to explicitly encode VIS3
# instructions, so that the module can be compiled without having to
# specify VIS extensions on the compiler command line, e.g.
# -xarch=v9 vs. -xarch=v9a. The idea is to preserve the option of
# producing a "universal" binary and let the programmer detect at
# run time whether the current CPU is VIS3-capable.
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = (	"addxc"		=> 0x011,
		"addxccc"	=> 0x013,
		"umulxhi"	=> 0x016	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}

	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}
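
# For example, the driver loop below rewrites
#
#	umulxhi	%o1,%o2,%o3
#
# as ".word 0x97b242ca !umulxhi %o1,%o2,%o3" (value worked out from the
# encoding formula above, shown for illustration), so the module
# assembles even with tools that know nothing about VIS3.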

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
	 &unvis3($1,$2,$3,$4)
	/ge;

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";