#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2012.
#
# SPARCv9 VIS3 Montgomery multiplication procedure suitable for T3 and
# onward. There are three new instructions used here: umulxhi,
# addxc[cc] and initializing store. On T3 RSA private key operations
# are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key
# lengths. This is without a dedicated squaring procedure. On T4 the
# corresponding coefficients are 1.47/2.10/2.80/2.90x, which are mostly
# for reference purposes, because T4 has dedicated Montgomery
# multiplication and squaring *instructions* that deliver even more.

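# A minimal model of the VIS3 semantics relied upon below (not part of
# the original module, added for illustration): umulxhi returns the
# upper 64 bits of an unsigned 64x64-bit product; addxc adds with the
# carry bit from %xcc, and addxccc additionally regenerates that carry.
# In Perl terms:
#
#	use Math::BigInt;
#	sub umulxhi_model {
#	    my ($a,$b) = map { Math::BigInt->new($_) } @_;
#	    return ($a*$b)>>64;		# high half of 128-bit product
#	}
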
$bits=32;
for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64) { $bias=2047; $frame=192; }
else { $bias=0; $frame=112; }
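# ($bias is the SPARC V9 64-bit ABI stack bias: the biased %sp points
# 2047 bytes below the real top of stack; $frame is the minimal stack
# frame reserved for the respective ABI.)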

$code.=<<___ if ($bits==64);
.register	%g2,#scratch
.register	%g3,#scratch
___
$code.=<<___;
.section	".text",#alloc,#execinstr
___

($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
	(map("%g$_",(1..5)),map("%o$_",(0..5,7)));

# int bn_mul_mont(
$rp="%o0";	# BN_ULONG *rp,
$ap="%o1";	# const BN_ULONG *ap,
$bp="%o2";	# const BN_ULONG *bp,
$np="%o3";	# const BN_ULONG *np,
$n0p="%o4";	# const BN_ULONG *n0,
$num="%o5";	# int num);	# caller ensures that num is even
				#		and >=6
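# (num counts 32-bit BN_ULONG words; the routine unconditionally
# succeeds and returns 1, see the epilogue.)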
$code.=<<___;
.globl	bn_mul_mont_vis3
.align	32
bn_mul_mont_vis3:
	add	%sp,	$bias,	%g4	! real top of stack
	sll	$num,	2,	$num	! size in bytes
	add	$num,	63,	%g5
	andn	%g5,	63,	%g5	! buffer size rounded up to 64 bytes
	add	%g5,	%g5,	%g1
	add	%g5,	%g1,	%g1	! 3*buffer size
	sub	%g4,	%g1,	%g1
	andn	%g1,	63,	%g1	! align at 64 byte
	sub	%g1,	$frame,	%g1	! new top of stack
	sub	%g1,	%g4,	%g1

	save	%sp,	%g1,	%sp
___
\f
# +-------------------------------+<-----	%sp
# .                               .
# +-------------------------------+<-----	aligned at 64 bytes
# | __int64 tmp[0]                |
# +-------------------------------+
# .                               .
# .                               .
# +-------------------------------+<-----	aligned at 64 bytes
# | __int64 ap[1..0]              |	converted ap[]
# +-------------------------------+
# | __int64 np[1..0]              |	converted np[]
# +-------------------------------+
# | __int64 ap[3..2]              |
# .                               .
# .                               .
# +-------------------------------+
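# (Each __int64 box is a 64-bit limb glued from two adjacent 32-bit
# BN_ULONGs, e.g. ap[1..0] from words ap[1] and ap[0]; converted ap[]
# and np[] limbs are stored interleaved, 8 bytes apart, at $anp.)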
($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$anp)=map("%l$_",(0..7));
($ovf,$i)=($t0,$t1);
$code.=<<___;
	ld	[$n0p+0],	$t0	! pull n0[0..1] value
	add	%sp, $bias+$frame, $tp
	ld	[$n0p+4],	$t1
	add	$tp,	%g5,	$anp
	ld	[$bp+0],	$t2	! m0=bp[0]
	sllx	$t1,	32,	$n0
	ld	[$bp+4],	$t3
	or	$t0,	$n0,	$n0
	add	$bp,	8,	$bp
\f
	ld	[$ap+0],	$t0	! ap[0]
	sllx	$t3,	32,	$m0
	ld	[$ap+4],	$t1
	or	$t2,	$m0,	$m0

	ld	[$ap+8],	$t2	! ap[1]
	sllx	$t1,	32,	$aj
	ld	[$ap+12],	$t3
	or	$t0,	$aj,	$aj
	add	$ap,	16,	$ap
	stxa	$aj,	[$anp]0xe2	! converted ap[0]

	mulx	$aj,	$m0,	$lo0	! ap[0]*bp[0]
	umulxhi	$aj,	$m0,	$hi0

	ld	[$np+0],	$t0	! np[0]
	sllx	$t3,	32,	$aj
	ld	[$np+4],	$t1
	or	$t2,	$aj,	$aj

	ld	[$np+8],	$t2	! np[1]
	sllx	$t1,	32,	$nj
	ld	[$np+12],	$t3
	or	$t0,	$nj,	$nj
	add	$np,	16,	$np
	stx	$nj,	[$anp+8]	! converted np[0]

	mulx	$lo0,	$n0,	$m1	! "tp[0]"*n0
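	! (Montgomery reduction step: by bn_mul_mont convention n0 is
	! -1/np[0] mod 2^64, so m1 makes tp[0]+m1*np[0] divisible by 2^64)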
	stx	$aj,	[$anp+16]	! converted ap[1]

	mulx	$aj,	$m0,	$alo	! ap[1]*bp[0]
	umulxhi	$aj,	$m0,	$aj	! ahi=aj

	mulx	$nj,	$m1,	$lo1	! np[0]*m1
	umulxhi	$nj,	$m1,	$hi1

	sllx	$t3,	32,	$nj
	or	$t2,	$nj,	$nj
	stx	$nj,	[$anp+24]	! converted np[1]
	add	$anp,	32,	$anp

	addcc	$lo0,	$lo1,	$lo1
	addxc	%g0,	$hi1,	$hi1

	mulx	$nj,	$m1,	$nlo	! np[1]*m1
	umulxhi	$nj,	$m1,	$nj	! nhi=nj
\f
	ba	.L1st
	sub	$num,	24,	$cnt	! cnt=num-3

.align	16
.L1st:
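	! 1st pass: tp[]=(ap[]*bp[0]+m1*np[])/2^64; the dropped bottom
	! limb is zero by choice of m1, hence stores land at tp[j-1]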
	ld	[$ap+0],	$t0	! ap[j]
	addcc	$alo,	$hi0,	$lo0
	ld	[$ap+4],	$t1
	addxc	$aj,	%g0,	$hi0

	sllx	$t1,	32,	$aj
	add	$ap,	8,	$ap
	or	$t0,	$aj,	$aj
	stxa	$aj,	[$anp]0xe2	! converted ap[j]

	ld	[$np+0],	$t2	! np[j]
	addcc	$nlo,	$hi1,	$lo1
	ld	[$np+4],	$t3
	addxc	$nj,	%g0,	$hi1	! nhi=nj

	sllx	$t3,	32,	$nj
	add	$np,	8,	$np
	mulx	$aj,	$m0,	$alo	! ap[j]*bp[0]
	or	$t2,	$nj,	$nj
	umulxhi	$aj,	$m0,	$aj	! ahi=aj
	stx	$nj,	[$anp+8]	! converted np[j]
	add	$anp,	16,	$anp	! anp++

	mulx	$nj,	$m1,	$nlo	! np[j]*m1
	addcc	$lo0,	$lo1,	$lo1	! np[j]*m1+ap[j]*bp[0]
	umulxhi	$nj,	$m1,	$nj	! nhi=nj
	addxc	%g0,	$hi1,	$hi1
	stxa	$lo1,	[$tp]0xe2	! tp[j-1]
	add	$tp,	8,	$tp	! tp++

	brnz,pt	$cnt,	.L1st
	sub	$cnt,	8,	$cnt	! j--
!.L1st
	addcc	$alo,	$hi0,	$lo0
	addxc	$aj,	%g0,	$hi0	! ahi=aj

	addcc	$nlo,	$hi1,	$lo1
	addxc	$nj,	%g0,	$hi1
	addcc	$lo0,	$lo1,	$lo1	! np[j]*m1+ap[j]*bp[0]
	addxc	%g0,	$hi1,	$hi1
	stxa	$lo1,	[$tp]0xe2	! tp[j-1]
	add	$tp,	8,	$tp

	addcc	$hi0,	$hi1,	$hi1
	addxc	%g0,	%g0,	$ovf	! upmost overflow bit
	stxa	$hi1,	[$tp]0xe2
	add	$tp,	8,	$tp
\f
	ba	.Louter
	sub	$num,	16,	$i	! i=num-2

.align	16
.Louter:
	ld	[$bp+0],	$t2	! m0=bp[i]
	ld	[$bp+4],	$t3

	sub	$anp,	$num,	$anp	! rewind
	sub	$tp,	$num,	$tp
	sub	$anp,	$num,	$anp

	add	$bp,	8,	$bp
	sllx	$t3,	32,	$m0
	ldx	[$anp+0],	$aj	! ap[0]
	or	$t2,	$m0,	$m0
	ldx	[$anp+8],	$nj	! np[0]

	mulx	$aj,	$m0,	$lo0	! ap[0]*bp[i]
	ldx	[$tp],	$tj		! tp[0]
	umulxhi	$aj,	$m0,	$hi0
	ldx	[$anp+16],	$aj	! ap[1]
	addcc	$lo0,	$tj,	$lo0	! ap[0]*bp[i]+tp[0]
	mulx	$aj,	$m0,	$alo	! ap[1]*bp[i]
	addxc	%g0,	$hi0,	$hi0
	mulx	$lo0,	$n0,	$m1	! tp[0]*n0
	umulxhi	$aj,	$m0,	$aj	! ahi=aj
	mulx	$nj,	$m1,	$lo1	! np[0]*m1
	umulxhi	$nj,	$m1,	$hi1
	ldx	[$anp+24],	$nj	! np[1]
	add	$anp,	32,	$anp
	addcc	$lo1,	$lo0,	$lo1
	mulx	$nj,	$m1,	$nlo	! np[1]*m1
	addxc	%g0,	$hi1,	$hi1
	umulxhi	$nj,	$m1,	$nj	! nhi=nj
\f
	ba	.Linner
	sub	$num,	24,	$cnt	! cnt=num-3
.align	16
.Linner:
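	! i-th pass: tp[]=(tp[]+ap[]*bp[i]+m1*np[])/2^64, same shifted
	! store pattern as in .L1st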
	addcc	$alo,	$hi0,	$lo0
	ldx	[$tp+8],	$tj	! tp[j]
	addxc	$aj,	%g0,	$hi0	! ahi=aj
	ldx	[$anp+0],	$aj	! ap[j]
	addcc	$nlo,	$hi1,	$lo1
	mulx	$aj,	$m0,	$alo	! ap[j]*bp[i]
	addxc	$nj,	%g0,	$hi1	! nhi=nj
	ldx	[$anp+8],	$nj	! np[j]
	add	$anp,	16,	$anp
	umulxhi	$aj,	$m0,	$aj	! ahi=aj
	addcc	$lo0,	$tj,	$lo0	! ap[j]*bp[i]+tp[j]
	mulx	$nj,	$m1,	$nlo	! np[j]*m1
	addxc	%g0,	$hi0,	$hi0
	umulxhi	$nj,	$m1,	$nj	! nhi=nj
	addcc	$lo1,	$lo0,	$lo1	! np[j]*m1+ap[j]*bp[i]+tp[j]
	addxc	%g0,	$hi1,	$hi1
	stx	$lo1,	[$tp]		! tp[j-1]
	add	$tp,	8,	$tp
	brnz,pt	$cnt,	.Linner
	sub	$cnt,	8,	$cnt
!.Linner
	ldx	[$tp+8],	$tj	! tp[j]
	addcc	$alo,	$hi0,	$lo0
	addxc	$aj,	%g0,	$hi0	! ahi=aj
	addcc	$lo0,	$tj,	$lo0	! ap[j]*bp[i]+tp[j]
	addxc	%g0,	$hi0,	$hi0

	addcc	$nlo,	$hi1,	$lo1
	addxc	$nj,	%g0,	$hi1	! nhi=nj
	addcc	$lo1,	$lo0,	$lo1	! np[j]*m1+ap[j]*bp[i]+tp[j]
	addxc	%g0,	$hi1,	$hi1
	stx	$lo1,	[$tp]		! tp[j-1]

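	! (fold upmost overflow bit from previous iteration back in: subcc
	! sets the xcc carry iff ovf is non-zero, addxccc then consumes it)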
	subcc	%g0,	$ovf,	%g0	! move upmost overflow to CCR.xcc
	addxccc	$hi1,	$hi0,	$hi1
	addxc	%g0,	%g0,	$ovf
	stx	$hi1,	[$tp+8]
	add	$tp,	16,	$tp

	brnz,pt	$i,	.Louter
	sub	$i,	8,	$i
\f
	sub	$anp,	$num,	$anp	! rewind
	sub	$tp,	$num,	$tp
	sub	$anp,	$num,	$anp
	ba	.Lsub
	subcc	$num,	8,	$cnt	! cnt=num-1 and clear CCR.xcc

.align	16
.Lsub:
	ldx	[$tp],	$tj
	add	$tp,	8,	$tp
	ldx	[$anp+8],	$nj
	add	$anp,	16,	$anp
	subccc	$tj,	$nj,	$t2	! tp[j]-np[j]
	srlx	$tj,	32,	$tj
	srlx	$nj,	32,	$nj
	subccc	$tj,	$nj,	$t3
	add	$rp,	8,	$rp
	st	$t2,	[$rp-4]		! reverse order
	st	$t3,	[$rp-8]
	brnz,pt	$cnt,	.Lsub
	sub	$cnt,	8,	$cnt

	sub	$anp,	$num,	$anp	! rewind
	sub	$tp,	$num,	$tp
	sub	$anp,	$num,	$anp
	sub	$rp,	$num,	$rp

	subc	$ovf,	%g0,	$ovf	! handle upmost overflow bit
	and	$tp,	$ovf,	$ap
	andn	$rp,	$ovf,	$np
	or	$np,	$ap,	$ap	! ap=borrow?tp:rp
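	! (ovf is now an all-ones mask iff .Lsub borrowed with no upmost
	! bit to cover it, so the and/andn/or is a branchless select)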
	ba	.Lcopy
	sub	$num,	8,	$cnt

.align	16
.Lcopy:					! copy or in-place refresh
	ld	[$ap+0],	$t2
	ld	[$ap+4],	$t3
	add	$ap,	8,	$ap
	stx	%g0,	[$tp]		! zap
	add	$tp,	8,	$tp
	stx	%g0,	[$anp]		! zap
	stx	%g0,	[$anp+8]
	add	$anp,	16,	$anp
	st	$t3,	[$rp+0]		! flip order
	st	$t2,	[$rp+4]
	add	$rp,	8,	$rp
	brnz	$cnt,	.Lcopy
	sub	$cnt,	8,	$cnt

	mov	1,	%o0
	ret
	restore
.type	bn_mul_mont_vis3, #function
.size	bn_mul_mont_vis3, .-bn_mul_mont_vis3
.asciz	"Montgomery Multiplication for SPARCv9 VIS3, CRYPTOGAMS by <appro\@openssl.org>"
.align	4
___
\f
# The purpose of these subroutines is to explicitly encode VIS instructions,
# so that one can compile the module without having to specify VIS
# extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
# The idea is to reserve the option to produce a "universal" binary and
# let the programmer detect at run-time whether the current CPU is VIS
# capable.
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = (	"addxc"		=> 0x011,
		"addxccc"	=> 0x013,
		"umulxhi"	=> 0x016	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}

	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}
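
# A worked example of the encoding above (editorial, derived from the
# %visopf table): unvis3("umulxhi","%o0","%o1","%o2") maps the registers
# to rs1=8, rs2=9, rd=10 and returns
#	.word	0x95b202c9 !umulxhi	%o0,%o1,%o2
# i.e. 0x81b00000|10<<25|8<<14|0x016<<5|9.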

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
		&unvis3($1,$2,$3,$4)
	 /ge;

	print $_,"\n";
}

close STDOUT;