#! /usr/bin/env perl
# Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Hardware SPARC T4 support by David S. Miller <davem@davemloft.net>.
# ====================================================================

# MD5 for SPARCv9, 6.9 cycles per byte on UltraSPARC, >40% faster than
# code generated by Sun C 5.2.

# SPARC T4 MD5 hardware achieves 3.20 cycles per byte, which is 2.1x
# faster than software. Multi-process benchmark saturates at 12x
# single-process result on 8-core processor, or ~11GBps per 2.85GHz
# socket.

$output=pop;
open STDOUT,">$output";

use integer;

($ctx,$inp,$len)=("%i0","%i1","%i2");	# input arguments

# 64-bit values
@X=("%o0","%o1","%o2","%o3","%o4","%o5","%o7","%g1","%g2");
$tx="%g3";
($AB,$CD)=("%g4","%g5");

# 32-bit values
@V=($A,$B,$C,$D)=map("%l$_",(0..3));
($t1,$t2,$t3,$saved_asi)=map("%l$_",(4..7));
($shr,$shl1,$shl2)=("%i3","%i4","%i5");
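# $shr/$shl1/$shl2 hold the bit shifts used to realign 64-bit words loaded
# from a byte-misaligned input pointer; shr+shl1+shl2==64 (set up under
# .Lsoftware below).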

my @K=( 0xd76aa478,0xe8c7b756,0x242070db,0xc1bdceee,
        0xf57c0faf,0x4787c62a,0xa8304613,0xfd469501,
        0x698098d8,0x8b44f7af,0xffff5bb1,0x895cd7be,
        0x6b901122,0xfd987193,0xa679438e,0x49b40821,

        0xf61e2562,0xc040b340,0x265e5a51,0xe9b6c7aa,
        0xd62f105d,0x02441453,0xd8a1e681,0xe7d3fbc8,
        0x21e1cde6,0xc33707d6,0xf4d50d87,0x455a14ed,
        0xa9e3e905,0xfcefa3f8,0x676f02d9,0x8d2a4c8a,

        0xfffa3942,0x8771f681,0x6d9d6122,0xfde5380c,
        0xa4beea44,0x4bdecfa9,0xf6bb4b60,0xbebfbc70,
        0x289b7ec6,0xeaa127fa,0xd4ef3085,0x04881d05,
        0xd9d4d039,0xe6db99e5,0x1fa27cf8,0xc4ac5665,

        0xf4292244,0x432aff97,0xab9423a7,0xfc93a039,
        0x655b59c3,0x8f0ccc92,0xffeff47d,0x85845dd1,
        0x6fa87e4f,0xfe2ce6e0,0xa3014314,0x4e0811a1,
        0xf7537e82,0xbd3af235,0x2ad7d2bb,0xeb86d391, 0 );

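# The round subroutines below each emit one MD5 step and, interleaved with
# it, precompute X[j]+K[j] (in $t2) and the boolean term (in $t1) for the
# *next* step.  R0 handles rounds 0-14:
#   F(b,c,d) = (b&c)|(~b&d), computed here as ((c^d)&b)^d, with c^d carried
#   over in $t1 from the previous step.
# Odd steps also fold in the realignment of the next 64-bit input word
# (which degenerates to a no-op when the input was 8-byte aligned).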
sub R0 {
    my ($i,$a,$b,$c,$d) = @_;
    my $rot = (7,12,17,22)[$i%4];
    my $j = ($i+1)/2;

    if ($i&1) {
        $code.=<<___;
	srlx @X[$j],$shr,@X[$j] ! align X[`$i+1`]
	and $b,$t1,$t1 ! round $i
	sllx @X[$j+1],$shl1,$tx
	add $t2,$a,$a
	sllx $tx,$shl2,$tx
	xor $d,$t1,$t1
	or $tx,@X[$j],@X[$j]
	sethi %hi(@K[$i+1]),$t2
	add $t1,$a,$a
	or $t2,%lo(@K[$i+1]),$t2
	sll $a,$rot,$t3
	add @X[$j],$t2,$t2 ! X[`$i+1`]+K[`$i+1`]
	srl $a,32-$rot,$a
	add $b,$t3,$t3
	xor $b,$c,$t1
	add $t3,$a,$a
___
    } else {
        $code.=<<___;
	srlx @X[$j],32,$tx ! extract X[`2*$j+1`]
	and $b,$t1,$t1 ! round $i
	add $t2,$a,$a
	xor $d,$t1,$t1
	sethi %hi(@K[$i+1]),$t2
	add $t1,$a,$a
	or $t2,%lo(@K[$i+1]),$t2
	sll $a,$rot,$t3
	add $tx,$t2,$t2 ! X[`2*$j+1`]+K[`$i+1`]
	srl $a,32-$rot,$a
	add $b,$t3,$t3
	xor $b,$c,$t1
	add $t3,$a,$a
___
    }
}

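# R0_1 is the last F round (i==15): it already extracts X[1], the word the
# first G round needs, and seeds $t1 with b&~c (which becomes c&~d after
# the registers rotate).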
sub R0_1 {
    my ($i,$a,$b,$c,$d) = @_;
    my $rot = (7,12,17,22)[$i%4];

    $code.=<<___;
	srlx @X[0],32,$tx ! extract X[1]
	and $b,$t1,$t1 ! round $i
	add $t2,$a,$a
	xor $d,$t1,$t1
	sethi %hi(@K[$i+1]),$t2
	add $t1,$a,$a
	or $t2,%lo(@K[$i+1]),$t2
	sll $a,$rot,$t3
	add $tx,$t2,$t2 ! X[1]+K[`$i+1`]
	srl $a,32-$rot,$a
	add $b,$t3,$t3
	andn $b,$c,$t1
	add $t3,$a,$a
___
}

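# R1 handles rounds 16-31: G(b,c,d) = (b&d)|(c&~d), with c&~d carried over
# in $t1; the last step (i==31) switches the carried term to c^d for the
# first H round.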
sub R1 {
    my ($i,$a,$b,$c,$d) = @_;
    my $rot = (5,9,14,20)[$i%4];
    my $j = $i<31 ? (1+5*($i+1))%16 : (5+3*($i+1))%16;
    my $xi = @X[$j/2];

    $code.=<<___ if ($j&1 && ($xi=$tx));
	srlx @X[$j/2],32,$xi ! extract X[$j]
___
    $code.=<<___;
	and $b,$d,$t3 ! round $i
	add $t2,$a,$a
	or $t3,$t1,$t1
	sethi %hi(@K[$i+1]),$t2
	add $t1,$a,$a
	or $t2,%lo(@K[$i+1]),$t2
	sll $a,$rot,$t3
	add $xi,$t2,$t2 ! X[$j]+K[`$i+1`]
	srl $a,32-$rot,$a
	add $b,$t3,$t3
	`$i<31?"andn":"xor"` $b,$c,$t1
	add $t3,$a,$a
___
}

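# R2 handles rounds 32-47: H(b,c,d) = b^c^d, with c^d carried over in $t1.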
sub R2 {
    my ($i,$a,$b,$c,$d) = @_;
    my $rot = (4,11,16,23)[$i%4];
    my $j = $i<47 ? (5+3*($i+1))%16 : (0+7*($i+1))%16;
    my $xi = @X[$j/2];

    $code.=<<___ if ($j&1 && ($xi=$tx));
	srlx @X[$j/2],32,$xi ! extract X[$j]
___
    $code.=<<___;
	add $t2,$a,$a ! round $i
	xor $b,$t1,$t1
	sethi %hi(@K[$i+1]),$t2
	add $t1,$a,$a
	or $t2,%lo(@K[$i+1]),$t2
	sll $a,$rot,$t3
	add $xi,$t2,$t2 ! X[$j]+K[`$i+1`]
	srl $a,32-$rot,$a
	add $b,$t3,$t3
	xor $b,$c,$t1
	add $t3,$a,$a
___
}

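# R3 handles rounds 48-63: I(b,c,d) = c^(b|~d), computed in full each step
# (orn + xor), so no boolean term is carried over from the previous step.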
sub R3 {
    my ($i,$a,$b,$c,$d) = @_;
    my $rot = (6,10,15,21)[$i%4];
    my $j = (0+7*($i+1))%16;
    my $xi = @X[$j/2];

    $code.=<<___;
	add $t2,$a,$a ! round $i
___
    $code.=<<___ if ($j&1 && ($xi=$tx));
	srlx @X[$j/2],32,$xi ! extract X[$j]
___
    $code.=<<___;
	orn $b,$d,$t1
	sethi %hi(@K[$i+1]),$t2
	xor $c,$t1,$t1
	or $t2,%lo(@K[$i+1]),$t2
	add $t1,$a,$a
	sll $a,$rot,$t3
	add $xi,$t2,$t2 ! X[$j]+K[`$i+1`]
	srl $a,32-$rot,$a
	add $b,$t3,$t3
	add $t3,$a,$a
___
}

$code.=<<___;
#include "sparc_arch.h"

#ifdef __arch64__
.register %g2,#scratch
.register %g3,#scratch
#endif

.section ".text",#alloc,#execinstr

#ifdef __PIC__
SPARC_PIC_THUNK(%g1)
#endif

.globl md5_block_asm_data_order
.align 32
md5_block_asm_data_order:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld [%g1+4],%g1 ! OPENSSL_sparcv9cap_P[1]

	andcc %g1, CFR_MD5, %g0
	be .Lsoftware
	nop

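	! T4 hardware path: load the context words little-endian into
	! %f0-%f3 (ASI 0x88 == ASI_PRIMARY_LITTLE) for the MD5 opcode below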
	mov 4, %g1
	andcc %o1, 0x7, %g0
	lda [%o0 + %g0]0x88, %f0 ! load context
	lda [%o0 + %g1]0x88, %f1
	add %o0, 8, %o0
	lda [%o0 + %g0]0x88, %f2
	lda [%o0 + %g1]0x88, %f3
	bne,pn %icc, .Lhwunaligned
	sub %o0, 8, %o0

.Lhw_loop:
	ldd [%o1 + 0x00], %f8
	ldd [%o1 + 0x08], %f10
	ldd [%o1 + 0x10], %f12
	ldd [%o1 + 0x18], %f14
	ldd [%o1 + 0x20], %f16
	ldd [%o1 + 0x28], %f18
	ldd [%o1 + 0x30], %f20
	subcc %o2, 1, %o2 ! done yet?
	ldd [%o1 + 0x38], %f22
	add %o1, 0x40, %o1
	prefetch [%o1 + 63], 20

	.word 0x81b02800 ! MD5

	bne,pt SIZE_T_CC, .Lhw_loop
	nop

.Lhwfinish:
	sta %f0, [%o0 + %g0]0x88 ! store context
	sta %f1, [%o0 + %g1]0x88
	add %o0, 8, %o0
	sta %f2, [%o0 + %g0]0x88
	sta %f3, [%o0 + %g1]0x88
	retl
	nop

.align 8
.Lhwunaligned:
	alignaddr %o1, %g0, %o1

	ldd [%o1 + 0x00], %f10
.Lhwunaligned_loop:
	ldd [%o1 + 0x08], %f12
	ldd [%o1 + 0x10], %f14
	ldd [%o1 + 0x18], %f16
	ldd [%o1 + 0x20], %f18
	ldd [%o1 + 0x28], %f20
	ldd [%o1 + 0x30], %f22
	ldd [%o1 + 0x38], %f24
	subcc %o2, 1, %o2 ! done yet?
	ldd [%o1 + 0x40], %f26
	add %o1, 0x40, %o1
	prefetch [%o1 + 63], 20

	faligndata %f10, %f12, %f8
	faligndata %f12, %f14, %f10
	faligndata %f14, %f16, %f12
	faligndata %f16, %f18, %f14
	faligndata %f18, %f20, %f16
	faligndata %f20, %f22, %f18
	faligndata %f22, %f24, %f20
	faligndata %f24, %f26, %f22

	.word 0x81b02800 ! MD5

	bne,pt SIZE_T_CC, .Lhwunaligned_loop
	for %f26, %f26, %f10 ! %f10=%f26

	ba .Lhwfinish
	nop

.align 16
.Lsoftware:
	save %sp,-STACK_FRAME,%sp

	rd %asi,$saved_asi
	wr %g0,0x88,%asi ! ASI_PRIMARY_LITTLE
	and $inp,7,$shr
	andn $inp,7,$inp

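	! shr = 8*(inp&7) is the right shift that realigns each loaded 64-bit
	! word; shl1+shl2 = 64-shr is the matching left shift, split in two so
	! that the aligned case (shr==0) becomes two 32-bit shifts (sllx uses
	! only the low 6 bits of its count, so a single shift by 64 would not
	! clear the register)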
	sll $shr,3,$shr ! *=8
	mov 56,$shl2
	ld [$ctx+0],$A
	sub $shl2,$shr,$shl2
	ld [$ctx+4],$B
	and $shl2,32,$shl1
	add $shl2,8,$shl2
	ld [$ctx+8],$C
	sub $shl2,$shl1,$shl2 ! shr+shl1+shl2==64
	ld [$ctx+12],$D
	nop

.Loop:
	cmp $shr,0 ! was inp aligned?
	ldxa [$inp+0]%asi,@X[0] ! load little-endian input
	ldxa [$inp+8]%asi,@X[1]
	ldxa [$inp+16]%asi,@X[2]
	ldxa [$inp+24]%asi,@X[3]
	ldxa [$inp+32]%asi,@X[4]
	sllx $A,32,$AB ! pack A,B
	ldxa [$inp+40]%asi,@X[5]
	sllx $C,32,$CD ! pack C,D
	ldxa [$inp+48]%asi,@X[6]
	or $B,$AB,$AB
	ldxa [$inp+56]%asi,@X[7]
	or $D,$CD,$CD
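	! the 9th doubleword is only needed (and, via the annulled branch,
	! only loaded) when the input was misaligned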
	bnz,a,pn %icc,.+8
	ldxa [$inp+64]%asi,@X[8]

	srlx @X[0],$shr,@X[0] ! align X[0]
	sllx @X[1],$shl1,$tx
	sethi %hi(@K[0]),$t2
	sllx $tx,$shl2,$tx
	or $t2,%lo(@K[0]),$t2
	or $tx,@X[0],@X[0]
	xor $C,$D,$t1
	add @X[0],$t2,$t2 ! X[0]+K[0]
___
for ($i=0;$i<15;$i++) { &R0($i,@V); unshift(@V,pop(@V)); }
for (;$i<16;$i++) { &R0_1($i,@V); unshift(@V,pop(@V)); }
for (;$i<32;$i++) { &R1($i,@V); unshift(@V,pop(@V)); }
for (;$i<48;$i++) { &R2($i,@V); unshift(@V,pop(@V)); }
for (;$i<64;$i++) { &R3($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	srlx $AB,32,$t1 ! unpack A,B,C,D and accumulate
	add $inp,64,$inp ! advance inp
	srlx $CD,32,$t2
	add $t1,$A,$A
	subcc $len,1,$len ! done yet?
	add $AB,$B,$B
	add $t2,$C,$C
	add $CD,$D,$D
	srl $B,0,$B ! clruw $B
	bne SIZE_T_CC,.Loop
	srl $D,0,$D ! clruw $D

	st $A,[$ctx+0] ! write out ctx
	st $B,[$ctx+4]
	st $C,[$ctx+8]
	st $D,[$ctx+12]

	wr %g0,$saved_asi,%asi
	ret
	restore
.type md5_block_asm_data_order,#function
.size md5_block_asm_data_order,(.-md5_block_asm_data_order)

.asciz "MD5 block transform for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___

# The purpose of these subroutines is to encode VIS instructions explicitly
# as .word directives, so that the module can be compiled without having to
# specify VIS extensions on the compiler command line, e.g. -xarch=v9 vs.
# -xarch=v9a. The idea is to keep the option of producing a "universal"
# binary and let the program detect at run-time whether the current CPU is
# VIS-capable.
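# For example, "faligndata %f10,%f12,%f8" (rd=8, rs1=10, rs2=12, opf=0x048)
# is emitted by unvis() below as ".word 0x91b2890c", i.e.
# 0x81b00000|8<<25|10<<14|0x048<<5|12.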
sub unvis {
    my ($mnemonic,$rs1,$rs2,$rd)=@_;
    my ($ref,$opf);
    my %visopf = ( "faligndata" => 0x048,
                   "for"        => 0x07c );

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
        foreach ($rs1,$rs2,$rd) {
            return $ref if (!/%f([0-9]{1,2})/);
            $_=$1;
            if ($1>=32) {
                return $ref if ($1&1);
                # re-encode for upper double register addressing
                $_=($1|$1>>5)&31;
            }
        }

        return sprintf ".word\t0x%08x !%s",
                       0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
                       $ref;
    } else {
        return $ref;
    }
}
sub unalignaddr {
    my ($mnemonic,$rs1,$rs2,$rd)=@_;
    my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
    my $ref="$mnemonic\t$rs1,$rs2,$rd";

    foreach ($rs1,$rs2,$rd) {
        if (/%([goli])([0-7])/) { $_=$bias{$1}+$2; }
        else { return $ref; }
    }
    return sprintf ".word\t0x%08x !%s",
                   0x81b00300|$rd<<25|$rs1<<14|$rs2,
                   $ref;
}

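# Post-process the generated code: evaluate `...` expressions, and replace
# VIS mnemonics (faligndata/for/alignaddr) with their .word encodings.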
foreach (split("\n",$code)) {
    s/\`([^\`]*)\`/eval $1/ge;

    s/\b(f[^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
        &unvis($1,$2,$3,$4)
    /ge;
    s/\b(alignaddr)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
        &unalignaddr($1,$2,$3,$4)
    /ge;

    print $_,"\n";
}

close STDOUT;