#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# SHA256 performance improvement over compiler-generated code varies
# from 40% for Sun C [32-bit build] to 70% for gcc [3.3, 64-bit
# build]. Just like in the SHA1 module I aim to ensure scalability on
# UltraSPARC T1 by packing X[16] into 8 64-bit registers.

# SHA512 on pre-T1 UltraSPARC.
#
# Performance is >75% better than 64-bit code generated by Sun C and
# over 2x better than 32-bit code. X[16] resides on the stack, but
# access to it is scheduled for L2 latency and staged through the 32
# least significant bits of %l0-%l7. The latter is done to achieve
# 32-/64-bit ABI duality. Nevertheless it's ~40% faster than SHA256,
# which is pretty good [the optimal coefficient is 50%].
#
# SHA512 on UltraSPARC T1.
#
# It's not any faster than 64-bit code generated by Sun C 5.8. This is
# because the 64-bit code generator has the advantage of using 64-bit
# loads(*) to access X[16], which I consciously traded for 32-/64-bit
# ABI duality [as per above]. But it surpasses 32-bit Sun C-generated
# code by 60%, not to mention that it doesn't suffer severe decay when
# running 4x as many threads as physical cores, and that it leaves gcc
# [3.4] behind by over a 4x factor! Compared to SHA256, single-thread
# performance is only 10% better, but overall throughput at the
# maximum number of threads for a given CPU exceeds that of SHA256 by
# 30% [again, the optimal coefficient is 50%].
#
# (*)	Unlike on pre-T1 UltraSPARC, loads on T1 are executed strictly
#	in-order, i.e. a load instruction has to complete before the
#	next instruction in the given thread is executed, even if the
#	latter does not depend on the load result! This means that on
#	T1 two 32-bit loads are always slower than one 64-bit load.
#	Once again this is unlike pre-T1 UltraSPARC, where, if
#	scheduled appropriately, 2x32-bit loads can be as fast as
#	1x64-bit ones.

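# Usage sketch [an assumption on my part, modelled on how OpenSSL
# Makefiles typically invoke CRYPTOGAMS modules: output file first,
# compiler flags after]:
#
#	perl sha512-sparcv9.pl sha512-sparcv9.s -m64
#
# The output file name is matched against /512/ below to select
# SHA512 vs SHA256 code, and -m64/-xarch=v9 anywhere among the
# arguments selects the 64-bit ABI [stack bias and frame size].
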
$bits=32;
for (@ARGV)	{ $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64)	{ $bias=2047; $frame=192; }
else		{ $bias=0;    $frame=112; }
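# $bias=2047 is the SPARC V9 64-bit ABI stack bias: %sp points 2047
# bytes below the true frame, so every frame offset must add it back.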

$output=shift;
open STDOUT,">$output";

if ($output =~ /512/) {
	$label="512";
	$SZ=8;
	$LD="ldx";		# load from memory
	$ST="stx";		# store to memory
	$SLL="sllx";		# shift left logical
	$SRL="srlx";		# shift right logical
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=( 7, 1, 8);	# right shift first
	@sigma1=( 6,19,61);	# right shift first
	$lastK=0x817;
	$rounds=80;
	$align=4;

	$locals=16*$SZ;		# X[16]

	$A="%o0";
	$B="%o1";
	$C="%o2";
	$D="%o3";
	$E="%o4";
	$F="%o5";
	$G="%g1";
	$H="%o7";
	@V=($A,$B,$C,$D,$E,$F,$G,$H);
} else {
	$label="256";
	$SZ=4;
	$LD="ld";		# load from memory
	$ST="st";		# store to memory
	$SLL="sll";		# shift left logical
	$SRL="srl";		# shift right logical
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 3, 7,18);	# right shift first
	@sigma1=(10,17,19);	# right shift first
	$lastK=0x8f2;
	$rounds=64;
	$align=8;

	$locals=0;		# X[16] is register resident
	@X=("%o0","%o1","%o2","%o3","%o4","%o5","%g1","%o7");

	$A="%l0";
	$B="%l1";
	$C="%l2";
	$D="%l3";
	$E="%l4";
	$F="%l5";
	$G="%l6";
	$H="%l7";
	@V=($A,$B,$C,$D,$E,$F,$G,$H);
}
$T1="%g2";
$tmp0="%g3";
$tmp1="%g4";
$tmp2="%g5";

$ctx="%i0";	# 1st arg: pointer to hash state
$inp="%i1";	# 2nd arg: pointer to input data
$len="%i2";	# 3rd arg: number of 16*$SZ-byte blocks
$Ktbl="%i3";	# pointer into the K constant table
$tmp31="%i4";	# input misalignment in bits
$tmp32="%i5";	# complementary shift amount

########### SHA256
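# Xload($i) for SHA256: X[16] is register resident, held in eight
# 64-bit registers @X[0..7] with two big-endian 32-bit words apiece
# [even-indexed word in the upper half]. Input may be unaligned: the
# prologue rounds $inp down to 8 bytes and puts the misalignment in
# bits into $tmp31, so round 0 loads a ninth word when needed and
# shifts/ORs everything into place before .Laligned.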
$Xload = sub {
  my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;

    if ($i==0) {
$code.=<<___;
	ldx	[$inp+0],@X[0]
	ldx	[$inp+16],@X[2]
	ldx	[$inp+32],@X[4]
	ldx	[$inp+48],@X[6]
	ldx	[$inp+8],@X[1]
	ldx	[$inp+24],@X[3]
	subcc	%g0,$tmp31,$tmp32	! should be 64-$tmp31, but -$tmp31
					! works too [shift counts are mod 64];
					! also sets %icc for the bz below
	ldx	[$inp+40],@X[5]
	bz,pt	%icc,.Laligned
	ldx	[$inp+56],@X[7]

	sllx	@X[0],$tmp31,@X[0]
	ldx	[$inp+64],$T1
___
for($j=0;$j<7;$j++)
{   $code.=<<___;
	srlx	@X[$j+1],$tmp32,$tmp1
	sllx	@X[$j+1],$tmp31,@X[$j+1]
	or	$tmp1,@X[$j],@X[$j]
___
}
$code.=<<___;
	srlx	$T1,$tmp32,$T1
	or	$T1,@X[7],@X[7]
.Laligned:
___
    }

    if ($i&1) {
	$code.="\tadd @X[$i/2],$h,$T1\n";
    } else {
	$code.="\tsrlx @X[$i/2],32,$T1\n\tadd $h,$T1,$T1\n";
    }
} if ($SZ==4);

########### SHA512
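# Xload($i) for SHA512: each X[i] is assembled from aligned 32-bit
# loads staged in %l0-%l7 [the 32-/64-bit ABI duality mentioned
# above]: up to three neighbouring words are shifted by $tmp31 [the
# misalignment in bits] and $tmp32 [its 32-bit complement] and ORed
# together in $tmp2, then spilled to the stack-resident X[16]. For
# aligned input the third term shifts out to zero and the result is
# simply pair[0]<<32|pair[1].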
$Xload = sub {
  my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
  my @pair=("%l".eval(($i*2)%8),"%l".eval(($i*2)%8+1),"%l".eval((($i+1)*2)%8));

$code.=<<___ if ($i==0);
	ld	[$inp+0],%l0
	ld	[$inp+4],%l1
	ld	[$inp+8],%l2
	ld	[$inp+12],%l3
	ld	[$inp+16],%l4
	ld	[$inp+20],%l5
	ld	[$inp+24],%l6
	cmp	$tmp31,0
	ld	[$inp+28],%l7
___
$code.=<<___ if ($i<15);
	sllx	@pair[1],$tmp31,$tmp2	! Xload($i)
	add	$tmp31,32,$tmp0
	sllx	@pair[0],$tmp0,$tmp1
	`"ld	[$inp+".eval(32+0+$i*8)."],@pair[0]"	if ($i<12)`
	srlx	@pair[2],$tmp32,@pair[1]
	or	$tmp1,$tmp2,$tmp2
	or	@pair[1],$tmp2,$tmp2
	`"ld	[$inp+".eval(32+4+$i*8)."],@pair[1]"	if ($i<12)`
	add	$h,$tmp2,$T1
	$ST	$tmp2,[%sp+`$bias+$frame+$i*$SZ`]
___
$code.=<<___ if ($i==12);
	bnz,a,pn	%icc,.+8	! annulled: the load below executes
	ld	[$inp+128],%l0		! only if input was misaligned
___
$code.=<<___ if ($i==15);
	ld	[%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+0`],%l2
	sllx	@pair[1],$tmp31,$tmp2	! Xload($i)
	add	$tmp31,32,$tmp0
	ld	[%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+4`],%l3
	sllx	@pair[0],$tmp0,$tmp1
	ld	[%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+0`],%l4
	srlx	@pair[2],$tmp32,@pair[1]
	or	$tmp1,$tmp2,$tmp2
	ld	[%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+4`],%l5
	or	@pair[1],$tmp2,$tmp2
	ld	[%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+0`],%l6
	add	$h,$tmp2,$T1
	$ST	$tmp2,[%sp+`$bias+$frame+$i*$SZ`]
	ld	[%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+4`],%l7
	ld	[%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+0`],%l0
	ld	[%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+4`],%l1
___
} if ($SZ==8);

########### common
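# BODY_00_15 emits one round of the FIPS 180-2 compression function:
#
#	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + X[i]
#	h  = Sigma0(a) + Maj(a,b,c) + T1;	d += T1;
#
# SPARCv9 has no rotate instruction, so every ROTR(x,n) is synthesized
# as (x>>n)^(x<<(w-n)) with a $SRL/$SLL pair folded in by xor. The
# boolean functions are computed as
#
#	Ch(e,f,g)  = g ^ (e & (f ^ g))
#	Maj(a,b,c) = (a & b) | (c & (a | b))
#
# which is how the and/or/xor sequences below should be read.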
sub BODY_00_15 {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;

    if ($i<16) {
	&$Xload(@_);
    } else {
	$code.="\tadd $h,$T1,$T1\n";
    }

$code.=<<___;
	$SRL	$e,@Sigma1[0],$h	!! $i
	xor	$f,$g,$tmp2
	$SLL	$e,`$SZ*8-@Sigma1[2]`,$tmp1
	and	$e,$tmp2,$tmp2
	$SRL	$e,@Sigma1[1],$tmp0
	xor	$tmp1,$h,$h
	$SLL	$e,`$SZ*8-@Sigma1[1]`,$tmp1
	xor	$tmp0,$h,$h
	$SRL	$e,@Sigma1[2],$tmp0
	xor	$tmp1,$h,$h
	$SLL	$e,`$SZ*8-@Sigma1[0]`,$tmp1
	xor	$tmp0,$h,$h
	xor	$g,$tmp2,$tmp2		! Ch(e,f,g)
	xor	$tmp1,$h,$tmp0		! Sigma1(e)

	$SRL	$a,@Sigma0[0],$h
	add	$tmp2,$T1,$T1
	$LD	[$Ktbl+`$i*$SZ`],$tmp2	! K[$i]
	$SLL	$a,`$SZ*8-@Sigma0[2]`,$tmp1
	add	$tmp0,$T1,$T1
	$SRL	$a,@Sigma0[1],$tmp0
	xor	$tmp1,$h,$h
	$SLL	$a,`$SZ*8-@Sigma0[1]`,$tmp1
	xor	$tmp0,$h,$h
	$SRL	$a,@Sigma0[2],$tmp0
	xor	$tmp1,$h,$h
	$SLL	$a,`$SZ*8-@Sigma0[0]`,$tmp1
	xor	$tmp0,$h,$h
	xor	$tmp1,$h,$h		! Sigma0(a)

	or	$a,$b,$tmp0
	and	$a,$b,$tmp1
	and	$c,$tmp0,$tmp0
	or	$tmp0,$tmp1,$tmp1	! Maj(a,b,c)
	add	$tmp2,$T1,$T1		! +=K[$i]
	add	$tmp1,$h,$h

	add	$T1,$d,$d
	add	$T1,$h,$h
___
}

########### SHA256
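# Xupdate($i) for SHA256: X[i+16] = sigma1(X[i+14]) + X[i+9] +
# sigma0(X[i+1]) + X[i], all X indices modulo 16, evaluated on the
# packed representation. Since each 64-bit @X register carries two
# words [even index in the upper half], the even/odd cases differ in
# which half is extracted with srlx and in how the freshly computed
# word is merged back into its register.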
$BODY_16_XX = sub {
my $i=@_[0];
my $xi;

    if ($i&1) {
	$xi=$tmp32;
	$code.="\tsrlx @X[(($i+1)/2)%8],32,$xi\n";
    } else {
	$xi=@X[(($i+1)/2)%8];
    }
$code.=<<___;
	srl	$xi,@sigma0[0],$T1		!! Xupdate($i)
	sll	$xi,`32-@sigma0[2]`,$tmp1
	srl	$xi,@sigma0[1],$tmp0
	xor	$tmp1,$T1,$T1
	sll	$tmp1,`@sigma0[2]-@sigma0[1]`,$tmp1
	xor	$tmp0,$T1,$T1
	srl	$xi,@sigma0[2],$tmp0
	xor	$tmp1,$T1,$T1
___
    if ($i&1) {
	$xi=@X[(($i+14)/2)%8];
    } else {
	$xi=$tmp32;
	$code.="\tsrlx @X[(($i+14)/2)%8],32,$xi\n";
    }
$code.=<<___;
	srl	$xi,@sigma1[0],$tmp2
	xor	$tmp0,$T1,$T1			! T1=sigma0(X[i+1])
	sll	$xi,`32-@sigma1[2]`,$tmp1
	srl	$xi,@sigma1[1],$tmp0
	xor	$tmp1,$tmp2,$tmp2
	sll	$tmp1,`@sigma1[2]-@sigma1[1]`,$tmp1
	xor	$tmp0,$tmp2,$tmp2
	srl	$xi,@sigma1[2],$tmp0
	xor	$tmp1,$tmp2,$tmp2
___
    if ($i&1) {
	$xi=@X[($i/2)%8];
$code.=<<___;
	srlx	@X[(($i+9)/2)%8],32,$tmp1	! X[i+9]
	xor	$tmp0,$tmp2,$tmp2		! sigma1(X[i+14])
	srl	@X[($i/2)%8],0,$tmp0
	add	$tmp2,$tmp1,$tmp1
	add	$xi,$T1,$T1			! +=X[i]
	xor	$tmp0,@X[($i/2)%8],@X[($i/2)%8]
	add	$tmp1,$T1,$T1

	srl	$T1,0,$T1
	or	$T1,@X[($i/2)%8],@X[($i/2)%8]
___
    } else {
	$xi=@X[(($i+9)/2)%8];
$code.=<<___;
	srlx	@X[($i/2)%8],32,$tmp1		! X[i]
	xor	$tmp0,$tmp2,$tmp2		! sigma1(X[i+14])
	add	$xi,$T1,$T1			! +=X[i+9]
	add	$tmp2,$tmp1,$tmp1
	srl	@X[($i/2)%8],0,@X[($i/2)%8]
	add	$tmp1,$T1,$T1

	sllx	$T1,32,$tmp0
	or	$tmp0,@X[($i/2)%8],@X[($i/2)%8]
___
    }
    &BODY_00_15(@_);
} if ($SZ==4);

########### SHA512
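# Xupdate($i) for SHA512: the same X[i+16] = sigma1(X[i+14]) +
# X[i+9] + sigma0(X[i+1]) + X[i] recurrence, except that each operand
# is first recomposed from its two 32-bit %l halves with sllx/or, the
# sigma shifts run at full 64-bit width, and the result is stored back
# to the stack while the next round's halves are loaded in the shadow
# of the arithmetic.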
$BODY_16_XX = sub {
my $i=@_[0];
my @pair=("%l".eval(($i*2)%8),"%l".eval(($i*2)%8+1));

$code.=<<___;
	sllx	%l2,32,$tmp0		!! Xupdate($i)
	or	%l3,$tmp0,$tmp0

	srlx	$tmp0,@sigma0[0],$T1
	ld	[%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+0`],%l2
	sllx	$tmp0,`64-@sigma0[2]`,$tmp1
	ld	[%sp+`$bias+$frame+(($i+1+1)%16)*$SZ+4`],%l3
	srlx	$tmp0,@sigma0[1],$tmp0
	xor	$tmp1,$T1,$T1
	sllx	$tmp1,`@sigma0[2]-@sigma0[1]`,$tmp1
	xor	$tmp0,$T1,$T1
	srlx	$tmp0,`@sigma0[2]-@sigma0[1]`,$tmp0
	xor	$tmp1,$T1,$T1
	sllx	%l6,32,$tmp2
	xor	$tmp0,$T1,$T1		! sigma0(X[$i+1])
	or	%l7,$tmp2,$tmp2

	srlx	$tmp2,@sigma1[0],$tmp1
	ld	[%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+0`],%l6
	sllx	$tmp2,`64-@sigma1[2]`,$tmp0
	ld	[%sp+`$bias+$frame+(($i+1+14)%16)*$SZ+4`],%l7
	srlx	$tmp2,@sigma1[1],$tmp2
	xor	$tmp0,$tmp1,$tmp1
	sllx	$tmp0,`@sigma1[2]-@sigma1[1]`,$tmp0
	xor	$tmp2,$tmp1,$tmp1
	srlx	$tmp2,`@sigma1[2]-@sigma1[1]`,$tmp2
	xor	$tmp0,$tmp1,$tmp1
	sllx	%l4,32,$tmp0
	xor	$tmp2,$tmp1,$tmp1	! sigma1(X[$i+14])
	ld	[%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+0`],%l4
	or	%l5,$tmp0,$tmp0
	ld	[%sp+`$bias+$frame+(($i+1+9)%16)*$SZ+4`],%l5

	sllx	%l0,32,$tmp2
	add	$tmp1,$T1,$T1
	ld	[%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+0`],%l0
	or	%l1,$tmp2,$tmp2
	add	$tmp0,$T1,$T1		! +=X[$i+9]
	ld	[%sp+`$bias+$frame+(($i+1+0)%16)*$SZ+4`],%l1
	add	$tmp2,$T1,$T1		! +=X[$i]
	$ST	$T1,[%sp+`$bias+$frame+($i%16)*$SZ`]
___
	&BODY_00_15(@_);
} if ($SZ==8);

$code.=<<___ if ($bits==64);
.register	%g2,#scratch
.register	%g3,#scratch
___
$code.=<<___;
.section	".text",#alloc,#execinstr

.align	64
K${label}:
.type	K${label},#object
___
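# K[0..$rounds-1]: the first 32 [64] bits of the fractional parts of
# the cube roots of the first 64 [80] primes, as per FIPS 180-2.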
if ($SZ==4) {
$code.=<<___;
	.long	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
	.long	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
	.long	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
	.long	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
	.long	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
	.long	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
	.long	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
	.long	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
	.long	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
	.long	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
	.long	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
	.long	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
	.long	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
	.long	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
	.long	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
	.long	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
___
} else {
$code.=<<___;
	.long	0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd
	.long	0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc
	.long	0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019
	.long	0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118
	.long	0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe
	.long	0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2
	.long	0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1
	.long	0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694
	.long	0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3
	.long	0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65
	.long	0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483
	.long	0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5
	.long	0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210
	.long	0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4
	.long	0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725
	.long	0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70
	.long	0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926
	.long	0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df
	.long	0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8
	.long	0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b
	.long	0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001
	.long	0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30
	.long	0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910
	.long	0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8
	.long	0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53
	.long	0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8
	.long	0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb
	.long	0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3
	.long	0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60
	.long	0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec
	.long	0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9
	.long	0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b
	.long	0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207
	.long	0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178
	.long	0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6
	.long	0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b
	.long	0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493
	.long	0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c
	.long	0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a
	.long	0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817
___
}
$code.=<<___;
.size	K${label},.-K${label}
.globl	sha${label}_block_data_order
sha${label}_block_data_order:
	save	%sp,`-$frame-$locals`,%sp
	and	$inp,`$align-1`,$tmp31		! misalignment in bytes
	sllx	$len,`log(16*$SZ)/log(2)`,$len	! convert blocks to bytes
	andn	$inp,`$align-1`,$inp		! align input pointer down
	sll	$tmp31,3,$tmp31			! misalignment in bits
	add	$inp,$len,$len			! end-of-input pointer
___
$code.=<<___ if ($SZ==8); # SHA512
	mov	32,$tmp32
	sub	$tmp32,$tmp31,$tmp32		! complementary shift amount
___
$code.=<<___;
.Lpic:	call	.+8
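	! %o7 now holds the address of .Lpic [call writes its own PC to
	! %o7], so the add in the delay slot yields a position-independent
	! pointer to the K table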
	add	%o7,K${label}-.Lpic,$Ktbl

	$LD	[$ctx+`0*$SZ`],$A
	$LD	[$ctx+`1*$SZ`],$B
	$LD	[$ctx+`2*$SZ`],$C
	$LD	[$ctx+`3*$SZ`],$D
	$LD	[$ctx+`4*$SZ`],$E
	$LD	[$ctx+`5*$SZ`],$F
	$LD	[$ctx+`6*$SZ`],$G
	$LD	[$ctx+`7*$SZ`],$H

.Lloop:
___
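# The loop body is fully unrolled: 16 plain rounds, then a 16-round
# block of scheduled rounds that re-executes until the K table is
# exhausted. unshift(@V,pop(@V)) rotates the variable list so that
# every round body is textually identical up to register naming.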
for ($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
$code.=".L16_xx:\n";
for (;$i<32;$i++)	{ &$BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
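# Loop control without a counter: $tmp2 still holds the K[i] fetched
# by the last round above, so comparing its low 12 bits against
# $lastK [0x8f2/0x817, the low bits of the final SHA256/SHA512
# constant] detects when the whole table has been consumed.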
$code.=<<___;
	and	$tmp2,0xfff,$tmp2
	cmp	$tmp2,$lastK
	bne	.L16_xx
	add	$Ktbl,`16*$SZ`,$Ktbl	! Ktbl+=16

___
$code.=<<___ if ($SZ==4); # SHA256
	$LD	[$ctx+`0*$SZ`],@X[0]
	$LD	[$ctx+`1*$SZ`],@X[1]
	$LD	[$ctx+`2*$SZ`],@X[2]
	$LD	[$ctx+`3*$SZ`],@X[3]
	$LD	[$ctx+`4*$SZ`],@X[4]
	$LD	[$ctx+`5*$SZ`],@X[5]
	$LD	[$ctx+`6*$SZ`],@X[6]
	$LD	[$ctx+`7*$SZ`],@X[7]

	add	$A,@X[0],$A
	$ST	$A,[$ctx+`0*$SZ`]
	add	$B,@X[1],$B
	$ST	$B,[$ctx+`1*$SZ`]
	add	$C,@X[2],$C
	$ST	$C,[$ctx+`2*$SZ`]
	add	$D,@X[3],$D
	$ST	$D,[$ctx+`3*$SZ`]
	add	$E,@X[4],$E
	$ST	$E,[$ctx+`4*$SZ`]
	add	$F,@X[5],$F
	$ST	$F,[$ctx+`5*$SZ`]
	add	$G,@X[6],$G
	$ST	$G,[$ctx+`6*$SZ`]
	add	$H,@X[7],$H
	$ST	$H,[$ctx+`7*$SZ`]
___
$code.=<<___ if ($SZ==8); # SHA512
	ld	[$ctx+`0*$SZ+0`],%l0
	ld	[$ctx+`0*$SZ+4`],%l1
	ld	[$ctx+`1*$SZ+0`],%l2
	ld	[$ctx+`1*$SZ+4`],%l3
	ld	[$ctx+`2*$SZ+0`],%l4
	ld	[$ctx+`2*$SZ+4`],%l5
	ld	[$ctx+`3*$SZ+0`],%l6

	sllx	%l0,32,$tmp0
	ld	[$ctx+`3*$SZ+4`],%l7
	sllx	%l2,32,$tmp1
	or	%l1,$tmp0,$tmp0
	or	%l3,$tmp1,$tmp1
	add	$tmp0,$A,$A
	add	$tmp1,$B,$B
	$ST	$A,[$ctx+`0*$SZ`]
	sllx	%l4,32,$tmp2
	$ST	$B,[$ctx+`1*$SZ`]
	sllx	%l6,32,$T1
	or	%l5,$tmp2,$tmp2
	or	%l7,$T1,$T1
	add	$tmp2,$C,$C
	$ST	$C,[$ctx+`2*$SZ`]
	add	$T1,$D,$D
	$ST	$D,[$ctx+`3*$SZ`]

	ld	[$ctx+`4*$SZ+0`],%l0
	ld	[$ctx+`4*$SZ+4`],%l1
	ld	[$ctx+`5*$SZ+0`],%l2
	ld	[$ctx+`5*$SZ+4`],%l3
	ld	[$ctx+`6*$SZ+0`],%l4
	ld	[$ctx+`6*$SZ+4`],%l5
	ld	[$ctx+`7*$SZ+0`],%l6

	sllx	%l0,32,$tmp0
	ld	[$ctx+`7*$SZ+4`],%l7
	sllx	%l2,32,$tmp1
	or	%l1,$tmp0,$tmp0
	or	%l3,$tmp1,$tmp1
	add	$tmp0,$E,$E
	add	$tmp1,$F,$F
	$ST	$E,[$ctx+`4*$SZ`]
	sllx	%l4,32,$tmp2
	$ST	$F,[$ctx+`5*$SZ`]
	sllx	%l6,32,$T1
	or	%l5,$tmp2,$tmp2
	or	%l7,$T1,$T1
	add	$tmp2,$G,$G
	$ST	$G,[$ctx+`6*$SZ`]
	add	$T1,$H,$H
	$ST	$H,[$ctx+`7*$SZ`]
___
$code.=<<___;
	add	$inp,`16*$SZ`,$inp		! advance inp
	cmp	$inp,$len
	bne	`$bits==64?"%xcc":"%icc"`,.Lloop
	sub	$Ktbl,`($rounds-16)*$SZ`,$Ktbl	! rewind Ktbl

	ret
	restore
.type	sha${label}_block_data_order,#function
.size	sha${label}_block_data_order,(.-sha${label}_block_data_order)
.asciz	"SHA${label} block transform for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
.align	4
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;	# resolve `...` compile-time expressions
print $code;
close STDOUT;