#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256/512 for ARMv8.
#
# Performance in cycles per processed byte and improvement coefficient
# over code generated with "default" compiler:
#
#		SHA256-hw	SHA256(*)	SHA512
# Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
# Cortex-A53	2.38		15.6 (+110%)	10.1 (+190%(***))
# Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
#
# (*)	Software SHA256 results are of lesser relevance, presented
#	mostly for informational purposes.
# (**)	The result is a trade-off: it's possible to improve it by
#	10% (or by 1 cycle per round), but at the cost of a 20% loss
#	on Cortex-A53 (or of 4 cycles per round).
# (***)	Super-impressive coefficients over gcc-generated code are an
#	indication of some compiler "pathology"; most notably, code
#	generated with -mgeneral-regs-only is significantly faster
#	and lags behind assembly by only 50-90%.

$flavour=shift;
$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

if ($output =~ /512/) {
	$BITS=512;
	$SZ=8;
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1, 8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
	$reg_t="x";
} else {
	$BITS=256;
	$SZ=4;
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
	$reg_t="w";
}
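# The output file name selects the digest: the word size, rotation constants,
# round count and register width chosen above parameterize one common code
# body for both SHA-256 and SHA-512.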

$func="sha${BITS}_block_data_order";

($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));

@X=map("$reg_t$_",(3..15,0..2));
@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));
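# BODY_00_xx emits one round of the compression function: for rounds 0-15
# the message word comes straight from the input block, from round 16 on
# the schedule is extended in place within the sixteen-entry @X window.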
sub BODY_00_xx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
my $j=($i+1)&15;
my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
   $T0=@X[$i+3] if ($i<11);

$code.=<<___	if ($i<16);
#ifndef	__ARMEB__
	rev	@X[$i],@X[$i]			// $i
#endif
___
$code.=<<___	if ($i<13 && ($i&1));
	ldp	@X[$i+1],@X[$i+2],[$inp],#2*$SZ
___
$code.=<<___	if ($i==13);
	ldp	@X[14],@X[15],[$inp]
___
$code.=<<___	if ($i>=14);
	ldr	@X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
___
$code.=<<___	if ($i>0 && $i<16);
	add	$a,$a,$t1			// h+=Sigma0(a)
___
$code.=<<___	if ($i>=11);
	str	@X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
___
# While ARMv8 specifies merged rotate-and-logical operations such as
# 'eor x,y,z,ror#n', they were found to hurt performance on Apple A7.
# The reason seems to be that a merged instruction requires even 'y'
# to be available earlier, so it is not necessarily the best choice on
# the critical path... On the other hand Cortex-A5x handles merged
# instructions much better than disjoint rotate and logical... See the
# (**) footnote above.
$code.=<<___	if ($i<15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	eor	$T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
	and	$t1,$f,$e
	bic	$t2,$g,$e
	add	$h,$h,@X[$i&15]			// h+=X[i]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$T0,ror#$Sigma1[1]	// Sigma1(e)
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	eor	$t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
	add	$h,$h,$t0			// h+=Sigma1(e)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	add	$d,$d,$h			// d+=h
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$t1,ror#$Sigma0[1]	// Sigma0(a)
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	//add	$h,$h,$t1			// h+=Sigma0(a)
___
$code.=<<___	if ($i>=15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	ror	$T1,@X[($j+1)&15],#$sigma0[0]
	and	$t1,$f,$e
	ror	$T2,@X[($j+14)&15],#$sigma1[0]
	bic	$t2,$g,$e
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,@X[$i&15]			// h+=X[i]
	eor	$t0,$t0,$e,ror#$Sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$e,ror#$Sigma1[2]	// Sigma1(e)
	eor	$T0,$T0,$a,ror#$Sigma0[1]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	eor	$T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],lsr#$sigma0[2]	// sigma0(X[i+1])
	add	$h,$h,$t0			// h+=Sigma1(e)
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$a,ror#$Sigma0[2]	// Sigma0(a)
	eor	$T2,$T2,@X[($j+14)&15],lsr#$sigma1[2]	// sigma1(X[i+14])
	add	@X[$j],@X[$j],@X[($j+9)&15]
	add	$d,$d,$h			// d+=h
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	add	@X[$j],@X[$j],$T1
	add	$h,$h,$t1			// h+=Sigma0(a)
	add	@X[$j],@X[$j],$T2
___
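	# $t2 holds the pre-loaded K[i] while $t3 carries a^b for Maj() of the
	# next round; swapping them hands each value to its next consumer.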
	($t2,$t3)=($t3,$t2);
}

$code.=<<___;
#include "arm_arch.h"

.text

.extern	OPENSSL_armcap_P
.globl	$func
.type	$func,%function
.align	6
$func:
___
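# The SHA-256 build probes OPENSSL_armcap_P at run time and branches to the
# hardware-assisted code path when the SHA256 crypto extension is available.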
$code.=<<___	if ($SZ==4);
	ldr	x16,.LOPENSSL_armcap_P
	adr	x17,.LOPENSSL_armcap_P
	add	x16,x16,x17
	ldr	w16,[x16]
	tst	w16,#ARMV8_SHA256
	b.ne	.Lv8_entry
___
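# The scalar path keeps the callee-saved registers in a 128-byte frame and
# stashes $ctx/$num at x29+96 and the running $inp at x29+112 across rounds.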
$code.=<<___;
	stp	x29,x30,[sp,#-128]!
	add	x29,sp,#0

	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	sub	sp,sp,#4*$SZ

	ldp	$A,$B,[$ctx]				// load context
	ldp	$C,$D,[$ctx,#2*$SZ]
	ldp	$E,$F,[$ctx,#4*$SZ]
	add	$num,$inp,$num,lsl#`log(16*$SZ)/log(2)`	// end of input
	ldp	$G,$H,[$ctx,#6*$SZ]
	adr	$Ktbl,.LK$BITS
	stp	$ctx,$num,[x29,#96]

.Loop:
	ldp	@X[0],@X[1],[$inp],#2*$SZ
	ldr	$t2,[$Ktbl],#$SZ		// *K++
	eor	$t3,$B,$C			// magic seed
	str	$inp,[x29,#112]
___
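# The first sixteen rounds consume the input block; the next sixteen form
# .Loop_16_xx, which repeats until the zero terminator of the K table makes
# the cbnz fall through.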
for ($i=0;$i<16;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=".Loop_16_xx:\n";
for (;$i<32;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	cbnz	$t2,.Loop_16_xx

	ldp	$ctx,$num,[x29,#96]
	ldr	$inp,[x29,#112]
	sub	$Ktbl,$Ktbl,#`$SZ*($rounds+1)`	// rewind

	ldp	@X[0],@X[1],[$ctx]
	ldp	@X[2],@X[3],[$ctx,#2*$SZ]
	add	$inp,$inp,#14*$SZ		// advance input pointer
	ldp	@X[4],@X[5],[$ctx,#4*$SZ]
	add	$A,$A,@X[0]
	ldp	@X[6],@X[7],[$ctx,#6*$SZ]
	add	$B,$B,@X[1]
	add	$C,$C,@X[2]
	add	$D,$D,@X[3]
	stp	$A,$B,[$ctx]
	add	$E,$E,@X[4]
	add	$F,$F,@X[5]
	stp	$C,$D,[$ctx,#2*$SZ]
	add	$G,$G,@X[6]
	add	$H,$H,@X[7]
	cmp	$inp,$num
	stp	$E,$F,[$ctx,#4*$SZ]
	stp	$G,$H,[$ctx,#6*$SZ]
	b.ne	.Loop

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#4*$SZ
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#128
	ret
.size	$func,.-$func

.align	6
.type	.LK$BITS,%object
.LK$BITS:
___
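# Round-constant tables: the first 64 bits of the fractional parts of the
# cube roots of the first eighty primes for SHA-512, the first 32 bits for
# the first sixty-four primes for SHA-256; the trailing zero terminates
# .Loop_16_xx.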
$code.=<<___ if ($SZ==8);
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
	.quad	0	// terminator
___
$code.=<<___ if ($SZ==4);
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
	.long	0	// terminator
___
$code.=<<___;
.size	.LK$BITS,.-.LK$BITS
.align	3
.LOPENSSL_armcap_P:
	.quad	OPENSSL_armcap_P-.
.asciz	"SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

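# Hardware-assisted SHA-256 using the ARMv8 Crypto Extensions
# (sha256h, sha256h2, sha256su0, sha256su1); emitted for the 256-bit build only.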
if ($SZ==4) {
my $Ktbl="x3";

my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
my @MSG=map("v$_.16b",(4..7));
my ($W0,$W1)=("v16.4s","v17.4s");
my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");

$code.=<<___;
.type	sha256_block_armv8,%function
.align	6
sha256_block_armv8:
.Lv8_entry:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1.32	{$ABCD,$EFGH},[$ctx]
	adr	$Ktbl,.LK256

.Loop_hw:
	ld1	{@MSG[0]-@MSG[3]},[$inp],#64
	sub	$num,$num,#1
	ld1.32	{$W0},[$Ktbl],#16
	rev32	@MSG[0],@MSG[0]
	rev32	@MSG[1],@MSG[1]
	rev32	@MSG[2],@MSG[2]
	rev32	@MSG[3],@MSG[3]
	orr	$ABCD_SAVE,$ABCD,$ABCD		// offload
	orr	$EFGH_SAVE,$EFGH,$EFGH
___
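# Twelve iterations interleave message-schedule extension (sha256su0/su1)
# with the round computation; the final four quads below need no further
# schedule words.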
for($i=0;$i<12;$i++) {
$code.=<<___;
	ld1.32	{$W1},[$Ktbl],#16
	add.i32	$W0,$W0,@MSG[0]
	sha256su0	@MSG[0],@MSG[1]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0
	sha256su1	@MSG[0],@MSG[2],@MSG[3]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
}
$code.=<<___;
	ld1.32	{$W1},[$Ktbl],#16
	add.i32	$W0,$W0,@MSG[0]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	ld1.32	{$W0},[$Ktbl],#16
	add.i32	$W1,$W1,@MSG[1]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	ld1.32	{$W1},[$Ktbl]
	add.i32	$W0,$W0,@MSG[2]
	sub	$Ktbl,$Ktbl,#$rounds*$SZ-16	// rewind
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	add.i32	$W1,$W1,@MSG[3]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	add.i32	$ABCD,$ABCD,$ABCD_SAVE
	add.i32	$EFGH,$EFGH,$EFGH_SAVE

	cbnz	$num,.Loop_hw

	st1.32	{$ABCD,$EFGH},[$ctx]

	ldr	x29,[sp],#16
	ret
.size	sha256_block_armv8,.-sha256_block_armv8
___
}

$code.=<<___;
.comm	OPENSSL_armcap_P,4,4
___

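# Older assemblers do not recognize the SHA-256 crypto mnemonics, so
# unsha256() re-encodes them as raw .inst words using the opcode table below.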
{   my %opcode = (
	"sha256h"	=> 0x5e004000,	"sha256h2"	=> 0x5e005000,
	"sha256su0"	=> 0x5e282800,	"sha256su1"	=> 0x5e006000	);

    sub unsha256 {
	my ($mnemonic,$arg)=@_;

	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
	&&
	sprintf ".inst\t0x%08x\t//%s %s",
			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
			$mnemonic,$arg;
    }
}

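# Post-process the generated code: evaluate back-quoted expressions, encode
# the crypto instructions, strip the ".32" size suffix from mnemonics and
# rewrite ".16b" register notation as ".4s" where required.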
foreach(split("\n",$code)) {

	s/\`([^\`]*)\`/eval($1)/geo;

	s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/geo;

	s/\.\w?32\b//o		and s/\.16b/\.4s/go;
	m/(ld|st)1[^\[]+\[0\]/o	and s/\.4s/\.s/go;

	print $_,"\n";
}

close STDOUT;