#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# Multi-buffer SHA256 procedure processes n buffers in parallel by
# placing buffer data to designated lane of SIMD register. n is
# naturally limited to 4 on pre-AVX2 processors and to 8 on
# AVX2-capable processors such as Haswell.
#
#		this	+aesni(i)	sha256	aesni-sha256	gain(iv)
# -------------------------------------------------------------------
# Westmere(ii)	23.3/n	+1.28=7.11(n=4)	12.3	+3.75=16.1	+126%
# Atom(ii)	39.1/n	+3.93=13.7(n=4)	20.8	+5.69=26.5	+93%
# Sandy Bridge	(20.5	+5.15=25.7)/n	11.6	13.0		+103%
# Ivy Bridge	(20.4	+5.14=25.5)/n	10.3	11.6		+82%
# Haswell(iii)	(21.0	+5.00=26.0)/n	7.80	8.79		+170%
# Bulldozer	(21.6	+5.76=27.4)/n	13.6	13.7		+100%
#
# (i)	multi-block CBC encrypt with 128-bit key;
# (ii)	(HASH+AES)/n does not apply to Westmere for n>3 and Atom,
#	because of lower AES-NI instruction throughput, nor is there
#	AES-NI-SHA256 stitch for these processors;
# (iii)	"this" is for n=8, when we gather twice as much data, result
#	for n=4 is 20.3+4.44=24.7;
# (iv)	improvement coefficients in real-life application are somewhat
#	lower and range from 75% to 130% (on Haswell);

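# A rough sketch of the data layout (illustrative only, not emitted
# code): with n=4 distinct messages, word j of message k is steered
# into dword lane k, so a single gathered register looks like
#
#	xmm = [ W_j(msg0) | W_j(msg1) | W_j(msg2) | W_j(msg3) ]
#
# and each SHA256 round below advances all four hash states at once.
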
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

$avx=0;

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

# void sha256_multi_block (
#     struct {	unsigned int A[8];
#		unsigned int B[8];
#		unsigned int C[8];
#		unsigned int D[8];
#		unsigned int E[8];
#		unsigned int F[8];
#		unsigned int G[8];
#		unsigned int H[8];	} *ctx,
#     struct {	void *ptr; int blocks;	} inp[8],
#     int num);		/* 1 or 2 */
#
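# A minimal caller sketch (hypothetical C; the type names and exact
# initialization are assumptions beyond what the prototype guarantees):
#
#	SHA256_MB_CTX ctx;	/* A[8]..H[8], one lane per buffer  */
#	HASH_DESC     inp[8];	/* {ptr,blocks}, 64 bytes per block */
#	/* seed every used lane of ctx with the SHA256 IV words,
#	 * set inp[i].ptr/inp[i].blocks for each buffer ... */
#	sha256_multi_block(&ctx, inp, 2);	/* num=2 => up to 8 buffers */
#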
$ctx="%rdi";	# 1st arg
$inp="%rsi";	# 2nd arg
$num="%edx";	# 3rd arg
@ptr=map("%r$_",(8..11));
$Tbl="%rbp";

@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("%xmm$_",(8..15));
($t1,$t2,$t3,$axb,$bxc,$Xi,$Xn,$sigma)=map("%xmm$_",(0..7));

$REG_SZ=16;

sub Xi_off {
my $off = shift;

    $off %= 16; $off *= $REG_SZ;
    $off<256 ? "$off-128(%rax)" : "$off-256-128(%rbx)";
}

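# Xi_off(i) names the stack slot holding X[i%16]. With $REG_SZ==16 all
# sixteen message words fit in the 256 bytes addressed off %rax; with
# $REG_SZ==32 (the AVX2 path) the upper half spills into the region
# addressed off %rbx. Both bases carry a -128 bias so that most
# displacements fit the short disp8 encoding; e.g. with $REG_SZ==16,
# Xi_off(3) yields "48-128(%rax)" and Xi_off(17) yields "16-128(%rax)".
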
sub ROUND_00_15 {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;

$code.=<<___ if ($i<15);
	movd	`4*$i`(@ptr[0]),$Xi
	movd	`4*$i`(@ptr[1]),$t1
	movd	`4*$i`(@ptr[2]),$t2
	movd	`4*$i`(@ptr[3]),$t3
	punpckldq	$t2,$Xi
	punpckldq	$t3,$t1
	punpckldq	$t1,$Xi
	pshufb	$Xn,$Xi
___
$code.=<<___ if ($i==15);
	movd	`4*$i`(@ptr[0]),$Xi
	lea	`16*4`(@ptr[0]),@ptr[0]
	movd	`4*$i`(@ptr[1]),$t1
	lea	`16*4`(@ptr[1]),@ptr[1]
	movd	`4*$i`(@ptr[2]),$t2
	lea	`16*4`(@ptr[2]),@ptr[2]
	movd	`4*$i`(@ptr[3]),$t3
	lea	`16*4`(@ptr[3]),@ptr[3]
	punpckldq	$t2,$Xi
	punpckldq	$t3,$t1
	punpckldq	$t1,$Xi
	pshufb	$Xn,$Xi
___
$code.=<<___;
	movdqa	$e,$sigma
	movdqa	$e,$t3
	psrld	\$6,$sigma
	movdqa	$e,$t2
	pslld	\$7,$t3
	movdqa	$Xi,`&Xi_off($i)`
	paddd	$h,$Xi				# Xi+=h

	psrld	\$11,$t2
	pxor	$t3,$sigma
	pslld	\$21-7,$t3
	paddd	`32*($i%8)-128`($Tbl),$Xi	# Xi+=K[round]
	pxor	$t2,$sigma

	psrld	\$25-11,$t2
	movdqa	$e,$t1
	pxor	$t3,$sigma
	movdqa	$e,$axb				# borrow $axb
	pslld	\$26-21,$t3
	pandn	$g,$t1
	pand	$f,$axb
	pxor	$t2,$sigma

	movdqa	$a,$t2
	pxor	$t3,$sigma			# Sigma1(e)
	movdqa	$a,$t3
	psrld	\$2,$t2
	paddd	$sigma,$Xi			# Xi+=Sigma1(e)
	pxor	$axb,$t1			# Ch(e,f,g)
	movdqa	$b,$axb
	movdqa	$a,$sigma
	pslld	\$10,$t3
	pxor	$a,$axb				# a^b, b^c in next round

	psrld	\$13,$sigma
	pxor	$t3,$t2
	paddd	$t1,$Xi				# Xi+=Ch(e,f,g)
	pslld	\$19-10,$t3
	pand	$axb,$bxc
	pxor	$sigma,$t2

	psrld	\$22-13,$sigma
	pxor	$t3,$t2
	movdqa	$b,$h
	pslld	\$30-19,$t3
	pxor	$t2,$sigma
	pxor	$bxc,$h				# h=Maj(a,b,c)=Ch(a^b,c,b)
	paddd	$Xi,$d				# d+=Xi
	pxor	$t3,$sigma			# Sigma0(a)

	paddd	$Xi,$h				# h+=Xi
	paddd	$sigma,$h			# h+=Sigma0(a)
___
$code.=<<___ if (($i%8)==7);
	lea	`32*8`($Tbl),$Tbl
___
	($axb,$bxc)=($bxc,$axb);
}

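# A note on the shift ladders above: SSE2 has no vector rotate, so each
# rotr(x,r) inside Sigma0/Sigma1 is built from a shift pair combined by
# pxor, e.g. for Sigma1(e) (illustrative):
#
#	Sigma1(e) = rotr(e,6) ^ rotr(e,11) ^ rotr(e,25)
#	          = (e>>6 ^ e<<26) ^ (e>>11 ^ e<<21) ^ (e>>25 ^ e<<7)
#
# which is why psrld steps through 6,11,25 and pslld through 7,21,26,
# reusing one temporary per shift direction.
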
sub ROUND_16_XX {
my $i=shift;

$code.=<<___;
	movdqa	`&Xi_off($i+1)`,$Xn
	paddd	`&Xi_off($i+9)`,$Xi		# Xi+=X[i+9]

	movdqa	$Xn,$sigma
	movdqa	$Xn,$t2
	psrld	\$3,$sigma
	movdqa	$Xn,$t3

	psrld	\$7,$t2
	movdqa	`&Xi_off($i+14)`,$t1
	pslld	\$14,$t3
	pxor	$t2,$sigma
	psrld	\$18-7,$t2
	movdqa	$t1,$axb			# borrow $axb
	pxor	$t3,$sigma
	pslld	\$25-14,$t3
	pxor	$t2,$sigma
	psrld	\$10,$t1
	movdqa	$axb,$t2

	psrld	\$17,$axb
	pxor	$t3,$sigma			# sigma0(X[i+1])
	pslld	\$13,$t2
	paddd	$sigma,$Xi			# Xi+=sigma0(X[i+1])
	pxor	$axb,$t1
	psrld	\$19-17,$axb
	pxor	$t2,$t1
	pslld	\$15-13,$t2
	pxor	$axb,$t1
	pxor	$t2,$t1				# sigma1(X[i+14])
	paddd	$t1,$Xi				# Xi+=sigma1(X[i+14])
___
	&ROUND_00_15($i,@_);
	($Xi,$Xn)=($Xn,$Xi);
}

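# ROUND_16_XX implements the standard SHA256 message schedule,
#
#	X[i] += sigma1(X[i+14]) + X[i+9] + sigma0(X[i+1])
#
# with indices taken modulo the rotating 16-slot window on the stack
# (in FIPS 180-4 terms X[i] is W[t-16], X[i+1] is W[t-15], X[i+9] is
# W[t-7] and X[i+14] is W[t-2]), then falls through to ROUND_00_15 for
# the round proper.
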
$code.=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	sha256_multi_block
.type	sha256_multi_block,\@function,3
.align	32
sha256_multi_block:
___
$code.=<<___ if ($avx);
	mov	OPENSSL_ia32cap_P+4(%rip),%rcx
	test	\$`1<<28`,%ecx
	jnz	_avx_shortcut
___
$code.=<<___;
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,(%rsp)
	movaps	%xmm7,0x10(%rsp)
	movaps	%xmm8,0x20(%rsp)
	movaps	%xmm9,0x30(%rsp)
	movaps	%xmm10,-0x78(%rax)
	movaps	%xmm11,-0x68(%rax)
	movaps	%xmm12,-0x58(%rax)
	movaps	%xmm13,-0x48(%rax)
	movaps	%xmm14,-0x38(%rax)
	movaps	%xmm15,-0x28(%rax)
___
$code.=<<___;
	sub	\$`$REG_SZ*18`, %rsp
	and	\$-256,%rsp
	mov	%rax,`$REG_SZ*17`(%rsp)	# original %rsp
	lea	K256+128(%rip),$Tbl
	lea	`$REG_SZ*16`(%rsp),%rbx
	lea	0x80($ctx),$ctx		# size optimization

.Loop_grande:
	mov	$num,`$REG_SZ*17+8`(%rsp)	# original $num
	xor	$num,$num
___
for($i=0;$i<4;$i++) {
    $code.=<<___;
	mov	`16*$i+0`($inp),@ptr[$i]	# input pointer
	mov	`16*$i+8`($inp),%ecx		# number of blocks
	cmp	$num,%ecx
	cmovg	%ecx,$num			# find maximum
	test	%ecx,%ecx
	mov	%ecx,`4*$i`(%rbx)		# initialize counters
	cmovle	$Tbl,@ptr[$i]			# cancel input
___
}
$code.=<<___;
	test	$num,$num
	jz	.Ldone

	movdqu	0x00-0x80($ctx),$A	# load context
	lea	128(%rsp),%rax
	movdqu	0x20-0x80($ctx),$B
	movdqu	0x40-0x80($ctx),$C
	movdqu	0x60-0x80($ctx),$D
	movdqu	0x80-0x80($ctx),$E
	movdqu	0xa0-0x80($ctx),$F
	movdqu	0xc0-0x80($ctx),$G
	movdqu	0xe0-0x80($ctx),$H
	movdqu	.Lpbswap(%rip),$Xn
	jmp	.Loop

.align	32
.Loop:
	movdqa	$C,$bxc
	pxor	$B,$bxc			# magic seed
___
for($i=0;$i<16;$i++)	{ &ROUND_00_15($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	movdqu	`&Xi_off($i)`,$Xi
	mov	\$3,%ecx
	jmp	.Loop_16_xx
.align	32
.Loop_16_xx:
___
for(;$i<32;$i++)	{ &ROUND_16_XX($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	dec	%ecx
	jnz	.Loop_16_xx

	mov	\$1,%ecx
	lea	K256+128(%rip),$Tbl

	movdqa	(%rbx),$sigma		# pull counters
	cmp	4*0(%rbx),%ecx		# examine counters
	pxor	$t1,$t1
	cmovge	$Tbl,@ptr[0]		# cancel input
	cmp	4*1(%rbx),%ecx
	movdqa	$sigma,$Xn
	cmovge	$Tbl,@ptr[1]
	cmp	4*2(%rbx),%ecx
	pcmpgtd	$t1,$Xn			# mask value
	cmovge	$Tbl,@ptr[2]
	cmp	4*3(%rbx),%ecx
	paddd	$Xn,$sigma		# counters--
	cmovge	$Tbl,@ptr[3]

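	# At this point each still-active lane compared >0 and produced
	# an all-ones mask (i.e. -1), so the paddd above decremented
	# exactly those counters. A lane on its last block gets its input
	# pointer parked at the K256 table, keeping subsequent loads
	# harmless; the same mask gates the context update below.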
	movdqu	0x00-0x80($ctx),$t1
	pand	$Xn,$A
	movdqu	0x20-0x80($ctx),$t2
	pand	$Xn,$B
	movdqu	0x40-0x80($ctx),$t3
	pand	$Xn,$C
	movdqu	0x60-0x80($ctx),$Xi
	pand	$Xn,$D
	paddd	$t1,$A
	movdqu	0x80-0x80($ctx),$t1
	pand	$Xn,$E
	paddd	$t2,$B
	movdqu	0xa0-0x80($ctx),$t2
	pand	$Xn,$F
	paddd	$t3,$C
	movdqu	0xc0-0x80($ctx),$t3
	pand	$Xn,$G
	paddd	$Xi,$D
	movdqu	0xe0-0x80($ctx),$Xi
	pand	$Xn,$H
	paddd	$t1,$E
	paddd	$t2,$F
	movdqu	$A,0x00-0x80($ctx)
	paddd	$t3,$G
	movdqu	$B,0x20-0x80($ctx)
	paddd	$Xi,$H
	movdqu	$C,0x40-0x80($ctx)
	movdqu	$D,0x60-0x80($ctx)
	movdqu	$E,0x80-0x80($ctx)
	movdqu	$F,0xa0-0x80($ctx)
	movdqu	$G,0xc0-0x80($ctx)
	movdqu	$H,0xe0-0x80($ctx)

	movdqa	$sigma,(%rbx)		# save counters
	movdqa	.Lpbswap(%rip),$Xn
	dec	$num
	jnz	.Loop

	mov	`$REG_SZ*17+8`(%rsp),$num
	lea	$REG_SZ($ctx),$ctx
	lea	`16*$REG_SZ/4`($inp),$inp
	dec	$num
	jnz	.Loop_grande

.Ldone:
	mov	`$REG_SZ*17`(%rsp),%rax	# original %rsp
___
$code.=<<___ if ($win64);
	movaps	-0xb8(%rax),%xmm6
	movaps	-0xa8(%rax),%xmm7
	movaps	-0x98(%rax),%xmm8
	movaps	-0x88(%rax),%xmm9
	movaps	-0x78(%rax),%xmm10
	movaps	-0x68(%rax),%xmm11
	movaps	-0x58(%rax),%xmm12
	movaps	-0x48(%rax),%xmm13
	movaps	-0x38(%rax),%xmm14
	movaps	-0x28(%rax),%xmm15
___
$code.=<<___;
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp
	ret
.size	sha256_multi_block,.-sha256_multi_block
___
						if ($avx) {{{
sub ROUND_00_15_avx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;

$code.=<<___ if ($i<15 && $REG_SZ==16);
	vmovd	`4*$i`(@ptr[0]),$Xi
	vmovd	`4*$i`(@ptr[1]),$t1
	vpinsrd	\$1,`4*$i`(@ptr[2]),$Xi,$Xi
	vpinsrd	\$1,`4*$i`(@ptr[3]),$t1,$t1
	vpunpckldq	$t1,$Xi,$Xi
	vpshufb	$Xn,$Xi,$Xi
___
$code.=<<___ if ($i==15 && $REG_SZ==16);
	vmovd	`4*$i`(@ptr[0]),$Xi
	lea	`16*4`(@ptr[0]),@ptr[0]
	vmovd	`4*$i`(@ptr[1]),$t1
	lea	`16*4`(@ptr[1]),@ptr[1]
	vpinsrd	\$1,`4*$i`(@ptr[2]),$Xi,$Xi
	lea	`16*4`(@ptr[2]),@ptr[2]
	vpinsrd	\$1,`4*$i`(@ptr[3]),$t1,$t1
	lea	`16*4`(@ptr[3]),@ptr[3]
	vpunpckldq	$t1,$Xi,$Xi
	vpshufb	$Xn,$Xi,$Xi
___
$code.=<<___ if ($i<15 && $REG_SZ==32);
	vmovd	`4*$i`(@ptr[0]),$Xi
	vmovd	`4*$i`(@ptr[4]),$t1
	vmovd	`4*$i`(@ptr[1]),$t2
	vmovd	`4*$i`(@ptr[5]),$t3
	vpinsrd	\$1,`4*$i`(@ptr[2]),$Xi,$Xi
	vpinsrd	\$1,`4*$i`(@ptr[6]),$t1,$t1
	vpinsrd	\$1,`4*$i`(@ptr[3]),$t2,$t2
	vpunpckldq	$t2,$Xi,$Xi
	vpinsrd	\$1,`4*$i`(@ptr[7]),$t3,$t3
	vpunpckldq	$t3,$t1,$t1
	vinserti128	$t1,$Xi,$Xi
	vpshufb	$Xn,$Xi,$Xi
___
$code.=<<___ if ($i==15 && $REG_SZ==32);
	vmovd	`4*$i`(@ptr[0]),$Xi
	lea	`16*4`(@ptr[0]),@ptr[0]
	vmovd	`4*$i`(@ptr[4]),$t1
	lea	`16*4`(@ptr[4]),@ptr[4]
	vmovd	`4*$i`(@ptr[1]),$t2
	lea	`16*4`(@ptr[1]),@ptr[1]
	vmovd	`4*$i`(@ptr[5]),$t3
	lea	`16*4`(@ptr[5]),@ptr[5]
	vpinsrd	\$1,`4*$i`(@ptr[2]),$Xi,$Xi
	lea	`16*4`(@ptr[2]),@ptr[2]
	vpinsrd	\$1,`4*$i`(@ptr[6]),$t1,$t1
	lea	`16*4`(@ptr[6]),@ptr[6]
	vpinsrd	\$1,`4*$i`(@ptr[3]),$t2,$t2
	lea	`16*4`(@ptr[3]),@ptr[3]
	vpunpckldq	$t2,$Xi,$Xi
	vpinsrd	\$1,`4*$i`(@ptr[7]),$t3,$t3
	lea	`16*4`(@ptr[7]),@ptr[7]
	vpunpckldq	$t3,$t1,$t1
	vinserti128	$t1,$Xi,$Xi
	vpshufb	$Xn,$Xi,$Xi
___
$code.=<<___;
	vpsrld	\$6,$e,$sigma
	vpslld	\$26,$e,$t3
	vmovdqu	$Xi,`&Xi_off($i)`
	vpaddd	$h,$Xi,$Xi		# Xi+=h

	vpsrld	\$11,$e,$t2
	vpxor	$t3,$sigma,$sigma
	vpslld	\$21,$e,$t3
	vpaddd	`32*($i%8)-128`($Tbl),$Xi,$Xi	# Xi+=K[round]
	vpxor	$t2,$sigma,$sigma

	vpsrld	\$25,$e,$t2
	vpxor	$t3,$sigma,$sigma
	vpslld	\$7,$e,$t3
	vpandn	$g,$e,$t1
	vpand	$f,$e,$axb		# borrow $axb
	vpxor	$t2,$sigma,$sigma

	vpsrld	\$2,$a,$h		# borrow $h
	vpxor	$t3,$sigma,$sigma	# Sigma1(e)
	vpslld	\$30,$a,$t2
	vpxor	$axb,$t1,$t1		# Ch(e,f,g)
	vpxor	$a,$b,$axb		# a^b, b^c in next round
	vpxor	$t2,$h,$h
	vpaddd	$sigma,$Xi,$Xi		# Xi+=Sigma1(e)

	vpsrld	\$13,$a,$t2
	vpslld	\$19,$a,$t3
	vpaddd	$t1,$Xi,$Xi		# Xi+=Ch(e,f,g)
	vpand	$axb,$bxc,$bxc
	vpxor	$t2,$h,$sigma

	vpsrld	\$22,$a,$t2
	vpxor	$t3,$sigma,$sigma
	vpslld	\$10,$a,$t3
	vpxor	$bxc,$b,$h		# h=Maj(a,b,c)=Ch(a^b,c,b)
	vpaddd	$Xi,$d,$d		# d+=Xi
	vpxor	$t2,$sigma,$sigma
	vpxor	$t3,$sigma,$sigma	# Sigma0(a)

	vpaddd	$Xi,$h,$h		# h+=Xi
	vpaddd	$sigma,$h,$h		# h+=Sigma0(a)
___
$code.=<<___ if (($i%8)==7);
	add	\$`32*8`,$Tbl
___
	($axb,$bxc)=($bxc,$axb);
}

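# The AVX flavour mirrors ROUND_00_15 above; the three-operand VEX
# forms make the movdqa register copies of the SSE path unnecessary,
# and the input gather uses vmovd/vpinsrd pairs (plus vinserti128 for
# the 8-lane case) in place of the movd+punpckldq ladder.
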
sub ROUND_16_XX_avx {
my $i=shift;

$code.=<<___;
	vmovdqu	`&Xi_off($i+1)`,$Xn
	vpaddd	`&Xi_off($i+9)`,$Xi,$Xi		# Xi+=X[i+9]

	vpsrld	\$3,$Xn,$sigma
	vpsrld	\$7,$Xn,$t2
	vpslld	\$25,$Xn,$t3
	vpxor	$t2,$sigma,$sigma
	vpsrld	\$18,$Xn,$t2
	vpxor	$t3,$sigma,$sigma
	vpslld	\$14,$Xn,$t3
	vmovdqu	`&Xi_off($i+14)`,$t1
	vpsrld	\$10,$t1,$axb			# borrow $axb

	vpxor	$t2,$sigma,$sigma
	vpsrld	\$17,$t1,$t2
	vpxor	$t3,$sigma,$sigma		# sigma0(X[i+1])
	vpslld	\$15,$t1,$t3
	vpaddd	$sigma,$Xi,$Xi			# Xi+=sigma0(X[i+1])
	vpxor	$t2,$axb,$sigma
	vpsrld	\$19,$t1,$t2
	vpxor	$t3,$sigma,$sigma
	vpslld	\$13,$t1,$t3
	vpxor	$t2,$sigma,$sigma
	vpxor	$t3,$sigma,$sigma		# sigma1(X[i+14])
	vpaddd	$sigma,$Xi,$Xi			# Xi+=sigma1(X[i+14])
___
	&ROUND_00_15_avx($i,@_);
	($Xi,$Xn)=($Xn,$Xi);
}

$code.=<<___;
.type	sha256_multi_block_avx,\@function,3
.align	32
sha256_multi_block_avx:
_avx_shortcut:
___
$code.=<<___ if ($avx>1);
	shr	\$32,%rcx
	cmp	\$2,$num
	jb	.Lavx
	test	\$`1<<5`,%ecx
	jnz	_avx2_shortcut
	jmp	.Lavx
.align	32
.Lavx:
___
$code.=<<___;
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,(%rsp)
	movaps	%xmm7,0x10(%rsp)
	movaps	%xmm8,0x20(%rsp)
	movaps	%xmm9,0x30(%rsp)
	movaps	%xmm10,-0x78(%rax)
	movaps	%xmm11,-0x68(%rax)
	movaps	%xmm12,-0x58(%rax)
	movaps	%xmm13,-0x48(%rax)
	movaps	%xmm14,-0x38(%rax)
	movaps	%xmm15,-0x28(%rax)
___
$code.=<<___;
	sub	\$`$REG_SZ*18`, %rsp
	and	\$-256,%rsp
	mov	%rax,`$REG_SZ*17`(%rsp)	# original %rsp
	lea	K256+128(%rip),$Tbl
	lea	`$REG_SZ*16`(%rsp),%rbx
	lea	0x80($ctx),$ctx		# size optimization

.Loop_grande_avx:
	mov	$num,`$REG_SZ*17+8`(%rsp)	# original $num
	xor	$num,$num
___
for($i=0;$i<4;$i++) {
    $code.=<<___;
	mov	`16*$i+0`($inp),@ptr[$i]	# input pointer
	mov	`16*$i+8`($inp),%ecx		# number of blocks
	cmp	$num,%ecx
	cmovg	%ecx,$num			# find maximum
	test	%ecx,%ecx
	mov	%ecx,`4*$i`(%rbx)		# initialize counters
	cmovle	$Tbl,@ptr[$i]			# cancel input
___
}
$code.=<<___;
	test	$num,$num
	jz	.Ldone_avx

	vmovdqu	0x00-0x80($ctx),$A	# load context
	lea	128(%rsp),%rax
	vmovdqu	0x20-0x80($ctx),$B
	vmovdqu	0x40-0x80($ctx),$C
	vmovdqu	0x60-0x80($ctx),$D
	vmovdqu	0x80-0x80($ctx),$E
	vmovdqu	0xa0-0x80($ctx),$F
	vmovdqu	0xc0-0x80($ctx),$G
	vmovdqu	0xe0-0x80($ctx),$H
	vmovdqu	.Lpbswap(%rip),$Xn
	jmp	.Loop_avx

.align	32
.Loop_avx:
	vpxor	$B,$C,$bxc		# magic seed
___
for($i=0;$i<16;$i++)	{ &ROUND_00_15_avx($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	vmovdqu	`&Xi_off($i)`,$Xi
	mov	\$3,%ecx
	jmp	.Loop_16_xx_avx
.align	32
.Loop_16_xx_avx:
___
for(;$i<32;$i++)	{ &ROUND_16_XX_avx($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	dec	%ecx
	jnz	.Loop_16_xx_avx

	mov	\$1,%ecx
	lea	K256+128(%rip),$Tbl
___
for($i=0;$i<4;$i++) {
    $code.=<<___;
	cmp	`4*$i`(%rbx),%ecx	# examine counters
	cmovge	$Tbl,@ptr[$i]		# cancel input
___
}
$code.=<<___;
	vmovdqa	(%rbx),$sigma		# pull counters
	vpxor	$t1,$t1,$t1
	vmovdqa	$sigma,$Xn
	vpcmpgtd	$t1,$Xn,$Xn	# mask value
	vpaddd	$Xn,$sigma,$sigma	# counters--

	vmovdqu	0x00-0x80($ctx),$t1
	vpand	$Xn,$A,$A
	vmovdqu	0x20-0x80($ctx),$t2
	vpand	$Xn,$B,$B
	vmovdqu	0x40-0x80($ctx),$t3
	vpand	$Xn,$C,$C
	vmovdqu	0x60-0x80($ctx),$Xi
	vpand	$Xn,$D,$D
	vpaddd	$t1,$A,$A
	vmovdqu	0x80-0x80($ctx),$t1
	vpand	$Xn,$E,$E
	vpaddd	$t2,$B,$B
	vmovdqu	0xa0-0x80($ctx),$t2
	vpand	$Xn,$F,$F
	vpaddd	$t3,$C,$C
	vmovdqu	0xc0-0x80($ctx),$t3
	vpand	$Xn,$G,$G
	vpaddd	$Xi,$D,$D
	vmovdqu	0xe0-0x80($ctx),$Xi
	vpand	$Xn,$H,$H
	vpaddd	$t1,$E,$E
	vpaddd	$t2,$F,$F
	vmovdqu	$A,0x00-0x80($ctx)
	vpaddd	$t3,$G,$G
	vmovdqu	$B,0x20-0x80($ctx)
	vpaddd	$Xi,$H,$H
	vmovdqu	$C,0x40-0x80($ctx)
	vmovdqu	$D,0x60-0x80($ctx)
	vmovdqu	$E,0x80-0x80($ctx)
	vmovdqu	$F,0xa0-0x80($ctx)
	vmovdqu	$G,0xc0-0x80($ctx)
	vmovdqu	$H,0xe0-0x80($ctx)

	vmovdqu	$sigma,(%rbx)		# save counters
	vmovdqu	.Lpbswap(%rip),$Xn
	dec	$num
	jnz	.Loop_avx

	mov	`$REG_SZ*17+8`(%rsp),$num
	lea	$REG_SZ($ctx),$ctx
	lea	`16*$REG_SZ/4`($inp),$inp
	dec	$num
	jnz	.Loop_grande_avx

.Ldone_avx:
	mov	`$REG_SZ*17`(%rsp),%rax	# original %rsp
	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xb8(%rax),%xmm6
	movaps	-0xa8(%rax),%xmm7
	movaps	-0x98(%rax),%xmm8
	movaps	-0x88(%rax),%xmm9
	movaps	-0x78(%rax),%xmm10
	movaps	-0x68(%rax),%xmm11
	movaps	-0x58(%rax),%xmm12
	movaps	-0x48(%rax),%xmm13
	movaps	-0x38(%rax),%xmm14
	movaps	-0x28(%rax),%xmm15
___
$code.=<<___;
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp
	ret
.size	sha256_multi_block_avx,.-sha256_multi_block_avx
___
						if ($avx>1) {
$code =~ s/\`([^\`]*)\`/eval $1/gem;

$REG_SZ=32;
@ptr=map("%r$_",(12..15,8..11));

@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("%ymm$_",(8..15));
($t1,$t2,$t3,$axb,$bxc,$Xi,$Xn,$sigma)=map("%ymm$_",(0..7));

$code.=<<___;
.type	sha256_multi_block_avx2,\@function,3
.align	32
sha256_multi_block_avx2:
_avx2_shortcut:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,(%rsp)
	movaps	%xmm7,0x10(%rsp)
	movaps	%xmm8,0x20(%rsp)
	movaps	%xmm9,0x30(%rsp)
	movaps	%xmm10,0x40(%rsp)
	movaps	%xmm11,0x50(%rsp)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
___
$code.=<<___;
	sub	\$`$REG_SZ*18`, %rsp
	and	\$-256,%rsp
	mov	%rax,`$REG_SZ*17`(%rsp)	# original %rsp
	lea	K256+128(%rip),$Tbl
	lea	0x80($ctx),$ctx		# size optimization

.Loop_grande_avx2:
	mov	$num,`$REG_SZ*17+8`(%rsp)	# original $num
	xor	$num,$num
	lea	`$REG_SZ*16`(%rsp),%rbx
___
for($i=0;$i<8;$i++) {
    $code.=<<___;
	mov	`16*$i+0`($inp),@ptr[$i]	# input pointer
	mov	`16*$i+8`($inp),%ecx		# number of blocks
	cmp	$num,%ecx
	cmovg	%ecx,$num			# find maximum
	test	%ecx,%ecx
	mov	%ecx,`4*$i`(%rbx)		# initialize counters
	cmovle	$Tbl,@ptr[$i]			# cancel input
___
}
$code.=<<___;
	vmovdqu	0x00-0x80($ctx),$A	# load context
	lea	128(%rsp),%rax
	vmovdqu	0x20-0x80($ctx),$B
	lea	256+128(%rsp),%rbx
	vmovdqu	0x40-0x80($ctx),$C
	vmovdqu	0x60-0x80($ctx),$D
	vmovdqu	0x80-0x80($ctx),$E
	vmovdqu	0xa0-0x80($ctx),$F
	vmovdqu	0xc0-0x80($ctx),$G
	vmovdqu	0xe0-0x80($ctx),$H
	vmovdqu	.Lpbswap(%rip),$Xn
	jmp	.Loop_avx2

.align	32
.Loop_avx2:
	vpxor	$B,$C,$bxc		# magic seed
___
for($i=0;$i<16;$i++)	{ &ROUND_00_15_avx($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	vmovdqu	`&Xi_off($i)`,$Xi
	mov	\$3,%ecx
	jmp	.Loop_16_xx_avx2
.align	32
.Loop_16_xx_avx2:
___
for(;$i<32;$i++)	{ &ROUND_16_XX_avx($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	dec	%ecx
	jnz	.Loop_16_xx_avx2

	mov	\$1,%ecx
	lea	`$REG_SZ*16`(%rsp),%rbx
	lea	K256+128(%rip),$Tbl
___
for($i=0;$i<8;$i++) {
    $code.=<<___;
	cmp	`4*$i`(%rbx),%ecx	# examine counters
	cmovge	$Tbl,@ptr[$i]		# cancel input
___
}
$code.=<<___;
	vmovdqa	(%rbx),$sigma		# pull counters
	vpxor	$t1,$t1,$t1
	vmovdqa	$sigma,$Xn
	vpcmpgtd	$t1,$Xn,$Xn	# mask value
	vpaddd	$Xn,$sigma,$sigma	# counters--

	vmovdqu	0x00-0x80($ctx),$t1
	vpand	$Xn,$A,$A
	vmovdqu	0x20-0x80($ctx),$t2
	vpand	$Xn,$B,$B
	vmovdqu	0x40-0x80($ctx),$t3
	vpand	$Xn,$C,$C
	vmovdqu	0x60-0x80($ctx),$Xi
	vpand	$Xn,$D,$D
	vpaddd	$t1,$A,$A
	vmovdqu	0x80-0x80($ctx),$t1
	vpand	$Xn,$E,$E
	vpaddd	$t2,$B,$B
	vmovdqu	0xa0-0x80($ctx),$t2
	vpand	$Xn,$F,$F
	vpaddd	$t3,$C,$C
	vmovdqu	0xc0-0x80($ctx),$t3
	vpand	$Xn,$G,$G
	vpaddd	$Xi,$D,$D
	vmovdqu	0xe0-0x80($ctx),$Xi
	vpand	$Xn,$H,$H
	vpaddd	$t1,$E,$E
	vpaddd	$t2,$F,$F
	vmovdqu	$A,0x00-0x80($ctx)
	vpaddd	$t3,$G,$G
	vmovdqu	$B,0x20-0x80($ctx)
	vpaddd	$Xi,$H,$H
	vmovdqu	$C,0x40-0x80($ctx)
	vmovdqu	$D,0x60-0x80($ctx)
	vmovdqu	$E,0x80-0x80($ctx)
	vmovdqu	$F,0xa0-0x80($ctx)
	vmovdqu	$G,0xc0-0x80($ctx)
	vmovdqu	$H,0xe0-0x80($ctx)

	vmovdqu	$sigma,(%rbx)		# save counters
	lea	256+128(%rsp),%rbx
	vmovdqu	.Lpbswap(%rip),$Xn
	dec	$num
	jnz	.Loop_avx2

	#mov	`$REG_SZ*17+8`(%rsp),$num
	#lea	$REG_SZ($ctx),$ctx
	#lea	`16*$REG_SZ/4`($inp),$inp
	#dec	$num
	#jnz	.Loop_grande_avx2

.Ldone_avx2:
	mov	`$REG_SZ*17`(%rsp),%rax	# original %rsp
	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp
	ret
.size	sha256_multi_block_avx2,.-sha256_multi_block_avx2
___
					} }}}
$code.=<<___;
.align	256
K256:
___
sub TABLE {
    foreach (@_) {
	$code.=<<___;
	.long	$_,$_,$_,$_
	.long	$_,$_,$_,$_
___
    }
}
&TABLE(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 );
$code.=<<___;
.Lpbswap:
	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap
	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap
___

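# The substitutions below are what allow the AVX2 path to reuse the
# same round subroutines with %ymm register names: instructions that
# only take %xmm operands (vmovd/vmovq, vpinsrd, vpextrd,
# vpbroadcast*) are rewritten to the matching %xmm registers, and
# vinserti128 gets its leading immediate (1) spelled in. It is a
# plain-text fixup pass applied as each line of $code is printed.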
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;

	s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go		or
	s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go		or
	s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+),%ymm([0-9]+)/$1$2%xmm$3,%xmm$4/go	or
	s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go	or
	s/\b(vinserti128)\b(\s+)%ymm/$1$2\$1,%xmm/go		or
	s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;

	print $_,"\n";
}

close STDOUT;