#! /usr/bin/env perl
# Copyright 1998-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# "[Re]written" was achieved in two major overhauls. In 2004 the BODY_*
# functions were re-implemented to address the P4 performance issue [see
# commentary below], and in 2006 the rest was rewritten in order to
# gain freedom to liberate licensing terms.

# January, September 2004.
#
# It was noted that the Intel IA-32 C compiler generates code which
# performs ~30% *faster* on the P4 CPU than the original *hand-coded*
# SHA1 assembler implementation. To address this problem (and
# prove that humans are still better than machines:-), the
# original code was overhauled, which resulted in the following
# performance changes:
#
#			compared with original	compared with Intel cc
#			assembler impl.		generated code
# Pentium		-16%			+48%
# PIII/AMD		+8%			+16%
# P4			+85%(!)			+45%
#
# As you can see, the Pentium came out as the loser:-( Yet I reckoned
# that the improvement on P4 outweighs the loss and incorporated this
# re-tuned code into 0.9.7 and later.
# ----------------------------------------------------------------

# August 2009.
#
# George Spelvin has tipped that F_40_59(b,c,d) can be rewritten as
# '(c&d) + (b&(c^d))', which allows accumulating partial results
# and lightens "pressure" on scratch registers. This resulted in
# a >12% performance improvement on contemporary AMD cores (with no
# degradation on other CPUs:-). Also, the code was revised to maximize
# the "distance" between instructions producing input to the 'lea'
# instruction and the 'lea' instruction itself, which is essential for
# the Intel Atom core and resulted in a ~15% improvement.
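#
# To see why the rewrite is legitimate: F_40_59 is the bitwise majority
# function, and the bits of '(c&d)' and '(b&(c^d))' are mutually
# exclusive (a set bit in c^d implies a clear bit in c&d), so the '+'
# never carries and is equivalent to '|'. A minimal Perl sketch of the
# identity, for illustration only (not used by the generator):
#
#	sub maj_ref { my ($b,$c,$d)=@_; ($b&$c)|($c&$d)|($b&$d) }
#	sub maj_opt { my ($b,$c,$d)=@_; ($c&$d)+($b&($c^$d)) }
#
# The two agree for all inputs, and the '+' form is what lets partial
# results accumulate in 'lea'.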

# October 2010.
#
# Add an SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
# is to offload the message schedule, denoted by Wt in the NIST
# specification, or Xupdate in the OpenSSL source, to the SIMD unit.
# The idea is not novel, and in an SSE2 context was first explored by
# Dean Gaudet in 2004, see http://arctic.org/~dean/crypto/sha1.html.
# Since then several things have changed that made it interesting again:
#
# a) XMM units became faster and wider;
# b) the instruction set became more versatile;
# c) an important observation was made by Max Locktyukhin, which made
#    it possible to reduce the number of instructions required to
#    perform the operation in question; for further details see
#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.
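#
# For reference, the message-schedule recurrence being offloaded is the
# one from the NIST specification,
#
#	W[t] = ROTL1( W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16] ),	16<=t<=79
#
# and the observation in c) is that for t>=32 it can be restated with
# no dependency on the three most recent values,
#
#	W[t] = ROTL2( W[t-6] ^ W[t-16] ^ W[t-28] ^ W[t-32] ),
#
# which allows four W values to be computed per SIMD pass (compare the
# comments in Xupdate_ssse3_32_79 below).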

# April 2011.
#
# Add an AVX code path, probably the most controversial one... The
# thing is that the switch to AVX alone improves performance by as
# little as 4% in comparison to the SSSE3 code path. But the result
# below doesn't look like a 4% improvement... The trouble is that
# Sandy Bridge decodes 'ro[rl]' as a pair of µ-ops, and it's the
# additional µ-ops, two per round, that make it run slower than Core2
# and Westmere. But 'sh[rl]d' is decoded as a single µ-op by Sandy
# Bridge, and it's replacing 'ro[rl]' with the equivalent 'sh[rl]d'
# that is responsible for the impressive 5.1 cycles per processed
# byte. But 'sh[rl]d' has not traditionally been fast, nor does it
# appear to be fast on the upcoming Bulldozer [according to its
# optimization manual], which is why the AVX code path is guarded by
# *both* the AVX bit and a synthetic bit denoting Intel CPUs. One can
# argue that it's unfair to AMD, but without 'sh[rl]d' it makes no
# sense to keep the AVX code path. If somebody feels that strongly,
# it's probably more appropriate to discuss the possibility of using
# the XOP vector rotate on AMD...
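#
# The 'ro[rl]' -> 'sh[rl]d' substitution relies on a plain x86 identity:
# a double-shift whose both operands name the same register degenerates
# into a rotate, e.g.
#
#	rol	eax,5	is equivalent to	shld	eax,eax,5
#	ror	eax,2	is equivalent to	shrd	eax,eax,2
#
# which is exactly how $_rol/$_ror are redefined for the AVX path below.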

# March 2014.
#
# Add support for Intel SHA Extensions.

######################################################################
# Current performance is summarized in the following table. Numbers are
# CPU clock cycles spent to process a single byte (less is better).
#
#			x86		SSSE3		AVX
# Pentium		15.7		-
# PIII			11.5		-
# P4			10.6		-
# AMD K8		7.1		-
# Core2			7.3		6.0/+22%	-
# Westmere		7.3		5.5/+33%	-
# Sandy Bridge		8.8		6.2/+40%	5.1(**)/+73%
# Ivy Bridge		7.2		4.8/+51%	4.7(**)/+53%
# Haswell		6.5		4.3/+51%	4.1(**)/+58%
# Skylake		6.4		4.1/+55%	4.1(**)/+55%
# Bulldozer		11.6		6.0/+92%
# VIA Nano		10.6		7.5/+41%
# Atom			12.5		9.3(*)/+35%
# Silvermont		14.5		9.9(*)/+46%
# Goldmont		8.8		6.7/+30%	1.7(***)/+415%
#
# (*)	The loop is 1056 instructions long and the expected result is
#	~8.25; the discrepancy is because of front-end limitations,
#	so-called MS-ROM penalties, and on Silvermont even the rotate's
#	limited parallelism.
#
# (**)	As per the comment above, the result is for AVX *plus* sh[rl]d.
#
# (***)	SHAEXT result

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop and open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

$xmm=$ymm=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

$ymm=1 if ($xmm &&
		`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
			=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
		$1>=2.19);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
		$1>=2.03);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
		`ml 2>&1` =~ /Version ([0-9]+)\./ &&
		$1>=10);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([0-9]+\.[0-9]+)/ &&
		$2>=3.0);	# first version supporting AVX

$shaext=$xmm;	### set to zero if compiling for 1.0.1

&external_label("OPENSSL_ia32cap_P") if ($xmm);


$A="eax";
$B="ebx";
$C="ecx";
$D="edx";
$E="edi";
$T="esi";
$tmp1="ebp";

@V=($A,$B,$C,$D,$E,$T);

$alt=0;	# 1 denotes alternative IALU implementation, which performs
	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
	# Sandy Bridge...

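# Round functions, as implemented by the BODY_* subs below. The
# xor-based forms on the right are classic equivalents of the textbook
# definitions on the left and are cheaper to evaluate in registers:
#
#	F_00_19(b,c,d) = (b&c)|(~b&d)		= ((c^d)&b)^d
#	F_20_39(b,c,d) = b^c^d
#	F_40_59(b,c,d) = (b&c)|(b&d)|(c&d)	= (c&d)+(b&(c^d))
#	F_60_79(b,c,d) = b^c^d
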
sub BODY_00_15
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("00_15 $n");

	&mov($f,$c);			# f to hold F_00_19(b,c,d)
	if ($n==0) { &mov($tmp1,$a); }
	else       { &mov($a,$tmp1); }
	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
	&xor($f,$d);
	&add($tmp1,$e);			# tmp1+=e;
	&mov($e,&swtmp($n%16));		# e becomes volatile and is loaded
					# with xi, also note that e becomes
					# f in next round...
	&and($f,$b);
	&rotr($b,2);			# b=ROTATE(b,30)
	&xor($f,$d);			# f holds F_00_19(b,c,d)
	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi

	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
		      &add($f,$tmp1); }	# f+=tmp1
	else        { &add($tmp1,$f); }	# f becomes a in next round
	&mov($tmp1,$a)			if ($alt && $n==15);
	}

sub BODY_16_19
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("16_19 $n");

if ($alt) {
	&xor($c,$d);
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&xor($c,$d);			# restore $c
	&mov($tmp1,$a);			# b in next round
	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&mov($tmp1,$a);
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($tmp1,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_20_39
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;

	&comment("20_39 $n");

if ($alt) {
	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+8)%16));
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&mov($tmp1,$a);			# b in next round
	&rotr($b,7);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c)			if($n==39);# warm up for BODY_40_59
	&and($tmp1,$b)			if($n==39);
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
	&rotr($a,5)			if ($n==79);
} else {
	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$c);
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($tmp1,$a);
	&rotl($tmp1,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_40_59
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("40_59 $n");

if ($alt) {
	&add($e,$tmp1);			# e+=b&(c^d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&mov($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&xor($c,$d);			# restore $c
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&and($tmp1,$c);
	&rotr($b,7);			# b=ROTATE(b,30)
	&add($e,$tmp1);			# e+=c&d
	&mov($tmp1,$a);			# b in next round
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c)			if ($n<59);
	&and($tmp1,$b)			if ($n<59);# tmp1 to hold F_40_59(b,c,d)
	&lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($tmp1,$e);			# b&(c^d)+=e
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($e,$a);			# e becomes volatile
	&rotl($e,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f);		# xi=f
	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
	&mov($tmp1,$c);
	&add($f,$e);			# f+=ROTATE(a,5)
	&and($tmp1,$d);
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=c&d
}
	}

&function_begin("sha1_block_data_order");
if ($xmm) {
  &static_label("shaext_shortcut")	if ($shaext);
  &static_label("ssse3_shortcut");
  &static_label("avx_shortcut")		if ($ymm);
  &static_label("K_XX_XX");

	&call	(&label("pic_point"));	# make it PIC!
  &set_label("pic_point");
	&blindpop($tmp1);
	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));

	&mov	($A,&DWP(0,$T));
	&mov	($D,&DWP(4,$T));
	&test	($D,1<<9);		# check SSSE3 bit
	&jz	(&label("x86"));
	&mov	($C,&DWP(8,$T));
	&test	($A,1<<24);		# check FXSR bit
	&jz	(&label("x86"));
	if ($shaext) {
		&test	($C,1<<29);	# check SHA bit
		&jnz	(&label("shaext_shortcut"));
	}
	if ($ymm) {
		&and	($D,1<<28);	# mask AVX bit
		&and	($A,1<<30);	# mask "Intel CPU" bit
		&or	($A,$D);
		&cmp	($A,1<<28|1<<30);
		&je	(&label("avx_shortcut"));
	}
	&jmp	(&label("ssse3_shortcut"));
  &set_label("x86",16);
}
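# (The dispatch above reads OPENSSL_ia32cap_P: word 0 supplies the FXSR
# and synthetic "Intel CPU" bits, word 1 the SSSE3 and AVX bits, and
# word 2 the SHA-extensions bit.)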
	&mov($tmp1,&wparam(0));	# SHA_CTX *c
	&mov($T,&wparam(1));	# const void *input
	&mov($A,&wparam(2));	# size_t num
	&stack_push(16+3);	# allocate X[16]
	&shl($A,6);
	&add($A,$T);
	&mov(&wparam(2),$A);	# pointer beyond the end of input
	&mov($E,&DWP(16,$tmp1));# pre-load E
	&jmp(&label("loop"));

&set_label("loop",16);

	# copy input chunk to X, but reversing byte order!
	for ($i=0; $i<16; $i+=4)
		{
		&mov($A,&DWP(4*($i+0),$T));
		&mov($B,&DWP(4*($i+1),$T));
		&mov($C,&DWP(4*($i+2),$T));
		&mov($D,&DWP(4*($i+3),$T));
		&bswap($A);
		&bswap($B);
		&bswap($C);
		&bswap($D);
		&mov(&swtmp($i+0),$A);
		&mov(&swtmp($i+1),$B);
		&mov(&swtmp($i+2),$C);
		&mov(&swtmp($i+3),$D);
		}
	&mov(&wparam(1),$T);	# redundant in 1st spin

	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
	&mov($B,&DWP(4,$tmp1));
	&mov($C,&DWP(8,$tmp1));
	&mov($D,&DWP(12,$tmp1));
	# E is pre-loaded

	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
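	# Each 'unshift(@V,pop(@V))' above rotates the register assignment
	# (a,b,c,d,e,f) -> (f,a,b,c,d,e), so every round body is emitted
	# with a fixed argument order instead of moving data between
	# registers at run time.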

	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check

	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
	&mov($D,&wparam(1));	# D is last "T" and is discarded

	&add($E,&DWP(0,$tmp1));	# E is last "A"...
	&add($T,&DWP(4,$tmp1));
	&add($A,&DWP(8,$tmp1));
	&add($B,&DWP(12,$tmp1));
	&add($C,&DWP(16,$tmp1));

	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
	&add($D,64);		# advance input pointer
	&mov(&DWP(4,$tmp1),$T);
	&cmp($D,&wparam(2));	# have we reached the end yet?
	&mov(&DWP(8,$tmp1),$A);
	&mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
	&mov(&DWP(12,$tmp1),$B);
	&mov($T,$D);		# input pointer
	&mov(&DWP(16,$tmp1),$C);
	&jb(&label("loop"));

	&stack_pop(16+3);
&function_end("sha1_block_data_order");

if ($xmm) {
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA1 update function.
#
my ($ctx,$inp,$num)=("edi","esi","ecx");
my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
my @MSG=map("xmm$_",(4..7));

sub sha1rnds4 {
 my ($dst,$src,$imm)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm);	}
}
sub sha1op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha1nexte	{ sha1op38(0xc8,@_); }
sub sha1msg1	{ sha1op38(0xc9,@_); }
sub sha1msg2	{ sha1op38(0xca,@_); }
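
# The helpers above hand-assemble the SHA instructions, so the module
# also builds with assemblers that predate the mnemonics. A sketch of
# the encoding scheme: opcode bytes, then a ModR/M byte computed as
# 0xc0|(dst<<3)|src, then, for sha1rnds4 only, an immediate, e.g.
#
#	sha1rnds4	xmm1,xmm2,3	=>	0f 3a cc ca 03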

&function_begin("_sha1_block_data_order_shaext");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("shaext_shortcut");
	&mov	($ctx,&wparam(0));
	&mov	("ebx","esp");
	&mov	($inp,&wparam(1));
	&mov	($num,&wparam(2));
	&sub	("esp",32);

	&movdqu	($ABCD,&QWP(0,$ctx));
	&movd	($E,&DWP(16,$ctx));
	&and	("esp",-32);
	&movdqa	($BSWAP,&QWP(0x50,$tmp1));	# byte-n-word swap

	&movdqu	(@MSG[0],&QWP(0,$inp));
	&pshufd	($ABCD,$ABCD,0b00011011);	# flip word order
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufd	($E,$E,0b00011011);		# flip word order
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$BSWAP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[1],$BSWAP);
	&pshufb	(@MSG[2],$BSWAP);
	&pshufb	(@MSG[3],$BSWAP);
	&jmp	(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&dec	($num);
	&lea	("eax",&DWP(0x40,$inp));
	&movdqa	(&QWP(0,"esp"),$E);	# offload $E
	&paddd	($E,@MSG[0]);
	&cmovne	($inp,"eax");
	&movdqa	(&QWP(16,"esp"),$ABCD);	# offload $ABCD

for($i=0;$i<20-4;$i+=2) {
	&sha1msg1	(@MSG[0],@MSG[1]);
	&movdqa		($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,int($i/5));	# 0-3...
	&sha1nexte	($E_,@MSG[1]);
	&pxor		(@MSG[0],@MSG[2]);
	&sha1msg1	(@MSG[1],@MSG[2]);
	&sha1msg2	(@MSG[0],@MSG[3]);

	&movdqa		($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,int(($i+1)/5));
	&sha1nexte	($E,@MSG[2]);
	&pxor		(@MSG[1],@MSG[3]);
	&sha1msg2	(@MSG[1],@MSG[0]);

	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
}
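	# sha1rnds4's 2-bit immediate selects the round function and
	# constant for the next four rounds (0..3 for rounds 0-19, 20-39,
	# 40-59, 60-79), hence the int($i/5) progression above and the
	# constant 3 in the tail below.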
	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);	# 64-67
	&sha1nexte	($E_,@MSG[1]);
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufb	(@MSG[0],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);	# 68-71
	&sha1nexte	($E,@MSG[2]);
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[1],$BSWAP);

	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);	# 72-75
	&sha1nexte	($E_,@MSG[3]);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[2],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);	# 76-79
	&movdqa	($E_,&QWP(0,"esp"));
	&pshufb	(@MSG[3],$BSWAP);
	&sha1nexte	($E,$E_);
	&paddd	($ABCD,&QWP(16,"esp"));

	&jnz	(&label("loop_shaext"));

	&pshufd	($ABCD,$ABCD,0b00011011);
	&pshufd	($E,$E,0b00011011);
	&movdqu	(&QWP(0,$ctx),$ABCD);
	&movd	(&DWP(16,$ctx),$E);
	&mov	("esp","ebx");
&function_end("_sha1_block_data_order_shaext");
}
######################################################################
# The SSSE3 implementation.
#
# %xmm[0-7] are used as a ring @X[] buffer containing quadruples of the
# last 32 elements of the message schedule or Xupdate outputs. The
# first 4 quadruples are simply byte-swapped input, the next 4 are
# calculated according to the method originally suggested by Dean
# Gaudet (modulo being implemented in SSSE3). Once 8 quadruples or 32
# elements are collected, it switches to the routine proposed by Max
# Locktyukhin.
#
# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
# buffer on the stack. Keep in mind that X[2] is an alias of X[-6],
# X[3] of X[-5], and X[4] of X[-4]...
#
# Another notable optimization is aggressive stack frame compression,
# aiming to minimize the number of 9-byte instructions...
#
# Yet another notable optimization is the "jumping" $B variable. It
# means that there is no register permanently allocated for the $B
# value. This allowed eliminating one instruction from body_20_39...
#
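# A note on indexing: @X[] is addressed with masked offsets, so
# @X[-4&7] is @X[4], and "rotating" @X with push(@X,shift(@X)) keeps
# the Perl indices aligned with the logical X[] positions, e.g.:
#
#	my @X=map("xmm$_",(4..7,0..3));	# @X[-4&7] is "xmm0"
#	push(@X,shift(@X));		# now @X[-4&7] is "xmm1"
#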
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };

&function_begin("_sha1_block_data_order_ssse3");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("ssse3_shortcut");

	&movdqa	(@X[3],&QWP(0,$tmp1));		# K_00_19
	&movdqa	(@X[4],&QWP(16,$tmp1));		# K_20_39
	&movdqa	(@X[5],&QWP(32,$tmp1));		# K_40_59
	&movdqa	(@X[6],&QWP(48,$tmp1));		# K_60_79
	&movdqa	(@X[2],&QWP(64,$tmp1));		# pbswap mask

	&mov	($E,&wparam(0));		# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&movdqa	(&QWP(112+16,"esp"),@X[5]);
	&movdqa	(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&movdqa	(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&movdqa	(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));		# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);			# magic seed

	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&movdqu	(@X[-3&7],&QWP(-48,$inp));
	&movdqu	(@X[-2&7],&QWP(-32,$inp));
	&movdqu	(@X[-1&7],&QWP(-16,$inp));
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&pshufb	(@X[-3&7],@X[2]);
	&pshufb	(@X[-2&7],@X[2]);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&pshufb	(@X[-1&7],@X[2]);
	&paddd	(@X[-4&7],@X[3]);		# add K_00_19
	&paddd	(@X[-3&7],@X[3]);
	&paddd	(@X[-2&7],@X[3]);
	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
	&psubd	(@X[-4&7],@X[3]);		# restore X[]
	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
	&psubd	(@X[-3&7],@X[3]);
	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
	&mov	(@T[1],$C);
	&psubd	(@X[-2&7],@X[3]);
	&xor	(@T[1],$D);
	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa	(@X[0],@X[-3&7]);
	&and	(@T[0],@T[1]);
	&jmp	(&label("loop"));

######################################################################
# The SSE instruction sequence is first broken into groups of
# independent instructions, independent with respect to their inputs
# and the shifter (not all architectures have more than one). Then
# IALU instructions are "knitted in" between the SSE groups. The
# distance is maintained for an SSE latency of 2, in the hope that it
# fits the upcoming AMD Bulldozer better [which allegedly also
# implements SSSE3]...
#
# Temporary register usage. X[2] is volatile at the entry and at the
# end is restored from the backtrace ring buffer. X[3] is expected to
# contain the current K_XX_XX constant and is used to calculate X[-1]+K
# from the previous round; it becomes volatile the moment the value is
# saved to the stack for transfer to the IALU. X[4] becomes volatile
# whenever X[-4] is accumulated and offloaded to the backtrace ring
# buffer; at the end it is loaded with the next K_XX_XX [which becomes
# X[3] in the next round]...
#
sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns));
	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
	&movdqa	(@X[2],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	&paddd	(@X[3],@X[-1&7]);
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	&psrldq	(@X[2],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror

	&pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&movdqa	(@X[4],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&movdqa	(@X[2],@X[0]);
	 eval(shift(@insns));

	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
	&paddd	(@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@X[2],31);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&movdqa	(@X[3],@X[4]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@X[4],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&por	(@X[0],@X[2]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pslld	(@X[3],2);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&pxor	(@X[0],@X[4]);
	&movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
	&pshufd	(@X[1],@X[-3&7],0xee)	if ($Xi<7);	# was &movdqa	(@X[1],@X[-2&7])
	&pshufd	(@X[3],@X[-1&7],0xee)	if ($Xi==7);
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

	$Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# body_20_39
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	&punpcklqdq(@X[2],@X[-1&7]);	# compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
	if ($Xi%5) {
	  &movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	 eval(shift(@insns));		# ror
	&paddd	(@X[3],@X[-1&7]);
	 eval(shift(@insns));

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&movdqa	(@X[2],@X[0]);
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	&pslld	(@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&psrld	(@X[2],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns))		if (@insns[1] =~ /_rol/);
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	&por	(@X[0],@X[2]);		# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&pshufd	(@X[3],@X[-1],0xee)	if ($Xi<19);	# was &movdqa	(@X[3],@X[0])
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

	$Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }		# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
	&movdqu	(@X[-3&7],&QWP(16,$inp));
	&movdqu	(@X[-2&7],&QWP(32,$inp));
	&movdqu	(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

	$Xi=0;
}

sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pshufb	(@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&psubd	(@X[($Xi-4)&7],@X[3]);

	foreach (@insns) { eval; }
	$Xi++;
}

sub Xtail_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

sub body_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(c^d)&b
	return &body_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&$_ror	($b,$j?7:2);',		# $b>>>2
	'&xor	(@T[0],$d);',
	'&mov	(@T[1],$a);',		# $b in next round

	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	($b,$c);',		# $c^$d for next round

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&and	(@T[1],$b);',		# ($b&($c^$d)) for next round

	'&xor	($b,$c);',		# restore $b
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_20_39 () {	# b^d^c
	# on entry @T[0]=b^d
	return &body_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	(@T[0],$d) if($j==19);'.
	'&xor	(@T[0],$c) if($j> 19);',	# ($b^$d^$c)
	'&mov	(@T[1],$a);',		# $b in next round

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c) if ($j< 79);',	# $b^$d for next round

	'&$_ror	($b,7);',		# $b>>>2
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_40_59 () {	# ((b^c)&(c^d))^c
	# on entry @T[0]=(b^c), (c^=d)
	$rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&and	(@T[0],$c) if ($j>=40);',	# (b^c)&(c^d)
	'&xor	($c,$d) if ($j>=40);',		# restore $c

	'&$_ror	($b,7);',		# $b>>>2
	'&mov	(@T[1],$a);',		# $b for next round
	'&xor	(@T[0],$c);',

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c) if ($j==59);'.
	'&xor	(@T[1],$b) if ($j< 59);',	# b^c for next round

	'&xor	($b,$c) if ($j< 59);',	# c^d for next round
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
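
# The body_* subs above return lists of code *strings*; the Xupdate_*
# subs eval() them one at a time between SIMD instructions, which is
# how the IALU round code gets "knitted in" with the SSE message
# schedule (see the commentary before Xupdate_ssse3_16_31).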
######
sub bodyx_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
	return &bodyx_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	($b,$b,2)			if ($j==0);'.	# $b>>>2
	'&rorx	($b,@T[1],7)			if ($j!=0);',	# $b>>>2
	'&lea	($e,&DWP(0,$e,@T[0]));',
	'&rorx	(@T[0],$a,5);',

	'&andn	(@T[1],$a,$c);',
	'&and	($a,$b)',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"));',	# X[]+K xfer

	'&xor	(@T[1],$a)',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_20_39 () {	# b^d^c
	# on start $b=b^c^d
	return &bodyx_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&add	($e,($j==19?@T[0]:$b))',
	'&rorx	($b,@T[1],7);',	# $b>>>2
	'&rorx	(@T[0],$a,5);',

	'&xor	($a,$b)				if ($j<79);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))	if ($j<79);',	# X[]+K xfer
	'&xor	($a,$c)				if ($j<79);',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_40_59 () {	# ((b^c)&(c^d))^c
	# on start $b=((b^c)&(c^d))^c
	return &bodyx_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	(@T[0],$a,5)',
	'&lea	($e,&DWP(0,$e,$b))',
	'&rorx	($b,@T[1],7)',	# $b>>>2
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))',	# X[]+K xfer

	'&mov	(@T[1],$c)',
	'&xor	($a,$b)',	# b^c for next round
	'&xor	(@T[1],$b)',	# c^d for next round

	'&and	($a,@T[1])',
	'&add	($e,@T[0])',
	'&xor	($a,$b)'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

&set_label("loop",16);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,$C);
	&mov	(&DWP(12,@T[1]),$D);
	&xor	($B,$D);
	&mov	(&DWP(16,@T[1]),$E);
	&mov	(@T[1],@T[0]);
	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa	(@X[0],@X[-3&7]);
	&and	(@T[0],$B);
	&mov	($B,$T[1]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

&function_end("_sha1_block_data_order_ssse3");

$rx=0;	# reset

if ($ymm) {
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };

&function_begin("_sha1_block_data_order_avx");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("avx_shortcut");
	&vzeroall();

	&vmovdqa(@X[3],&QWP(0,$tmp1));		# K_00_19
	&vmovdqa(@X[4],&QWP(16,$tmp1));		# K_20_39
	&vmovdqa(@X[5],&QWP(32,$tmp1));		# K_40_59
	&vmovdqa(@X[6],&QWP(48,$tmp1));		# K_60_79
	&vmovdqa(@X[2],&QWP(64,$tmp1));		# pbswap mask

	&mov	($E,&wparam(0));		# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&vmovdqa(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));		# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);			# magic seed

	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
	&vpaddd	(@X[1],@X[-3&7],@X[3]);
	&vpaddd	(@X[2],@X[-2&7],@X[3]);
	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
	&mov	(@T[1],$C);
	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
	&xor	(@T[1],$D);
	&vmovdqa(&QWP(0+32,"esp"),@X[2]);
	&and	(@T[0],@T[1]);
	&jmp	(&label("loop"));

sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	&vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[2],@X[2],@X[-2&7]);		# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpsrld	(@X[2],@X[0],31);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpslldq(@X[4],@X[0],12);		# "X[0]"<<96, extract one dword
	&vpaddd	(@X[0],@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpsrld	(@X[3],@X[4],30);
	&vpor	(@X[0],@X[0],@X[2]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpslld	(@X[4],@X[4],2);
	&vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[4]);		# "X[0]"^=("X[0]"<<96)<<<2
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

	$Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xupdate_avx_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	&vpalignr(@X[2],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
	&vpxor	(@X[0],@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpxor	(@X[0],@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	&vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	if ($Xi%5) {
	  &vmovdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &vmovdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);	# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpsrld	(@X[2],@X[0],30);
	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpslld	(@X[0],@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpor	(@X[0],@X[0],@X[2]);	# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

	$Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }		# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
	&vmovdqu(@X[-3&7],&QWP(16,$inp));
	&vmovdqu(@X[-2&7],&QWP(32,$inp));
	&vmovdqu(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

	$Xi=0;
}

sub Xloop_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpshufb	(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa	(&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }
	$Xi++;
}

sub Xtail_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

&set_label("loop",16);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_32_79(\&body_00_19);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	($B,$C);
	&mov	(&DWP(8,@T[1]),$C);
	&xor	($B,$D);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
	&mov	(@T[1],@T[0]);
	&and	(@T[0],$B);
	&mov	($B,@T[1]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);

	&vzeroall();

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
&function_end("_sha1_block_data_order_avx");
}
&set_label("K_XX_XX",64);
&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999);	# K_00_19
&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1);	# K_20_39
&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc);	# K_40_59
&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6);	# K_60_79
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# pbswap mask
&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
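# (For reference, the four K_XX_XX constants above are the well-known
# floor(2^30*sqrt(n)) values for n=2,3,5,10. The last two rows are
# byte-swap masks: the data_word row serves the SSSE3/AVX paths, the
# data_byte row is the "byte-n-word" swap used by the SHAEXT path.)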
}
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();

close STDOUT or die "error closing STDOUT: $!";