#******************************************************************************
#* Copyright(c) 2012, Intel Corp.
#* Developers and authors:
#* Shay Gueron (1, 2), and Vlad Krasnov (1)
#* (1) Intel Corporation, Israel Development Center, Haifa, Israel
#* (2) University of Haifa, Israel
#******************************************************************************
#* This submission to OpenSSL is to be made available under the OpenSSL
#* license, and only to the OpenSSL project, in order to allow integration
#* into the publicly distributed code.
#* The use of this code, or portions of this code, or concepts embedded in
#* this code, or modification of this code and/or algorithm(s) in it, or the
#* use of this code for any other purpose than stated above, requires special
#* licensing.
#******************************************************************************
#* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS AND THE COPYRIGHT OWNERS
#* ``AS IS''. ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
#* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS OR THE COPYRIGHT
#* OWNERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
#* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
#* [1] S. Gueron, V. Krasnov: "Software Implementation of Modular
#*     Exponentiation, Using Advanced Vector Instructions Architectures",
#*     F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,
#*     pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012
#* [2] S. Gueron: "Efficient Software Implementations of Modular
#*     Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).
#* [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE
#*     Proceedings of 9th International Conference on Information Technology:
#*     New Generations (ITNG 2012), pp. 821-823 (2012)
#* [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis
#*     resistant 1024-bit modular exponentiation, for optimizing RSA2048
#*     on AVX2 capable x86_64 platforms",
#*     http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest
#******************************************************************************
# +10% improvement by <appro@openssl.org>
#
# rsa2048 sign/sec	OpenSSL 1.0.1	scalar(*)	this
# 2GHz Haswell		544		632/+16%	947/+74%
#
# (*) if system doesn't support AVX2, for reference purposes;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.11);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

open OUT,"| $^X $xlate $flavour $output";
my $rp="%rdi";	# BN_ULONG *rp,
my $ap="%rsi";	# const BN_ULONG *ap,
my $np="%rdx";	# const BN_ULONG *np,
my $n0="%ecx";	# const BN_ULONG n0,
my $rep="%r8d";	# int repeat);

# The registers that hold the accumulated redundant result
# The AMM works on 1024 bit operands, and redundant word size is 29
# Therefore: ceil(1024/29)/4 = 9
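#
# A hedged illustration (comment only, not part of the generated code): the
# 1024-bit operand is split into ceil(1024/29) = 36 significant 29-bit digits,
# padded with zeros so that the digits pack into groups of four quadwords,
# i.e. 36/4 = 9 ymm registers hold the live digits. In C-like terms:
#
#	/* illustrative only; names are assumptions */
#	for (k = 0; k < 36; k++)
#		digit[k] = (uint32_t)((a >> (29*k)) & 0x1fffffff);	/* a = 1024-bit value */
#	for (; k < 40; k++)
#		digit[k] = 0;						/* padding */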
# Registers that hold the broadcasted words of bp, currently used
# Registers that hold the broadcasted words of Y, currently used
my $AND_MASK="%ymm15";
# alu registers that hold the first words of the ACC
my $i="%r14d";			# loop counter
my $FrameSize=32*18+32*8;	# place for A^2 and 2*A

$np="%r13";			# reassigned argument

$code.=<<___;
.globl	rsaz_1024_sqr_avx2
.type	rsaz_1024_sqr_avx2,\@function,5
rsaz_1024_sqr_avx2:		# 702 cycles, 14% faster than rsaz_1024_mul_avx2
___
$code.=<<___ if ($win64);
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)

	mov	%rdx, $np			# reassigned argument
	sub	\$$FrameSize, %rsp
	sub	\$-128, $rp			# size optimization
	and	\$4095, $tmp			# see if $np crosses page
	jz	.Lsqr_1024_no_n_copy
	# unaligned 256-bit load that crosses page boundary can
	# cause >2x performance degradation here, so if $np does
	# cross page boundary, copy it to stack and make sure stack
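	#
	# An illustrative note (assumption, not generated code): the test above
	# amounts to asking whether the block of 32-byte loads from np would run
	# past the end of the current 4KB page, roughly
	#
	#	crosses_page = (((uintptr_t)np & 4095) + 10*32) >= 4096;
	#
	# and only in that case is np copied into the stack frame.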
	vmovdqu		32*0-128($np), $ACC0
	vmovdqu		32*1-128($np), $ACC1
	vmovdqu		32*2-128($np), $ACC2
	vmovdqu		32*3-128($np), $ACC3
	vmovdqu		32*4-128($np), $ACC4
	vmovdqu		32*5-128($np), $ACC5
	vmovdqu		32*6-128($np), $ACC6
	vmovdqu		32*7-128($np), $ACC7
	vmovdqu		32*8-128($np), $ACC8
	lea		$FrameSize+128(%rsp),$np
	vmovdqu		$ACC0, 32*0-128($np)
	vmovdqu		$ACC1, 32*1-128($np)
	vmovdqu		$ACC2, 32*2-128($np)
	vmovdqu		$ACC3, 32*3-128($np)
	vmovdqu		$ACC4, 32*4-128($np)
	vmovdqu		$ACC5, 32*5-128($np)
	vmovdqu		$ACC6, 32*6-128($np)
	vmovdqu		$ACC7, 32*7-128($np)
	vmovdqu		$ACC8, 32*8-128($np)
	vmovdqu		$ACC9, 32*9-128($np)	# $ACC9 is zero after vzeroall

.Lsqr_1024_no_n_copy:
	vmovdqu		32*1-128($ap), $ACC1
	vmovdqu		32*2-128($ap), $ACC2
	vmovdqu		32*3-128($ap), $ACC3
	vmovdqu		32*4-128($ap), $ACC4
	vmovdqu		32*5-128($ap), $ACC5
	vmovdqu		32*6-128($ap), $ACC6
	vmovdqu		32*7-128($ap), $ACC7
	vmovdqu		32*8-128($ap), $ACC8

	lea	192(%rsp), $tp0			# 64+128=192
	vpbroadcastq	.Land_mask(%rip), $AND_MASK
	jmp	.LOOP_GRANDE_SQR_1024

.LOOP_GRANDE_SQR_1024:
	lea	32*18+128(%rsp), $aap		# size optimization
	lea	448(%rsp), $tp1			# 64+128+256=448
	# the squaring is performed as described in Variant B of
	# "Speeding up Big-Number Squaring", so start by calculating
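	#
	# A hedged sketch (comment only, not generated code) of the Variant-B
	# idea from [3]: every cross product a[i]*a[j], i<j, appears twice in
	# the square, so the operand is doubled once up front and the doubled
	# copy is used for the off-diagonal columns, e.g. for n digits:
	#
	#	for (i = 0; i < n; i++) {
	#		sqr[2*i] += a[i] * a[i];		/* diagonal, original a */
	#		for (j = i + 1; j < n; j++)
	#			sqr[i+j] += aap[j] * a[i];	/* aap[j] holds 2*a[j]  */
	#	}
	#
	# which is what the vpaddq doubling below prepares (the aap buffer holds 2*A).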
	vpaddq		$ACC1, $ACC1, $ACC1
	vpbroadcastq	32*0-128($ap), $B1
	vpaddq		$ACC2, $ACC2, $ACC2
	vmovdqa		$ACC1, 32*0-128($aap)
	vpaddq		$ACC3, $ACC3, $ACC3
	vmovdqa		$ACC2, 32*1-128($aap)
	vpaddq		$ACC4, $ACC4, $ACC4
	vmovdqa		$ACC3, 32*2-128($aap)
	vpaddq		$ACC5, $ACC5, $ACC5
	vmovdqa		$ACC4, 32*3-128($aap)
	vpaddq		$ACC6, $ACC6, $ACC6
	vmovdqa		$ACC5, 32*4-128($aap)
	vpaddq		$ACC7, $ACC7, $ACC7
	vmovdqa		$ACC6, 32*5-128($aap)
	vpaddq		$ACC8, $ACC8, $ACC8
	vmovdqa		$ACC7, 32*6-128($aap)
	vpxor		$ACC9, $ACC9, $ACC9
	vmovdqa		$ACC8, 32*7-128($aap)

	vpmuludq	32*0-128($ap), $B1, $ACC0
	vpbroadcastq	32*1-128($ap), $B2
	vmovdqu		$ACC9, 32*9-192($tp0)	# zero upper half
	vpmuludq	$B1, $ACC1, $ACC1
	vmovdqu		$ACC9, 32*10-448($tp1)
	vpmuludq	$B1, $ACC2, $ACC2
	vmovdqu		$ACC9, 32*11-448($tp1)
	vpmuludq	$B1, $ACC3, $ACC3
	vmovdqu		$ACC9, 32*12-448($tp1)
	vpmuludq	$B1, $ACC4, $ACC4
	vmovdqu		$ACC9, 32*13-448($tp1)
	vpmuludq	$B1, $ACC5, $ACC5
	vmovdqu		$ACC9, 32*14-448($tp1)
	vpmuludq	$B1, $ACC6, $ACC6
	vmovdqu		$ACC9, 32*15-448($tp1)
	vpmuludq	$B1, $ACC7, $ACC7
	vmovdqu		$ACC9, 32*16-448($tp1)
	vpmuludq	$B1, $ACC8, $ACC8
	vpbroadcastq	32*2-128($ap), $B1
	vmovdqu		$ACC9, 32*17-448($tp1)
	vmovdqu		32*0(%rsp,$tmp), $TEMP0	# 32*0-192($tp0,$tmp)
	vmovdqu		32*1(%rsp,$tmp), $TEMP1	# 32*1-192($tp0,$tmp)
	vpbroadcastq	32*1-128($ap,$tmp), $B2
	vpmuludq	32*0-128($ap), $B1, $ACC0
	vmovdqu		32*2-192($tp0,$tmp), $TEMP2
	vpaddq		$TEMP0, $ACC0, $ACC0
	vpmuludq	32*0-128($aap), $B1, $ACC1
	vmovdqu		32*3-192($tp0,$tmp), $TEMP0
	vpaddq		$TEMP1, $ACC1, $ACC1
	vpmuludq	32*1-128($aap), $B1, $ACC2
	vmovdqu		32*4-192($tp0,$tmp), $TEMP1
	vpaddq		$TEMP2, $ACC2, $ACC2
	vpmuludq	32*2-128($aap), $B1, $ACC3
	vmovdqu		32*5-192($tp0,$tmp), $TEMP2
	vpaddq		$TEMP0, $ACC3, $ACC3
	vpmuludq	32*3-128($aap), $B1, $ACC4
	vmovdqu		32*6-192($tp0,$tmp), $TEMP0
	vpaddq		$TEMP1, $ACC4, $ACC4
	vpmuludq	32*4-128($aap), $B1, $ACC5
	vmovdqu		32*7-192($tp0,$tmp), $TEMP1
	vpaddq		$TEMP2, $ACC5, $ACC5
	vpmuludq	32*5-128($aap), $B1, $ACC6
	vmovdqu		32*8-192($tp0,$tmp), $TEMP2
	vpaddq		$TEMP0, $ACC6, $ACC6
	vpmuludq	32*6-128($aap), $B1, $ACC7
	vpaddq		$TEMP1, $ACC7, $ACC7
	vpmuludq	32*7-128($aap), $B1, $ACC8
	vpbroadcastq	32*2-128($ap,$tmp), $B1
	vpaddq		$TEMP2, $ACC8, $ACC8

	vmovdqu		$ACC0, 32*0(%rsp,$tmp)	# 32*0-192($tp0,$tmp)
	vmovdqu		$ACC1, 32*1(%rsp,$tmp)	# 32*1-192($tp0,$tmp)

	vpmuludq	32*1-128($ap), $B2, $TEMP0
	vpaddq		$TEMP0, $ACC2, $ACC2
	vpmuludq	32*1-128($aap), $B2, $TEMP1
	vpaddq		$TEMP1, $ACC3, $ACC3
	vpmuludq	32*2-128($aap), $B2, $TEMP2
	vpaddq		$TEMP2, $ACC4, $ACC4
	vpmuludq	32*3-128($aap), $B2, $TEMP0
	vpaddq		$TEMP0, $ACC5, $ACC5
	vpmuludq	32*4-128($aap), $B2, $TEMP1
	vpaddq		$TEMP1, $ACC6, $ACC6
	vpmuludq	32*5-128($aap), $B2, $TEMP2
	vmovdqu		32*9-192($tp0,$tmp), $TEMP1
	vpaddq		$TEMP2, $ACC7, $ACC7
	vpmuludq	32*6-128($aap), $B2, $TEMP0
	vpaddq		$TEMP0, $ACC8, $ACC8
	vpmuludq	32*7-128($aap), $B2, $ACC0
	vpbroadcastq	32*3-128($ap,$tmp), $B2
	vpaddq		$TEMP1, $ACC0, $ACC0

	vmovdqu		$ACC2, 32*2-192($tp0,$tmp)
	vmovdqu		$ACC3, 32*3-192($tp0,$tmp)

	vpmuludq	32*2-128($ap), $B1, $TEMP2
	vpaddq		$TEMP2, $ACC4, $ACC4
	vpmuludq	32*2-128($aap), $B1, $TEMP0
	vpaddq		$TEMP0, $ACC5, $ACC5
	vpmuludq	32*3-128($aap), $B1, $TEMP1
	vpaddq		$TEMP1, $ACC6, $ACC6
	vpmuludq	32*4-128($aap), $B1, $TEMP2
	vpaddq		$TEMP2, $ACC7, $ACC7
	vpmuludq	32*5-128($aap), $B1, $TEMP0
	vmovdqu		32*10-448($tp1,$tmp), $TEMP2
	vpaddq		$TEMP0, $ACC8, $ACC8
	vpmuludq	32*6-128($aap), $B1, $TEMP1
	vpaddq		$TEMP1, $ACC0, $ACC0
	vpmuludq	32*7-128($aap), $B1, $ACC1
	vpbroadcastq	32*4-128($ap,$tmp), $B1
	vpaddq		$TEMP2, $ACC1, $ACC1

	vmovdqu		$ACC4, 32*4-192($tp0,$tmp)
	vmovdqu		$ACC5, 32*5-192($tp0,$tmp)

	vpmuludq	32*3-128($ap), $B2, $TEMP0
	vpaddq		$TEMP0, $ACC6, $ACC6
	vpmuludq	32*3-128($aap), $B2, $TEMP1
	vpaddq		$TEMP1, $ACC7, $ACC7
	vpmuludq	32*4-128($aap), $B2, $TEMP2
	vpaddq		$TEMP2, $ACC8, $ACC8
	vpmuludq	32*5-128($aap), $B2, $TEMP0
	vmovdqu		32*11-448($tp1,$tmp), $TEMP2
	vpaddq		$TEMP0, $ACC0, $ACC0
	vpmuludq	32*6-128($aap), $B2, $TEMP1
	vpaddq		$TEMP1, $ACC1, $ACC1
	vpmuludq	32*7-128($aap), $B2, $ACC2
	vpbroadcastq	32*5-128($ap,$tmp), $B2
	vpaddq		$TEMP2, $ACC2, $ACC2

	vmovdqu		$ACC6, 32*6-192($tp0,$tmp)
	vmovdqu		$ACC7, 32*7-192($tp0,$tmp)

	vpmuludq	32*4-128($ap), $B1, $TEMP0
	vpaddq		$TEMP0, $ACC8, $ACC8
	vpmuludq	32*4-128($aap), $B1, $TEMP1
	vpaddq		$TEMP1, $ACC0, $ACC0
	vpmuludq	32*5-128($aap), $B1, $TEMP2
	vmovdqu		32*12-448($tp1,$tmp), $TEMP1
	vpaddq		$TEMP2, $ACC1, $ACC1
	vpmuludq	32*6-128($aap), $B1, $TEMP0
	vpaddq		$TEMP0, $ACC2, $ACC2
	vpmuludq	32*7-128($aap), $B1, $ACC3
	vpbroadcastq	32*6-128($ap,$tmp), $B1
	vpaddq		$TEMP1, $ACC3, $ACC3

	vmovdqu		$ACC8, 32*8-192($tp0,$tmp)
	vmovdqu		$ACC0, 32*9-192($tp0,$tmp)

	vpmuludq	32*5-128($ap), $B2, $TEMP2
	vpaddq		$TEMP2, $ACC1, $ACC1
	vpmuludq	32*5-128($aap), $B2, $TEMP0
	vmovdqu		32*13-448($tp1,$tmp), $TEMP2
	vpaddq		$TEMP0, $ACC2, $ACC2
	vpmuludq	32*6-128($aap), $B2, $TEMP1
	vpaddq		$TEMP1, $ACC3, $ACC3
	vpmuludq	32*7-128($aap), $B2, $ACC4
	vpbroadcastq	32*7-128($ap,$tmp), $B2
	vpaddq		$TEMP2, $ACC4, $ACC4

	vmovdqu		$ACC1, 32*10-448($tp1,$tmp)
	vmovdqu		$ACC2, 32*11-448($tp1,$tmp)

	vpmuludq	32*6-128($ap), $B1, $TEMP0
	vmovdqu		32*14-448($tp1,$tmp), $TEMP2
	vpaddq		$TEMP0, $ACC3, $ACC3
	vpmuludq	32*6-128($aap), $B1, $TEMP1
	vpbroadcastq	32*8-128($ap,$tmp), $ACC0	# borrow $ACC0 for $B1
	vpaddq		$TEMP1, $ACC4, $ACC4
	vpmuludq	32*7-128($aap), $B1, $ACC5
	vpbroadcastq	32*0+8-128($ap,$tmp), $B1	# for next iteration
	vpaddq		$TEMP2, $ACC5, $ACC5
	vmovdqu		32*15-448($tp1,$tmp), $TEMP1

	vmovdqu		$ACC3, 32*12-448($tp1,$tmp)
	vmovdqu		$ACC4, 32*13-448($tp1,$tmp)

	vpmuludq	32*7-128($ap), $B2, $TEMP0
	vmovdqu		32*16-448($tp1,$tmp), $TEMP2
	vpaddq		$TEMP0, $ACC5, $ACC5
	vpmuludq	32*7-128($aap), $B2, $ACC6
	vpaddq		$TEMP1, $ACC6, $ACC6

	vpmuludq	32*8-128($ap), $ACC0, $ACC7
	vmovdqu		$ACC5, 32*14-448($tp1,$tmp)
	vpaddq		$TEMP2, $ACC7, $ACC7
	vmovdqu		$ACC6, 32*15-448($tp1,$tmp)
	vmovdqu		$ACC7, 32*16-448($tp1,$tmp)
	# we need to fix indexes 32-39 to avoid overflow
	vmovdqu		32*8-192($tp0), $ACC8
	vmovdqu		32*9-192($tp0), $ACC1
	vmovdqu		32*10-448($tp1), $ACC2

	vpsrlq		\$29, $ACC8, $TEMP1
	vpand		$AND_MASK, $ACC8, $ACC8
	vpsrlq		\$29, $ACC1, $TEMP2
	vpand		$AND_MASK, $ACC1, $ACC1

	vpermq		\$0x93, $TEMP1, $TEMP1
	vpxor		$ZERO, $ZERO, $ZERO
	vpermq		\$0x93, $TEMP2, $TEMP2

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpaddq		$TEMP0, $ACC8, $ACC8
	vpblendd	\$3, $TEMP2, $ZERO, $TEMP2
	vpaddq		$TEMP1, $ACC1, $ACC1
	vpaddq		$TEMP2, $ACC2, $ACC2
	vmovdqu		$ACC1, 32*9-192($tp0)
	vmovdqu		$ACC2, 32*10-448($tp1)
	vmovdqu		32*1(%rsp), $ACC1
	vmovdqu		32*2-192($tp0), $ACC2
	vmovdqu		32*3-192($tp0), $ACC3
	vmovdqu		32*4-192($tp0), $ACC4
	vmovdqu		32*5-192($tp0), $ACC5
	vmovdqu		32*6-192($tp0), $ACC6
	vmovdqu		32*7-192($tp0), $ACC7

	and	\$0x1fffffff, %eax
	imulq	-128($np), %rax
	vpbroadcastq	$Y1, $Y1
	imulq	8-128($np), %rax
	imulq	16-128($np), %rax
	imulq	24-128($np), %rdx
	and	\$0x1fffffff, %eax
	jmp	.LOOP_REDUCE_1024
	vpbroadcastq	$Y2, $Y2
	vpmuludq	32*1-128($np), $Y1, $TEMP0
	imulq	-128($np), %rax
	vpaddq		$TEMP0, $ACC1, $ACC1
	vpmuludq	32*2-128($np), $Y1, $TEMP1
	imulq	8-128($np), %rax
	vpaddq		$TEMP1, $ACC2, $ACC2
	vpmuludq	32*3-128($np), $Y1, $TEMP2
	imulq	16-128($np), %rax
	vpaddq		$TEMP2, $ACC3, $ACC3
	vpmuludq	32*4-128($np), $Y1, $TEMP0
	vpaddq		$TEMP0, $ACC4, $ACC4
	vpmuludq	32*5-128($np), $Y1, $TEMP1
	vpaddq		$TEMP1, $ACC5, $ACC5
	vpmuludq	32*6-128($np), $Y1, $TEMP2
	and	\$0x1fffffff, %eax
	vpaddq		$TEMP2, $ACC6, $ACC6
	vpmuludq	32*7-128($np), $Y1, $TEMP0
	vpaddq		$TEMP0, $ACC7, $ACC7
	vpmuludq	32*8-128($np), $Y1, $TEMP1
	vmovdqu		32*1-8-128($np), $TEMP2
	vpaddq		$TEMP1, $ACC8, $ACC8
	vmovdqu		32*2-8-128($np), $TEMP0
	vpbroadcastq	$Y1, $Y1

	vpmuludq	$Y2, $TEMP2, $TEMP2
	vmovdqu		32*3-8-128($np), $TEMP1
	imulq	-128($np), %rax
	vpaddq		$TEMP2, $ACC1, $ACC1
	vpmuludq	$Y2, $TEMP0, $TEMP0
	vmovdqu		32*4-8-128($np), $TEMP2
	imulq	8-128($np), %rax
	vpaddq		$TEMP0, $ACC2, $ACC2
	vpmuludq	$Y2, $TEMP1, $TEMP1
	vmovdqu		32*5-8-128($np), $TEMP0
	vpaddq		$TEMP1, $ACC3, $ACC3
	vpmuludq	$Y2, $TEMP2, $TEMP2
	vmovdqu		32*6-8-128($np), $TEMP1
	vpaddq		$TEMP2, $ACC4, $ACC4
	vpmuludq	$Y2, $TEMP0, $TEMP0
	vmovdqu		32*7-8-128($np), $TEMP2
	and	\$0x1fffffff, %eax
	vpaddq		$TEMP0, $ACC5, $ACC5
	vpmuludq	$Y2, $TEMP1, $TEMP1
	vmovdqu		32*8-8-128($np), $TEMP0
	vpaddq		$TEMP1, $ACC6, $ACC6
	vpmuludq	$Y2, $TEMP2, $TEMP2
	vmovdqu		32*9-8-128($np), $ACC9
	vmovd	%eax, $ACC0			# borrow ACC0 for Y2
	imulq	-128($np), %rax
	vpaddq		$TEMP2, $ACC7, $ACC7
	vpmuludq	$Y2, $TEMP0, $TEMP0
	vmovdqu		32*1-16-128($np), $TEMP1
	vpbroadcastq	$ACC0, $ACC0
	vpaddq		$TEMP0, $ACC8, $ACC8
	vpmuludq	$Y2, $ACC9, $ACC9
	vmovdqu		32*2-16-128($np), $TEMP2
($ACC0,$Y2)=($Y2,$ACC0);
	vmovdqu		32*1-24-128($np), $ACC0
	vpmuludq	$Y1, $TEMP1, $TEMP1
	vmovdqu		32*3-16-128($np), $TEMP0
	vpaddq		$TEMP1, $ACC1, $ACC1
	vpmuludq	$Y2, $ACC0, $ACC0
	vpmuludq	$Y1, $TEMP2, $TEMP2
	vmovdqu		32*4-16-128($np), $TEMP1
	vpaddq		$ACC1, $ACC0, $ACC0
	vpaddq		$TEMP2, $ACC2, $ACC2
	vpmuludq	$Y1, $TEMP0, $TEMP0
	vmovdqu		32*5-16-128($np), $TEMP2
	vmovdqu		$ACC0, (%rsp)		# transfer $r0-$r3
	vpaddq		$TEMP0, $ACC3, $ACC3
	vpmuludq	$Y1, $TEMP1, $TEMP1
	vmovdqu		32*6-16-128($np), $TEMP0
	vpaddq		$TEMP1, $ACC4, $ACC4
	vpmuludq	$Y1, $TEMP2, $TEMP2
	vmovdqu		32*7-16-128($np), $TEMP1
	vpaddq		$TEMP2, $ACC5, $ACC5
	vpmuludq	$Y1, $TEMP0, $TEMP0
	vmovdqu		32*8-16-128($np), $TEMP2
	vpaddq		$TEMP0, $ACC6, $ACC6
	vpmuludq	$Y1, $TEMP1, $TEMP1
	vmovdqu		32*9-16-128($np), $TEMP0
	vpaddq		$TEMP1, $ACC7, $ACC7
	vpmuludq	$Y1, $TEMP2, $TEMP2
	vmovdqu		32*2-24-128($np), $TEMP1
	vpaddq		$TEMP2, $ACC8, $ACC8
	vpmuludq	$Y1, $TEMP0, $TEMP0
	and	\$0x1fffffff, %eax
	vmovdqu		32*3-24-128($np), $TEMP2
	vpaddq		$TEMP0, $ACC9, $ACC9
	vpbroadcastq	$Y1, $Y1

	vpmuludq	$Y2, $TEMP1, $TEMP1
	vmovdqu		32*4-24-128($np), $TEMP0
	imulq	-128($np), %rax
	vpaddq		$TEMP1, $ACC2, $ACC1
	vpmuludq	$Y2, $TEMP2, $TEMP2
	vmovdqu		32*5-24-128($np), $TEMP1
	imulq	8-128($np), %rax
	vpaddq		$TEMP2, $ACC3, $ACC2
	vpmuludq	$Y2, $TEMP0, $TEMP0
	vmovdqu		32*6-24-128($np), $TEMP2
	imulq	16-128($np), %rax
	vpaddq		$TEMP0, $ACC4, $ACC3
	vpmuludq	$Y2, $TEMP1, $TEMP1
	vmovdqu		32*7-24-128($np), $TEMP0
	imulq	24-128($np), %rdx		# future $r3
	vpaddq		$TEMP1, $ACC5, $ACC4
	vpmuludq	$Y2, $TEMP2, $TEMP2
	vmovdqu		32*8-24-128($np), $TEMP1
	vpaddq		$TEMP2, $ACC6, $ACC5
	vpmuludq	$Y2, $TEMP0, $TEMP0
	vmovdqu		32*9-24-128($np), $TEMP2
	and	\$0x1fffffff, %eax
	vpaddq		$TEMP0, $ACC7, $ACC6
	vpmuludq	$Y2, $TEMP1, $TEMP1
	vpaddq		$TEMP1, $ACC8, $ACC7
	vpmuludq	$Y2, $TEMP2, $TEMP2
	vpaddq		$TEMP2, $ACC9, $ACC8

	jnz	.LOOP_REDUCE_1024
($ACC0,$Y2)=($Y2,$ACC0);
	lea	448(%rsp), $tp1			# size optimization
	vpaddq		$ACC9, $Y2, $ACC0
	vpxor		$ZERO, $ZERO, $ZERO

	vpaddq		32*9-192($tp0), $ACC0, $ACC0
	vpaddq		32*10-448($tp1), $ACC1, $ACC1
	vpaddq		32*11-448($tp1), $ACC2, $ACC2
	vpaddq		32*12-448($tp1), $ACC3, $ACC3
	vpaddq		32*13-448($tp1), $ACC4, $ACC4
	vpaddq		32*14-448($tp1), $ACC5, $ACC5
	vpaddq		32*15-448($tp1), $ACC6, $ACC6
	vpaddq		32*16-448($tp1), $ACC7, $ACC7
	vpaddq		32*17-448($tp1), $ACC8, $ACC8
	vpsrlq		\$29, $ACC0, $TEMP1
	vpand		$AND_MASK, $ACC0, $ACC0
	vpsrlq		\$29, $ACC1, $TEMP2
	vpand		$AND_MASK, $ACC1, $ACC1
	vpsrlq		\$29, $ACC2, $TEMP3
	vpermq		\$0x93, $TEMP1, $TEMP1
	vpand		$AND_MASK, $ACC2, $ACC2
	vpsrlq		\$29, $ACC3, $TEMP4
	vpermq		\$0x93, $TEMP2, $TEMP2
	vpand		$AND_MASK, $ACC3, $ACC3
	vpermq		\$0x93, $TEMP3, $TEMP3

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpermq		\$0x93, $TEMP4, $TEMP4
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpaddq		$TEMP0, $ACC0, $ACC0
	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
	vpaddq		$TEMP1, $ACC1, $ACC1
	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
	vpaddq		$TEMP2, $ACC2, $ACC2
	vpblendd	\$3, $TEMP4, $ZERO, $TEMP4
	vpaddq		$TEMP3, $ACC3, $ACC3
	vpaddq		$TEMP4, $ACC4, $ACC4

	vpsrlq		\$29, $ACC0, $TEMP1
	vpand		$AND_MASK, $ACC0, $ACC0
	vpsrlq		\$29, $ACC1, $TEMP2
	vpand		$AND_MASK, $ACC1, $ACC1
	vpsrlq		\$29, $ACC2, $TEMP3
	vpermq		\$0x93, $TEMP1, $TEMP1
	vpand		$AND_MASK, $ACC2, $ACC2
	vpsrlq		\$29, $ACC3, $TEMP4
	vpermq		\$0x93, $TEMP2, $TEMP2
	vpand		$AND_MASK, $ACC3, $ACC3
	vpermq		\$0x93, $TEMP3, $TEMP3

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpermq		\$0x93, $TEMP4, $TEMP4
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpaddq		$TEMP0, $ACC0, $ACC0
	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
	vpaddq		$TEMP1, $ACC1, $ACC1
	vmovdqu		$ACC0, 32*0-128($rp)
	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
	vpaddq		$TEMP2, $ACC2, $ACC2
	vmovdqu		$ACC1, 32*1-128($rp)
	vpblendd	\$3, $TEMP4, $ZERO, $TEMP4
	vpaddq		$TEMP3, $ACC3, $ACC3
	vmovdqu		$ACC2, 32*2-128($rp)
	vpaddq		$TEMP4, $ACC4, $ACC4
	vmovdqu		$ACC3, 32*3-128($rp)

	vpsrlq		\$29, $ACC4, $TEMP1
	vpand		$AND_MASK, $ACC4, $ACC4
	vpsrlq		\$29, $ACC5, $TEMP2
	vpand		$AND_MASK, $ACC5, $ACC5
	vpsrlq		\$29, $ACC6, $TEMP3
	vpermq		\$0x93, $TEMP1, $TEMP1
	vpand		$AND_MASK, $ACC6, $ACC6
	vpsrlq		\$29, $ACC7, $TEMP4
	vpermq		\$0x93, $TEMP2, $TEMP2
	vpand		$AND_MASK, $ACC7, $ACC7
	vpsrlq		\$29, $ACC8, $TEMP5
	vpermq		\$0x93, $TEMP3, $TEMP3
	vpand		$AND_MASK, $ACC8, $ACC8
	vpermq		\$0x93, $TEMP4, $TEMP4

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpermq		\$0x93, $TEMP5, $TEMP5
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpaddq		$TEMP0, $ACC4, $ACC4
	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
	vpaddq		$TEMP1, $ACC5, $ACC5
	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
	vpaddq		$TEMP2, $ACC6, $ACC6
	vpblendd	\$3, $TEMP4, $TEMP5, $TEMP4
	vpaddq		$TEMP3, $ACC7, $ACC7
	vpaddq		$TEMP4, $ACC8, $ACC8

	vpsrlq		\$29, $ACC4, $TEMP1
	vpand		$AND_MASK, $ACC4, $ACC4
	vpsrlq		\$29, $ACC5, $TEMP2
	vpand		$AND_MASK, $ACC5, $ACC5
	vpsrlq		\$29, $ACC6, $TEMP3
	vpermq		\$0x93, $TEMP1, $TEMP1
	vpand		$AND_MASK, $ACC6, $ACC6
	vpsrlq		\$29, $ACC7, $TEMP4
	vpermq		\$0x93, $TEMP2, $TEMP2
	vpand		$AND_MASK, $ACC7, $ACC7
	vpsrlq		\$29, $ACC8, $TEMP5
	vpermq		\$0x93, $TEMP3, $TEMP3
	vpand		$AND_MASK, $ACC8, $ACC8
	vpermq		\$0x93, $TEMP4, $TEMP4

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpermq		\$0x93, $TEMP5, $TEMP5
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpaddq		$TEMP0, $ACC4, $ACC4
	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
	vpaddq		$TEMP1, $ACC5, $ACC5
	vmovdqu		$ACC4, 32*4-128($rp)
	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
	vpaddq		$TEMP2, $ACC6, $ACC6
	vmovdqu		$ACC5, 32*5-128($rp)
	vpblendd	\$3, $TEMP4, $TEMP5, $TEMP4
	vpaddq		$TEMP3, $ACC7, $ACC7
	vmovdqu		$ACC6, 32*6-128($rp)
	vpaddq		$TEMP4, $ACC8, $ACC8
	vmovdqu		$ACC7, 32*7-128($rp)
	vmovdqu		$ACC8, 32*8-128($rp)
	jne	.LOOP_GRANDE_SQR_1024
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	lea	(%rax),%rsp		# restore %rsp
.size	rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
___
my $rp="%rdi";	# BN_ULONG *rp,
my $ap="%rsi";	# const BN_ULONG *ap,
my $bp="%rdx";	# const BN_ULONG *bp,
my $np="%rcx";	# const BN_ULONG *np,
my $n0="%r8d";	# unsigned int n0);

# The registers that hold the accumulated redundant result
# The AMM works on 1024 bit operands, and redundant word size is 29
# Therefore: ceil(1024/29)/4 = 9
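#
# A hedged, comment-only sketch of the word-by-word almost Montgomery
# multiplication (AMM) that this routine vectorizes: for each 29-bit digit
# b[i] of the multiplier, accumulate b[i]*A, derive y = (acc[0]*n0) mod 2^29,
# add y*N, then divide the accumulator by 2^29. Illustrative C-like pseudocode
# over assumed plain digit arrays (carries stay redundant, as in the code):
#
#	for (i = 0; i < 36; i++) {
#		for (j = 0; j < 36; j++)
#			acc[j] += (uint64_t)b[i] * a[j];
#		y = (uint32_t)(acc[0] * n0) & 0x1fffffff;
#		for (j = 0; j < 36; j++)
#			acc[j] += (uint64_t)y * n[j];
#		carry = acc[0] >> 29;		/* acc[0] is now 0 mod 2^29 */
#		for (j = 0; j < 36; j++)	/* shift accumulator one digit down */
#			acc[j] = acc[j+1];
#		acc[36] = 0;
#		acc[0] += carry;
#	}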
# Registers that hold the broadcasted words of multiplier, currently used
my $AND_MASK="%ymm15";
# alu registers that hold the first words of the ACC

$bp="%r13";			# reassigned argument

$code.=<<___;
.globl	rsaz_1024_mul_avx2
.type	rsaz_1024_mul_avx2,\@function,5
___
$code.=<<___ if ($win64);
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
	mov	%rdx, $bp	# reassigned argument

	# unaligned 256-bit load that crosses page boundary can
	# cause severe performance degradation here, so if $ap does
	# cross page boundary, swap it with $bp [meaning that caller
	# is advised to lay down $ap and $bp next to each other, so
	# that only one can cross page boundary].

	sub	\$-128,$ap	# size optimization
	and	\$4095, $tmp	# see if $np crosses page
	jz	.Lmul_1024_no_n_copy

	# unaligned 256-bit load that crosses page boundary can
	# cause severe performance degradation here, so if $np does
	# cross page boundary, copy it to stack and make sure stack
	vmovdqu		32*0-128($np), $ACC0
	vmovdqu		32*1-128($np), $ACC1
	vmovdqu		32*2-128($np), $ACC2
	vmovdqu		32*3-128($np), $ACC3
	vmovdqu		32*4-128($np), $ACC4
	vmovdqu		32*5-128($np), $ACC5
	vmovdqu		32*6-128($np), $ACC6
	vmovdqu		32*7-128($np), $ACC7
	vmovdqu		32*8-128($np), $ACC8

	vmovdqu		$ACC0, 32*0-128($np)
	vpxor		$ACC0, $ACC0, $ACC0
	vmovdqu		$ACC1, 32*1-128($np)
	vpxor		$ACC1, $ACC1, $ACC1
	vmovdqu		$ACC2, 32*2-128($np)
	vpxor		$ACC2, $ACC2, $ACC2
	vmovdqu		$ACC3, 32*3-128($np)
	vpxor		$ACC3, $ACC3, $ACC3
	vmovdqu		$ACC4, 32*4-128($np)
	vpxor		$ACC4, $ACC4, $ACC4
	vmovdqu		$ACC5, 32*5-128($np)
	vpxor		$ACC5, $ACC5, $ACC5
	vmovdqu		$ACC6, 32*6-128($np)
	vpxor		$ACC6, $ACC6, $ACC6
	vmovdqu		$ACC7, 32*7-128($np)
	vpxor		$ACC7, $ACC7, $ACC7
	vmovdqu		$ACC8, 32*8-128($np)
	vmovdqu		$ACC9, 32*9-128($np)	# $ACC9 is zero after vzeroall

.Lmul_1024_no_n_copy:
	vpbroadcastq	($bp), $Bi
	vmovdqu		$ACC0, (%rsp)		# clear top of stack
	vmovdqu		.Land_mask(%rip), $AND_MASK

	vpsrlq		\$29, $ACC3, $ACC9	# correct $ACC3(*)
	imulq	-128($ap), %rax
	imulq	8-128($ap), $r1
	and	\$0x1fffffff, %eax
	imulq	16-128($ap), $r2
	imulq	24-128($ap), $r3

	vpmuludq	32*1-128($ap),$Bi,$TEMP0
	vpaddq		$TEMP0,$ACC1,$ACC1
	vpmuludq	32*2-128($ap),$Bi,$TEMP1
	vpbroadcastq	$Yi, $Yi
	vpaddq		$TEMP1,$ACC2,$ACC2
	vpmuludq	32*3-128($ap),$Bi,$TEMP2
	vpand		$AND_MASK, $ACC3, $ACC3	# correct $ACC3
	vpaddq		$TEMP2,$ACC3,$ACC3
	vpmuludq	32*4-128($ap),$Bi,$TEMP0
	vpaddq		$TEMP0,$ACC4,$ACC4
	vpmuludq	32*5-128($ap),$Bi,$TEMP1
	vpaddq		$TEMP1,$ACC5,$ACC5
	vpmuludq	32*6-128($ap),$Bi,$TEMP2
	vpaddq		$TEMP2,$ACC6,$ACC6
	vpmuludq	32*7-128($ap),$Bi,$TEMP0
	vpermq		\$0x93, $ACC9, $ACC9	# correct $ACC3
	vpaddq		$TEMP0,$ACC7,$ACC7
	vpmuludq	32*8-128($ap),$Bi,$TEMP1
	vpbroadcastq	8($bp), $Bi
	vpaddq		$TEMP1,$ACC8,$ACC8

	imulq	-128($np),%rax
	imulq	8-128($np),%rax
	imulq	16-128($np),%rax
	imulq	24-128($np),%rdx

	vpmuludq	32*1-128($np),$Yi,$TEMP2
	vpaddq		$TEMP2,$ACC1,$ACC1
	vpmuludq	32*2-128($np),$Yi,$TEMP0
	vpaddq		$TEMP0,$ACC2,$ACC2
	vpmuludq	32*3-128($np),$Yi,$TEMP1
	vpaddq		$TEMP1,$ACC3,$ACC3
	vpmuludq	32*4-128($np),$Yi,$TEMP2
	vpaddq		$TEMP2,$ACC4,$ACC4
	vpmuludq	32*5-128($np),$Yi,$TEMP0
	vpaddq		$TEMP0,$ACC5,$ACC5
	vpmuludq	32*6-128($np),$Yi,$TEMP1
	vpaddq		$TEMP1,$ACC6,$ACC6
	vpmuludq	32*7-128($np),$Yi,$TEMP2
	vpblendd	\$3, $ZERO, $ACC9, $ACC9	# correct $ACC3
	vpaddq		$TEMP2,$ACC7,$ACC7
	vpmuludq	32*8-128($np),$Yi,$TEMP0
	vpaddq		$ACC9, $ACC3, $ACC3	# correct $ACC3
	vpaddq		$TEMP0,$ACC8,$ACC8
	imulq	-128($ap),%rax
	vmovdqu		-8+32*1-128($ap),$TEMP1
	imulq	8-128($ap),%rax
	vmovdqu		-8+32*2-128($ap),$TEMP2
	and	\$0x1fffffff, %eax
	imulq	16-128($ap),%rbx

	vpmuludq	$Bi,$TEMP1,$TEMP1
	vmovdqu		-8+32*3-128($ap),$TEMP0
	vpaddq		$TEMP1,$ACC1,$ACC1
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vpbroadcastq	$Yi, $Yi
	vmovdqu		-8+32*4-128($ap),$TEMP1
	vpaddq		$TEMP2,$ACC2,$ACC2
	vpmuludq	$Bi,$TEMP0,$TEMP0
	vmovdqu		-8+32*5-128($ap),$TEMP2
	vpaddq		$TEMP0,$ACC3,$ACC3
	vpmuludq	$Bi,$TEMP1,$TEMP1
	vmovdqu		-8+32*6-128($ap),$TEMP0
	vpaddq		$TEMP1,$ACC4,$ACC4
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vmovdqu		-8+32*7-128($ap),$TEMP1
	vpaddq		$TEMP2,$ACC5,$ACC5
	vpmuludq	$Bi,$TEMP0,$TEMP0
	vmovdqu		-8+32*8-128($ap),$TEMP2
	vpaddq		$TEMP0,$ACC6,$ACC6
	vpmuludq	$Bi,$TEMP1,$TEMP1
	vmovdqu		-8+32*9-128($ap),$ACC9
	vpaddq		$TEMP1,$ACC7,$ACC7
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vpaddq		$TEMP2,$ACC8,$ACC8
	vpmuludq	$Bi,$ACC9,$ACC9
	vpbroadcastq	16($bp), $Bi

	imulq	-128($np),%rax
	vmovdqu		-8+32*1-128($np),$TEMP0
	imulq	8-128($np),%rax
	vmovdqu		-8+32*2-128($np),$TEMP1
	imulq	16-128($np),%rdx

	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-8+32*3-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC1,$ACC1
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vmovdqu		-8+32*4-128($np),$TEMP0
	vpaddq		$TEMP1,$ACC2,$ACC2
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vmovdqu		-8+32*5-128($np),$TEMP1
	vpaddq		$TEMP2,$ACC3,$ACC3
	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-8+32*6-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC4,$ACC4
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vmovdqu		-8+32*7-128($np),$TEMP0
	vpaddq		$TEMP1,$ACC5,$ACC5
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vmovdqu		-8+32*8-128($np),$TEMP1
	vpaddq		$TEMP2,$ACC6,$ACC6
	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-8+32*9-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC7,$ACC7
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vpaddq		$TEMP1,$ACC8,$ACC8
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vpaddq		$TEMP2,$ACC9,$ACC9
	vmovdqu		-16+32*1-128($ap),$TEMP0
	imulq	-128($ap),%rax
	vmovdqu		-16+32*2-128($ap),$TEMP1
	and	\$0x1fffffff, %eax
	imulq	8-128($ap),%rbx

	vpmuludq	$Bi,$TEMP0,$TEMP0
	vmovdqu		-16+32*3-128($ap),$TEMP2
	vpaddq		$TEMP0,$ACC1,$ACC1
	vpmuludq	$Bi,$TEMP1,$TEMP1
	vpbroadcastq	$Yi, $Yi
	vmovdqu		-16+32*4-128($ap),$TEMP0
	vpaddq		$TEMP1,$ACC2,$ACC2
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vmovdqu		-16+32*5-128($ap),$TEMP1
	vpaddq		$TEMP2,$ACC3,$ACC3
	vpmuludq	$Bi,$TEMP0,$TEMP0
	vmovdqu		-16+32*6-128($ap),$TEMP2
	vpaddq		$TEMP0,$ACC4,$ACC4
	vpmuludq	$Bi,$TEMP1,$TEMP1
	vmovdqu		-16+32*7-128($ap),$TEMP0
	vpaddq		$TEMP1,$ACC5,$ACC5
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vmovdqu		-16+32*8-128($ap),$TEMP1
	vpaddq		$TEMP2,$ACC6,$ACC6
	vpmuludq	$Bi,$TEMP0,$TEMP0
	vmovdqu		-16+32*9-128($ap),$TEMP2
	vpaddq		$TEMP0,$ACC7,$ACC7
	vpmuludq	$Bi,$TEMP1,$TEMP1
	vpaddq		$TEMP1,$ACC8,$ACC8
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vpbroadcastq	24($bp), $Bi
	vpaddq		$TEMP2,$ACC9,$ACC9

	vmovdqu		-16+32*1-128($np),$TEMP0
	imulq	-128($np),%rax
	vmovdqu		-16+32*2-128($np),$TEMP1
	imulq	8-128($np),%rdx

	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-16+32*3-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC1,$ACC1
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vmovdqu		-16+32*4-128($np),$TEMP0
	vpaddq		$TEMP1,$ACC2,$ACC2
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vmovdqu		-16+32*5-128($np),$TEMP1
	vpaddq		$TEMP2,$ACC3,$ACC3
	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-16+32*6-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC4,$ACC4
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vmovdqu		-16+32*7-128($np),$TEMP0
	vpaddq		$TEMP1,$ACC5,$ACC5
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vmovdqu		-16+32*8-128($np),$TEMP1
	vpaddq		$TEMP2,$ACC6,$ACC6
	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-16+32*9-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC7,$ACC7
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vmovdqu		-24+32*1-128($ap),$TEMP0
	vpaddq		$TEMP1,$ACC8,$ACC8
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vmovdqu		-24+32*2-128($ap),$TEMP1
	vpaddq		$TEMP2,$ACC9,$ACC9
	imulq	-128($ap),%rbx
	and	\$0x1fffffff, %eax

	vpmuludq	$Bi,$TEMP0,$TEMP0
	vmovdqu		-24+32*3-128($ap),$TEMP2
	vpaddq		$TEMP0,$ACC1,$ACC1
	vpmuludq	$Bi,$TEMP1,$TEMP1
	vpbroadcastq	$Yi, $Yi
	vmovdqu		-24+32*4-128($ap),$TEMP0
	vpaddq		$TEMP1,$ACC2,$ACC2
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vmovdqu		-24+32*5-128($ap),$TEMP1
	vpaddq		$TEMP2,$ACC3,$ACC3
	vpmuludq	$Bi,$TEMP0,$TEMP0
	vmovdqu		-24+32*6-128($ap),$TEMP2
	vpaddq		$TEMP0,$ACC4,$ACC4
	vpmuludq	$Bi,$TEMP1,$TEMP1
	vmovdqu		-24+32*7-128($ap),$TEMP0
	vpaddq		$TEMP1,$ACC5,$ACC5
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vmovdqu		-24+32*8-128($ap),$TEMP1
	vpaddq		$TEMP2,$ACC6,$ACC6
	vpmuludq	$Bi,$TEMP0,$TEMP0
	vmovdqu		-24+32*9-128($ap),$TEMP2
	vpaddq		$TEMP0,$ACC7,$ACC7
	vpmuludq	$Bi,$TEMP1,$TEMP1
	vpaddq		$TEMP1,$ACC8,$ACC8
	vpmuludq	$Bi,$TEMP2,$TEMP2
	vpbroadcastq	32($bp), $Bi
	vpaddq		$TEMP2,$ACC9,$ACC9
	add	\$32, $bp			# $bp++

	vmovdqu		-24+32*1-128($np),$TEMP0
	imulq	-128($np),%rax
	vmovdqu		-24+32*2-128($np),$TEMP1
	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-24+32*3-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC1,$ACC0	# $ACC0==$TEMP0
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vmovdqu		$ACC0, (%rsp)		# transfer $r0-$r3
	vpaddq		$TEMP1,$ACC2,$ACC1
	vmovdqu		-24+32*4-128($np),$TEMP0
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vmovdqu		-24+32*5-128($np),$TEMP1
	vpaddq		$TEMP2,$ACC3,$ACC2
	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-24+32*6-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC4,$ACC3
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vmovdqu		-24+32*7-128($np),$TEMP0
	vpaddq		$TEMP1,$ACC5,$ACC4
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vmovdqu		-24+32*8-128($np),$TEMP1
	vpaddq		$TEMP2,$ACC6,$ACC5
	vpmuludq	$Yi,$TEMP0,$TEMP0
	vmovdqu		-24+32*9-128($np),$TEMP2
	vpaddq		$TEMP0,$ACC7,$ACC6
	vpmuludq	$Yi,$TEMP1,$TEMP1
	vpaddq		$TEMP1,$ACC8,$ACC7
	vpmuludq	$Yi,$TEMP2,$TEMP2
	vpaddq		$TEMP2,$ACC9,$ACC8
	# (*) Original implementation was correcting ACC1-ACC3 for overflow
	#     after 7 loop runs, or after 28 iterations, or 56 additions.
	#     But as we underutilize resources, it's possible to correct in
	#     each iteration with marginal performance loss. But then, as
	#     we do it in each iteration, we can correct fewer digits, and
	#     avoid performance penalties completely. Also note that we
	#     correct only three digits out of four. This works because
	#     the most significant digit is subjected to fewer additions.
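	#
	# A comment-only sketch of the per-iteration digit correction described
	# above, in C-like terms over an assumed plain digit array:
	#
	#	/* push the excess above 29 bits of digit k into digit k+1 */
	#	carry     = acc[k] >> 29;
	#	acc[k]   &= 0x1fffffff;
	#	acc[k+1] += carry;
	#
	# the vpsrlq/vpand/vpermq/vpblendd sequences below apply this to several
	# digits at a time, rotating each lane's carry into its neighbour.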
	vpermq		\$0, $AND_MASK, $AND_MASK
	vpaddq		(%rsp), $TEMP1, $ACC0

	vpsrlq		\$29, $ACC0, $TEMP1
	vpand		$AND_MASK, $ACC0, $ACC0
	vpsrlq		\$29, $ACC1, $TEMP2
	vpand		$AND_MASK, $ACC1, $ACC1
	vpsrlq		\$29, $ACC2, $TEMP3
	vpermq		\$0x93, $TEMP1, $TEMP1
	vpand		$AND_MASK, $ACC2, $ACC2
	vpsrlq		\$29, $ACC3, $TEMP4
	vpermq		\$0x93, $TEMP2, $TEMP2
	vpand		$AND_MASK, $ACC3, $ACC3

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpermq		\$0x93, $TEMP3, $TEMP3
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpermq		\$0x93, $TEMP4, $TEMP4
	vpaddq		$TEMP0, $ACC0, $ACC0
	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
	vpaddq		$TEMP1, $ACC1, $ACC1
	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
	vpaddq		$TEMP2, $ACC2, $ACC2
	vpblendd	\$3, $TEMP4, $ZERO, $TEMP4
	vpaddq		$TEMP3, $ACC3, $ACC3
	vpaddq		$TEMP4, $ACC4, $ACC4

	vpsrlq		\$29, $ACC0, $TEMP1
	vpand		$AND_MASK, $ACC0, $ACC0
	vpsrlq		\$29, $ACC1, $TEMP2
	vpand		$AND_MASK, $ACC1, $ACC1
	vpsrlq		\$29, $ACC2, $TEMP3
	vpermq		\$0x93, $TEMP1, $TEMP1
	vpand		$AND_MASK, $ACC2, $ACC2
	vpsrlq		\$29, $ACC3, $TEMP4
	vpermq		\$0x93, $TEMP2, $TEMP2
	vpand		$AND_MASK, $ACC3, $ACC3
	vpermq		\$0x93, $TEMP3, $TEMP3

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpermq		\$0x93, $TEMP4, $TEMP4
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpaddq		$TEMP0, $ACC0, $ACC0
	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
	vpaddq		$TEMP1, $ACC1, $ACC1
	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
	vpaddq		$TEMP2, $ACC2, $ACC2
	vpblendd	\$3, $TEMP4, $ZERO, $TEMP4
	vpaddq		$TEMP3, $ACC3, $ACC3
	vpaddq		$TEMP4, $ACC4, $ACC4

	vmovdqu		$ACC0, 0-128($rp)
	vmovdqu		$ACC1, 32-128($rp)
	vmovdqu		$ACC2, 64-128($rp)
	vmovdqu		$ACC3, 96-128($rp)
	vpsrlq		\$29, $ACC4, $TEMP1
	vpand		$AND_MASK, $ACC4, $ACC4
	vpsrlq		\$29, $ACC5, $TEMP2
	vpand		$AND_MASK, $ACC5, $ACC5
	vpsrlq		\$29, $ACC6, $TEMP3
	vpermq		\$0x93, $TEMP1, $TEMP1
	vpand		$AND_MASK, $ACC6, $ACC6
	vpsrlq		\$29, $ACC7, $TEMP4
	vpermq		\$0x93, $TEMP2, $TEMP2
	vpand		$AND_MASK, $ACC7, $ACC7
	vpsrlq		\$29, $ACC8, $TEMP5
	vpermq		\$0x93, $TEMP3, $TEMP3
	vpand		$AND_MASK, $ACC8, $ACC8
	vpermq		\$0x93, $TEMP4, $TEMP4

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpermq		\$0x93, $TEMP5, $TEMP5
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpaddq		$TEMP0, $ACC4, $ACC4
	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
	vpaddq		$TEMP1, $ACC5, $ACC5
	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
	vpaddq		$TEMP2, $ACC6, $ACC6
	vpblendd	\$3, $TEMP4, $TEMP5, $TEMP4
	vpaddq		$TEMP3, $ACC7, $ACC7
	vpaddq		$TEMP4, $ACC8, $ACC8

	vpsrlq		\$29, $ACC4, $TEMP1
	vpand		$AND_MASK, $ACC4, $ACC4
	vpsrlq		\$29, $ACC5, $TEMP2
	vpand		$AND_MASK, $ACC5, $ACC5
	vpsrlq		\$29, $ACC6, $TEMP3
	vpermq		\$0x93, $TEMP1, $TEMP1
	vpand		$AND_MASK, $ACC6, $ACC6
	vpsrlq		\$29, $ACC7, $TEMP4
	vpermq		\$0x93, $TEMP2, $TEMP2
	vpand		$AND_MASK, $ACC7, $ACC7
	vpsrlq		\$29, $ACC8, $TEMP5
	vpermq		\$0x93, $TEMP3, $TEMP3
	vpand		$AND_MASK, $ACC8, $ACC8
	vpermq		\$0x93, $TEMP4, $TEMP4

	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
	vpermq		\$0x93, $TEMP5, $TEMP5
	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
	vpaddq		$TEMP0, $ACC4, $ACC4
	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
	vpaddq		$TEMP1, $ACC5, $ACC5
	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
	vpaddq		$TEMP2, $ACC6, $ACC6
	vpblendd	\$3, $TEMP4, $TEMP5, $TEMP4
	vpaddq		$TEMP3, $ACC7, $ACC7
	vpaddq		$TEMP4, $ACC8, $ACC8

	vmovdqu		$ACC4, 128-128($rp)
	vmovdqu		$ACC5, 160-128($rp)
	vmovdqu		$ACC6, 192-128($rp)
	vmovdqu		$ACC7, 224-128($rp)
	vmovdqu		$ACC8, 256-128($rp)
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	lea	(%rax),%rsp		# restore %rsp
.Lmul_1024_epilogue:
.size	rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
___
my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
my @T = map("%r$_",(8..11));

$code.=<<___;
.globl	rsaz_1024_red2norm_avx2
.type	rsaz_1024_red2norm_avx2,\@abi-omnipotent
rsaz_1024_red2norm_avx2:
	sub	\$-128,$inp	# size optimization
___
for ($j=0,$i=0; $i<16; $i++) {
    while (29*$j<64*($i+1)) {	# load data till boundary
	$code.="	mov	`8*$j-128`($inp), @T[0]\n";
	$j++; $k++; push(@T,shift(@T));
    }
    while ($k>1) {		# shift loaded data but last value
	$code.="	shl	\$`29*($j-$k)`,@T[-$k]\n";
    }
    $code.=<<___;	# shift last value
	shl	\$`29*($j-1)`, @T[-1]
	shr	\$`-29*($j-1)`, @T[0]
___
    while ($l) {		# accumulate all values
	$code.="	add	@T[-$l], %rax\n";
    }
	$code.=<<___;
	adc	\$0, @T[0]	# consume eventual carry
	mov	%rax, 8*$i($out)
___
}
$code.=<<___;
.size	rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
___
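
# A hedged, comment-only model of what the generated red2norm code computes:
# the 29-bit digits are recombined into sixteen 64-bit words, each word
# collecting the shifted pieces of every digit that overlaps its bit range,
# with the carry of the final addition folded into the next word:
#
#	/* illustrative C-like pseudocode, names are assumptions */
#	for (i = 0; i < 16; i++) {
#		sum = carry;
#		/* for each digit k whose bit range overlaps [64*i, 64*i+63];
#		   the first such digit contributes only its top bits */
#		sum  += (unsigned __int128)red[k] << (29*k - 64*i);
#		out[i] = (uint64_t)sum;
#		carry  = (uint64_t)(sum >> 64);
#	}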
$code.=<<___;
.globl	rsaz_1024_norm2red_avx2
.type	rsaz_1024_norm2red_avx2,\@abi-omnipotent
rsaz_1024_norm2red_avx2:
	sub	\$-128,$out	# size optimization
	mov	\$0x1fffffff,%eax
___
for ($j=0,$i=0; $i<16; $i++) {
    $code.="	mov	`8*($i+1)`($inp),@T[1]\n"	if ($i<15);
    $code.="	xor	@T[1],@T[1]\n"			if ($i==15);
    while (29*($j+1)<64*($i+1)) {
	$code.=<<___;
	shr	\$`29*$j`,@T[-$k]
	and	%rax,@T[-$k]			# &0x1fffffff
	mov	@T[-$k],`8*$j-128`($out)
___
    }
	$code.=<<___;
	shrd	\$`29*$j`,@T[1],@T[0]
	mov	@T[0],`8*$j-128`($out)
___
}
$code.=<<___;
	mov	@T[0],`8*$j-128`($out)		# zero
	mov	@T[0],`8*($j+1)-128`($out)
	mov	@T[0],`8*($j+2)-128`($out)
	mov	@T[0],`8*($j+3)-128`($out)
.size	rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
___
my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");

$code.=<<___;
.globl	rsaz_1024_scatter5_avx2
.type	rsaz_1024_scatter5_avx2,\@abi-omnipotent
rsaz_1024_scatter5_avx2:
	vmovdqu	.Lscatter_permd(%rip),%ymm5
	lea	($out,$power),$out
	jmp	.Loop_scatter_1024

	vmovdqu		($inp),%ymm0
	vpermd		%ymm0,%ymm5,%ymm0
	vmovdqu		%xmm0,($out)
	lea	16*32($out),$out
	jnz	.Loop_scatter_1024

.size	rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
___
$code.=<<___;
.globl	rsaz_1024_gather5_avx2
.type	rsaz_1024_gather5_avx2,\@abi-omnipotent
rsaz_1024_gather5_avx2:
___
$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_rsaz_1024_gather5:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
	lea	.Lgather_table(%rip),%r11
	shr	\$2,%eax			# cache line number
	shl	\$4,$power			# offset within cache line
	vmovdqu		-32(%r11),%ymm7		# .Lgather_permd
	vpbroadcastb	8(%r11,%rax), %xmm8
	vpbroadcastb	7(%r11,%rax), %xmm9
	vpbroadcastb	6(%r11,%rax), %xmm10
	vpbroadcastb	5(%r11,%rax), %xmm11
	vpbroadcastb	4(%r11,%rax), %xmm12
	vpbroadcastb	3(%r11,%rax), %xmm13
	vpbroadcastb	2(%r11,%rax), %xmm14
	vpbroadcastb	1(%r11,%rax), %xmm15

	lea	($inp,$power),$inp
	mov	\$64,%r11			# size optimization
	jmp	.Loop_gather_1024
	vpand		($inp), %xmm8,%xmm0
	vpand		($inp,%r11), %xmm9,%xmm1
	vpand		($inp,%r11,2), %xmm10,%xmm2
	vpand		64($inp,%r11,2), %xmm11,%xmm3
	vpor		%xmm0,%xmm1,%xmm1
	vpand		($inp,%r11,4), %xmm12,%xmm4
	vpor		%xmm2,%xmm3,%xmm3
	vpand		64($inp,%r11,4), %xmm13,%xmm5
	vpor		%xmm1,%xmm3,%xmm3
	vpand		-128($inp,%r11,8), %xmm14,%xmm6
	vpor		%xmm4,%xmm5,%xmm5
	vpand		-64($inp,%r11,8), %xmm15,%xmm2
	lea	($inp,%r11,8),$inp
	vpor		%xmm3,%xmm5,%xmm5
	vpor		%xmm2,%xmm6,%xmm6
	vpor		%xmm5,%xmm6,%xmm6
	vpermd		%ymm6,%ymm7,%ymm6
	vmovdqu		%ymm6,($out)
	jnz	.Loop_gather_1024

	vpxor	%ymm0,%ymm0,%ymm0
	vmovdqu	%ymm0,($out)
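	#
	# A hedged note on the loop above: it is a masked-select gather. The
	# strided loads each fetch a candidate from a different cache line,
	# and the 0x00/0xff byte masks prepared from the cache-line number let
	# exactly one candidate survive the vpor chain, so the access pattern
	# does not leak which entry was wanted below cache-line granularity.
	# A generic C-style sketch of the same select-by-mask idea
	# (illustrative only, not this routine's exact layout):
	#
	#	uint64_t pick(const uint64_t cand[8], unsigned line)
	#	{
	#		uint64_t r = 0;
	#		for (unsigned k = 0; k < 8; k++) {
	#			uint64_t mask = 0 - (uint64_t)(k == line);  /* 0 or ~0 */
	#			r |= cand[k] & mask;
	#		}
	#		return r;
	#	}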
$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
.LSEH_end_rsaz_1024_gather5:
___
$code.=<<___;
.size	rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
___
$code.=<<___;
.extern	OPENSSL_ia32cap_P
.globl	rsaz_avx2_eligible
.type	rsaz_avx2_eligible,\@abi-omnipotent
	mov	OPENSSL_ia32cap_P+8(%rip),%eax
.size	rsaz_avx2_eligible,.-rsaz_avx2_eligible

	.quad	0x1fffffff,0x1fffffff,0x1fffffff,-1
	.long	0,2,4,6,7,7,7,7
	.long	0,7,1,7,2,7,3,7
	.byte	0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0
___
.extern	__imp_RtlVirtualUnwind
.type	rsaz_se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	160($context),%rax	# pull context->Rbp

	mov	%r15,240($context)
	mov	%r14,232($context)
	mov	%r13,224($context)
	mov	%r12,216($context)
	mov	%rbp,160($context)
	mov	%rbx,144($context)

	lea	-0xd8(%rax),%rsi	# %xmm save area
	lea	512($context),%rdi	# & context.Xmm6
	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
	.long	0xa548f3fc		# cld; rep movsq

	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	rsaz_se_handler,.-rsaz_se_handler
	.rva	.LSEH_begin_rsaz_1024_sqr_avx2
	.rva	.LSEH_end_rsaz_1024_sqr_avx2
	.rva	.LSEH_info_rsaz_1024_sqr_avx2

	.rva	.LSEH_begin_rsaz_1024_mul_avx2
	.rva	.LSEH_end_rsaz_1024_mul_avx2
	.rva	.LSEH_info_rsaz_1024_mul_avx2

	.rva	.LSEH_begin_rsaz_1024_gather5
	.rva	.LSEH_end_rsaz_1024_gather5
	.rva	.LSEH_info_rsaz_1024_gather5

.LSEH_info_rsaz_1024_sqr_avx2:
	.rva	rsaz_se_handler
	.rva	.Lsqr_1024_body,.Lsqr_1024_epilogue
.LSEH_info_rsaz_1024_mul_avx2:
	.rva	rsaz_se_handler
	.rva	.Lmul_1024_body,.Lmul_1024_epilogue
.LSEH_info_rsaz_1024_gather5:
	.byte	0x01,0x33,0x16,0x00
	.byte	0x33,0xf8,0x09,0x00	#movaps 0x90(rsp),xmm15
	.byte	0x2e,0xe8,0x08,0x00	#movaps 0x80(rsp),xmm14
	.byte	0x29,0xd8,0x07,0x00	#movaps 0x70(rsp),xmm13
	.byte	0x24,0xc8,0x06,0x00	#movaps 0x60(rsp),xmm12
	.byte	0x1f,0xb8,0x05,0x00	#movaps 0x50(rsp),xmm11
	.byte	0x1a,0xa8,0x04,0x00	#movaps 0x40(rsp),xmm10
	.byte	0x15,0x98,0x03,0x00	#movaps 0x30(rsp),xmm9
	.byte	0x10,0x88,0x02,0x00	#movaps 0x20(rsp),xmm8
	.byte	0x0c,0x78,0x01,0x00	#movaps 0x10(rsp),xmm7
	.byte	0x08,0x68,0x00,0x00	#movaps 0x00(rsp),xmm6
	.byte	0x04,0x01,0x15,0x00	#sub rsp,0xa8
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;

	s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge		or

	s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go		or
	s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go		or
	s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go	or
	s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go	or
	s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
}

print <<___;	# assembler is too old
.globl	rsaz_avx2_eligible
.type	rsaz_avx2_eligible,\@abi-omnipotent
.size	rsaz_avx2_eligible,.-rsaz_avx2_eligible

.globl	rsaz_1024_sqr_avx2
.globl	rsaz_1024_mul_avx2
.globl	rsaz_1024_norm2red_avx2
.globl	rsaz_1024_red2norm_avx2
.globl	rsaz_1024_scatter5_avx2
.globl	rsaz_1024_gather5_avx2
.type	rsaz_1024_sqr_avx2,\@abi-omnipotent
rsaz_1024_norm2red_avx2:
rsaz_1024_red2norm_avx2:
rsaz_1024_scatter5_avx2:
rsaz_1024_gather5_avx2:
	.byte	0x0f,0x0b	# ud2
.size	rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
___