#! /usr/bin/env perl
# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives a modest
# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
# than twice (>2x) as fast. The most common case, rsa1024 sign, is
# improved by a respectable 50%. It remains to be seen whether loop
# unrolling and a dedicated squaring routine can provide further
# improvement...

# July 2011.
#
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule inner loops in such manner that they
# are "fallen through" for input lengths of 8, which is critical for
# 1024-bit RSA *sign*. Average performance improvement in comparison
# to *initial* version of this module from 2005 is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# June 2013.
#
# Optimize reduction in squaring procedure and improve 1024+-bit RSA
# sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually same on non-Intel processors).

# August 2013.
#
# Add MULX/ADOX/ADCX code path.
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.23);
}

if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.10);
}

if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$addx = ($1>=12);
}

if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
	$addx = ($ver>=3.03);
}
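
# The MULX/ADCX/ADOX path probed for above exploits two independent
# carry chains: MULX leaves the flags untouched, while ADCX
# consumes/produces only CF and ADOX only OF. A rough intrinsics
# sketch of one step (illustrative only; the variable names are
# hypothetical, assumes <immintrin.h> and an ADX-capable target):
#
#	unsigned long long a, b, acc0, acc1, lo, hi;
#	unsigned char cf = 0, of = 0;
#	lo = _mulx_u64(a, b, &hi);		  /* mulx: flags preserved */
#	cf = _addcarryx_u64(cf, acc0, lo, &acc0); /* CF chain (adcx)       */
#	of = _addcarryx_u64(of, acc1, hi, &acc1); /* OF chain (adox)       */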

# int bn_mul_mont(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num);
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

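# The loops below implement word-by-word Montgomery multiplication,
# interleaving the ap[j]*bp[i] pass with the np[j]*m1 reduction pass.
# A minimal C sketch of the same algorithm (illustrative only;
# mont_mul_ref is a hypothetical name, assumes <stdint.h>, <string.h>
# and a 128-bit integer type):
#
#	typedef unsigned __int128 u128;
#
#	static void mont_mul_ref(uint64_t *rp, const uint64_t *ap,
#	                         const uint64_t *bp, const uint64_t *np,
#	                         uint64_t n0, int num)
#	{
#	    uint64_t tp[num + 1];	/* tp[num] is the overflow word */
#	    memset(tp, 0, sizeof(uint64_t) * (num + 1));
#
#	    for (int i = 0; i < num; i++) {
#	        u128 ab = (u128)ap[0] * bp[i] + tp[0];
#	        uint64_t m1 = (uint64_t)ab * n0;  /* "tp[0]"*n0 mod 2^64 */
#	        u128 nm = (u128)np[0] * m1 + (uint64_t)ab; /* low word 0 */
#	        uint64_t hi0 = (uint64_t)(ab >> 64);
#	        uint64_t hi1 = (uint64_t)(nm >> 64);
#
#	        for (int j = 1; j < num; j++) {
#	            ab  = (u128)ap[j] * bp[i] + hi0 + tp[j];
#	            hi0 = (uint64_t)(ab >> 64);
#	            nm  = (u128)np[j] * m1 + hi1 + (uint64_t)ab;
#	            hi1 = (uint64_t)(nm >> 64);
#	            tp[j - 1] = (uint64_t)nm;
#	        }
#	        u128 top = (u128)hi0 + hi1 + tp[num];
#	        tp[num - 1] = (uint64_t)top;
#	        tp[num] = (uint64_t)(top >> 64); /* upmost overflow bit */
#	    }
#
#	    /* conditional final subtraction: rp = tp - np unless that
#	       borrows, in which case rp = tp */
#	    uint64_t borrow = 0;
#	    for (int j = 0; j < num; j++) {
#	        u128 d = (u128)tp[j] - np[j] - borrow;
#	        rp[j] = (uint64_t)d;
#	        borrow = (uint64_t)(d >> 64) & 1;
#	    }
#	    if (tp[num] < borrow)	/* tp < np: keep tp */
#	        memcpy(rp, tp, sizeof(uint64_t) * num);
#	}
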
$code=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont
.type	bn_mul_mont,\@function,6
.align	16
bn_mul_mont:
	test	\$3,${num}d
	jnz	.Lmul_enter
	cmp	\$8,${num}d
	jb	.Lmul_enter
___
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
___
$code.=<<___;
	cmp	$ap,$bp
	jne	.Lmul4x_enter
	test	\$7,${num}d
	jz	.Lsqr8x_enter
	jmp	.Lmul4x_enter
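
	# The dispatch above amounts to (C-style sketch, illustrative
	# only):
	#
	#	if (num % 4 != 0 || num < 8)
	#		goto mul;	/* generic 1x loop	     */
	#	else if (bp == ap && num % 8 == 0)
	#		goto sqr8x;	/* dedicated squaring	     */
	#	else
	#		goto mul4x;	/* 4x unrolled (or mulx4x)   */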

.align	16
.Lmul_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	2($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul_body:
	# An OS-agnostic version of __chkstk.
	#
	# Some OSes (Windows) insist on the stack being "wired" to
	# physical memory in a strictly sequential manner, i.e. if a
	# stack allocation spans two pages, then a reference to the
	# farther one can be punished with a SEGV. But page walking
	# does good even on other OSes, because it guarantees that a
	# villain thread hits the guard page before it can do damage
	# to an innocent one...
	sub	%rsp,%r11
	and	\$-4096,%r11
.Lmul_page_walk:
	mov	(%rsp,%r11),%r10
	sub	\$4096,%r11
	.byte	0x66,0x2e		# predict non-taken
	jnc	.Lmul_page_walk
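
	# In C terms, the walk above is roughly (illustrative sketch):
	#
	#	for (char *p = old_sp - 4096; p >= new_sp; p -= 4096)
	#		(void)*(volatile char *)p; /* touch one word/page */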

	mov	$bp,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$lo0
	mov	($np),%rax

	imulq	$lo0,$m1		# "tp[0]"*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.L1st_enter

.align	16
.L1st:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$lo0,$hi0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.L1st_enter:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	lea	1($j),$j		# j++
	mov	%rdx,$lo0

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.L1st

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1
	mov	$lo0,$hi0

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	jmp	.Louter
.align	16
.Louter:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	$n0,$m1
	mov	(%rsp),$lo0
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$lo0,$m1		# tp[0]*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	8(%rsp),$lo0		# tp[1]
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.Linner_enter

.align	16
.Linner:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.Linner_enter:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	%rdx,$hi0
	adc	\$0,$hi0
	lea	1($j),$j		# j++

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.Linner

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	add	$lo0,$hi1		# pull upmost overflow bit
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	cmp	$num,$i
	jb	.Louter

	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
	mov	$num,$j			# j=num
	jmp	.Lsub
.align	16
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	lea	1($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub

	sbb	\$0,%rax		# handle upmost overflow bit
	xor	$i,$i
	and	%rax,$ap
	not	%rax
	mov	$rp,$np
	and	%rax,$np
	mov	$num,$j			# j=num
	or	$np,$ap			# ap=borrow?tp:rp
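
	# The and/not/or sequence above selects the copy source without
	# a branch; in C terms (illustrative sketch):
	#
	#	mask = 0 - borrow;  /* all ones iff tp - np borrowed */
	#	src  = ((uintptr_t)tp & mask) | ((uintptr_t)rp & ~mask);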
.align	16
.Lcopy:					# copy or in-place refresh
	mov	($ap,$i,8),%rax
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
	lea	1($i),$i
	sub	\$1,$j
	jnz	.Lcopy

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul_epilogue:
	ret
.size	bn_mul_mont,.-bn_mul_mont
___
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
$code.=<<___;
.type	bn_mul4x_mont,\@function,6
.align	16
bn_mul4x_mont:
.Lmul4x_enter:
___
$code.=<<___ if ($addx);
	and	\$0x80100,%r11d
	cmp	\$0x80100,%r11d
	je	.Lmulx4x_enter
___
$code.=<<___;
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	4($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul4x_body:
	sub	%rsp,%r11
	and	\$-4096,%r11
.Lmul4x_page_walk:
	mov	(%rsp,%r11),%r10
	sub	\$4096,%r11
	.byte	0x2e			# predict non-taken
	jnc	.Lmul4x_page_walk

	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$A[0]
	mov	($np),%rax

	imulq	$A[0],$m1		# "tp[0]"*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)
	mov	%rdx,$N[0]
	jmp	.L1st4x
.align	16
.L1st4x:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	lea	4($j),$j		# j+=4
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.L1st4x

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
.align	4
.Louter4x:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	(%rsp),$A[0]
	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$A[0],$m1		# tp[0]*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# "$N[0]", discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	add	8(%rsp),$A[1]		# +tp[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)		# tp[j-1]
	mov	%rdx,$N[0]
	jmp	.Linner4x
.align	16
.Linner4x:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	add	8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	4($j),$j		# j+=4
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.Linner4x

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	1($i),$i		# i++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	cmp	$num,$i
	jb	.Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
	mov	16(%rsp,$num,8),$rp	# restore $rp
	mov	0(%rsp),@ri[0]		# tp[0]
	pxor	%xmm0,%xmm0
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$num		# num/=4
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!

	sub	0($np),@ri[0]
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	sbb	8($np),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsub4x
.align	16
.Lsub4x:
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	lea	4($i),$i		# i+=4
	dec	$j			# doesn't affect CF!
	jnz	.Lsub4x

	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	and	@ri[0],$ap
	not	@ri[0]
	mov	$rp,$np
	and	@ri[0],$np
	lea	-1($num),$j
	or	$np,$ap			# ap=borrow?tp:rp

	movdqu	($ap),%xmm1
	movdqa	%xmm0,(%rsp)
	movdqu	%xmm1,($rp)
	jmp	.Lcopy4x
.align	16
.Lcopy4x:				# copy or in-place refresh
	movdqu	16($ap,$i),%xmm2
	movdqu	32($ap,$i),%xmm1
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
	movdqa	%xmm0,32(%rsp,$i)
	movdqu	%xmm1,32($rp,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lcopy4x

	shl	\$2,$num
	movdqu	16($ap,$i),%xmm2
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
___
}
$code.=<<___;
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul4x_epilogue:
	ret
.size	bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
{{{
######################################################################
# void bn_sqr8x_mont(
my $rptr="%rdi";	# const BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

$code.=<<___ if ($addx);
.extern	bn_sqrx8x_internal		# see x86_64-mont5 module
___
$code.=<<___;
.extern	bn_sqr8x_internal		# see x86_64-mont5 module

.type	bn_sqr8x_mont,\@function,6
.align	32
bn_sqr8x_mont:
.Lsqr8x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,%r10d
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10		# 4*$num
	neg	$num

	##############################################################
	# ensure that the stack frame doesn't alias with $aptr modulo
	# 4096. This is done to allow the memory disambiguation logic
	# to do its job.
	#
	lea	-64(%rsp,$num,2),%r11
	mov	($n0),$n0		# *n0
	sub	$aptr,%r11
	and	\$4095,%r11
	cmp	%r11,%r10
	jb	.Lsqr8x_sp_alt
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	jmp	.Lsqr8x_sp_done

.align	32
.Lsqr8x_sp_alt:
	lea	4096-64(,$num,2),%r10	# 4096-frame-2*$num
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	sub	%r10,%r11
	mov	\$0,%r10
	cmovc	%r10,%r11
	sub	%r11,%rsp
.Lsqr8x_sp_done:
	and	\$-64,%rsp
	mov	%rax,%r11
	sub	%rsp,%r11
	and	\$-4096,%r11
.Lsqr8x_page_walk:
	mov	(%rsp,%r11),%r10
	sub	\$4096,%r11
	.byte	0x2e			# predict non-taken
	jnc	.Lsqr8x_page_walk

	mov	$num,%r10
	neg	$num

	mov	$n0,  32(%rsp)
	mov	%rax, 40(%rsp)		# save original %rsp
.Lsqr8x_body:

	movq	$nptr, %xmm2		# save pointer to modulus
	pxor	%xmm0,%xmm0
	movq	$rptr,%xmm1		# save $rptr
	movq	%r10, %xmm3		# -$num
___
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%eax
	and	\$0x80100,%eax
	cmp	\$0x80100,%eax
	jne	.Lsqr8x_nox

	call	bn_sqrx8x_internal	# see x86_64-mont5 module
					# %rax	top-most carry
					# %rbp	nptr
					# %rcx	-8*num
					# %r8	end of tp[2*num]
	lea	(%r8,%rcx),%rbx
	mov	%rcx,$num
	mov	%rcx,%rdx
	movq	%xmm1,$rptr
	sar	\$3+2,%rcx		# %cf=0
	jmp	.Lsqr8x_sub

.align	32
.Lsqr8x_nox:
___
$code.=<<___;
	call	bn_sqr8x_internal	# see x86_64-mont5 module
					# %rax	top-most carry
					# %rbp	nptr
					# %r8	-8*num
					# %rdi	end of tp[2*num]
	lea	(%rdi,$num),%rbx
	mov	$num,%rcx
	mov	$num,%rdx
	movq	%xmm1,$rptr
	sar	\$3+2,%rcx		# %cf=0
	jmp	.Lsqr8x_sub

.align	32
.Lsqr8x_sub:
	mov	8*0(%rbx),%r12
	mov	8*1(%rbx),%r13
	mov	8*2(%rbx),%r14
	mov	8*3(%rbx),%r15
	lea	8*4(%rbx),%rbx
	sbb	8*0(%rbp),%r12
	sbb	8*1(%rbp),%r13
	sbb	8*2(%rbp),%r14
	sbb	8*3(%rbp),%r15
	lea	8*4(%rbp),%rbp
	mov	%r12,8*0($rptr)
	mov	%r13,8*1($rptr)
	mov	%r14,8*2($rptr)
	mov	%r15,8*3($rptr)
	lea	8*4($rptr),$rptr
	inc	%rcx			# preserves %cf
	jnz	.Lsqr8x_sub

	sbb	\$0,%rax		# top-most carry
	lea	(%rbx,$num),%rbx	# rewind
	lea	($rptr,$num),$rptr	# rewind

	movq	%rax,%xmm1
	pxor	%xmm0,%xmm0
	pshufd	\$0,%xmm1,%xmm1
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lsqr8x_cond_copy

.align	32
.Lsqr8x_cond_copy:
	movdqa	16*0(%rbx),%xmm2
	movdqa	16*1(%rbx),%xmm3
	lea	16*2(%rbx),%rbx
	movdqu	16*0($rptr),%xmm4
	movdqu	16*1($rptr),%xmm5
	lea	16*2($rptr),$rptr
	movdqa	%xmm0,-16*2(%rbx)	# zero tp
	movdqa	%xmm0,-16*1(%rbx)
	movdqa	%xmm0,-16*2(%rbx,%rdx)
	movdqa	%xmm0,-16*1(%rbx,%rdx)
	pcmpeqd	%xmm1,%xmm0
	pand	%xmm1,%xmm2
	pand	%xmm1,%xmm3
	pand	%xmm0,%xmm4
	pand	%xmm0,%xmm5
	pxor	%xmm0,%xmm0
	por	%xmm2,%xmm4
	por	%xmm3,%xmm5
	movdqu	%xmm4,-16*2($rptr)
	movdqu	%xmm5,-16*1($rptr)
	add	\$32,$num
	jnz	.Lsqr8x_cond_copy
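
	# The loop above is a branchless masked copy: %xmm1 holds the
	# borrow broadcast to all lanes, so in C terms (illustrative
	# sketch):
	#
	#	uint64_t mask = 0 - borrow;
	#	for (i = 0; i < num; i++) {
	#		rp[i] = (tp[i] & mask) | (rp[i] & ~mask);
	#		tp[i] = 0;	/* zap temporary vector */
	#	}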

	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lsqr8x_epilogue:
	ret
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
___
}}}

if ($addx) {{{
my $bp="%rdx";	# original value

$code.=<<___;
.type	bn_mulx4x_mont,\@function,6
.align	32
bn_mulx4x_mont:
.Lmulx4x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	shl	\$3,${num}d		# convert $num to bytes
	.byte	0x67
	xor	%r10,%r10
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-72(%rsp,%r10),%rsp	# alloca(frame+$num+8)
	and	\$-128,%rsp
	mov	%rax,%r11
	sub	%rsp,%r11
	and	\$-4096,%r11
.Lmulx4x_page_walk:
	mov	(%rsp,%r11),%r10
	sub	\$4096,%r11
	.byte	0x66,0x2e		# predict non-taken
	jnc	.Lmulx4x_page_walk

	lea	($bp,$num),%r10
	##############################################################
	# Stack layout
	# +0	num
	# +8	off-loaded &b[i]
	# +16	end of b[num]
	# +24	saved n0
	# +32	saved rp
	# +40	saved %rsp
	# +48	inner counter
	# +56
	# +64	tmp[num+1]
	#
	mov	$num,0(%rsp)		# save $num
	shr	\$5,$num
	mov	%r10,16(%rsp)		# end of b[num]
	sub	\$1,$num
	mov	$n0, 24(%rsp)		# save *n0
	mov	$rp, 32(%rsp)		# save $rp
	mov	%rax,40(%rsp)		# save original %rsp
	mov	$num,48(%rsp)		# inner counter
	jmp	.Lmulx4x_body

.align	32
.Lmulx4x_body:
___
my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $rptr=$bptr;
$code.=<<___;
	lea	8($bp),$bptr
	mov	($bp),%rdx		# b[0], $bp==%rdx actually
	lea	64+32(%rsp),$tptr
	mov	%rdx,$bi

	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[1]*b[0]
	add	%rax,%r11
	mov	$bptr,8(%rsp)		# off-load &b[i]
	mulx	2*8($aptr),%r12,%r13	# ...
	adc	%r14,%r12
	adc	\$0,%r13

	mov	$mi,$bptr		# borrow $bptr
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	mov	$mi,%rdx
	lea	4*8($aptr),$aptr
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,$bptr		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00	# mulx	2*8($nptr),%rax,%r12
	mov	48(%rsp),$bptr		# counter value
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	lea	4*8($nptr),$nptr
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_1st

.align	32
.Lmulx4x_1st:
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	adcx	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	adcx	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	.byte	0x67,0x67
	mov	$mi,%rdx
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	mov	%r11,-4*8($tptr)
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_1st

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	add	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)
	jmp	.Lmulx4x_outer

.align	32
.Lmulx4x_outer:
	mov	($bptr),%rdx		# b[i]
	lea	8($bptr),$bptr		# b++
	sub	$num,$aptr		# rewind $aptr
	mov	%r15,($tptr)		# save top-most carry
	lea	64+4*8(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr

	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0
	mov	%rdx,$bi
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi
	adcx	%r14,%r11
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	adcx	%r15,%r12
	adox	$zero,%r12
	adcx	$zero,%r13

	mov	$bptr,8(%rsp)		# off-load &b[i]
	.byte	0x67
	mov	$mi,%r15
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	mov	$mi,%rdx
	adox	-2*8($tptr),%r12
	adcx	%rax,%r13
	adox	-1*8($tptr),%r13
	adcx	$zero,%r14
	lea	4*8($aptr),$aptr
	adox	$zero,%r14

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	lea	4*8($nptr),$nptr
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	mov	48(%rsp),$bptr		# counter value
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_inner

.align	32
.Lmulx4x_inner:
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	adox	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	adox	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	adox	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	mov	$mi,%rdx
	adcx	2*8($tptr),%r12
	adox	%rax,%r13
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-4*8($tptr)
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_inner

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$zero	# pull top-most carry
	adc	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)

	cmp	16(%rsp),$bptr
	jne	.Lmulx4x_outer

	lea	64(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr
	neg	%r15
	mov	$num,%rdx
	shr	\$3+2,$num		# %cf=0
	mov	32(%rsp),$rptr		# restore rp
	jmp	.Lmulx4x_sub

.align	32
.Lmulx4x_sub:
	mov	8*0($tptr),%r11
	mov	8*1($tptr),%r12
	mov	8*2($tptr),%r13
	mov	8*3($tptr),%r14
	lea	8*4($tptr),$tptr
	sbb	8*0($nptr),%r11
	sbb	8*1($nptr),%r12
	sbb	8*2($nptr),%r13
	sbb	8*3($nptr),%r14
	lea	8*4($nptr),$nptr
	mov	%r11,8*0($rptr)
	mov	%r12,8*1($rptr)
	mov	%r13,8*2($rptr)
	mov	%r14,8*3($rptr)
	lea	8*4($rptr),$rptr
	dec	$num			# preserves %cf
	jnz	.Lmulx4x_sub

	sbb	\$0,%r15		# top-most carry
	lea	64(%rsp),$tptr
	sub	%rdx,$rptr		# rewind

	movq	%r15,%xmm1
	pxor	%xmm0,%xmm0
	pshufd	\$0,%xmm1,%xmm1
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lmulx4x_cond_copy

.align	32
.Lmulx4x_cond_copy:
	movdqa	16*0($tptr),%xmm2
	movdqa	16*1($tptr),%xmm3
	lea	16*2($tptr),$tptr
	movdqu	16*0($rptr),%xmm4
	movdqu	16*1($rptr),%xmm5
	lea	16*2($rptr),$rptr
	movdqa	%xmm0,-16*2($tptr)	# zero tp
	movdqa	%xmm0,-16*1($tptr)
	pcmpeqd	%xmm1,%xmm0
	pand	%xmm1,%xmm2
	pand	%xmm1,%xmm3
	pand	%xmm0,%xmm4
	pand	%xmm0,%xmm5
	pxor	%xmm0,%xmm0
	por	%xmm2,%xmm4
	por	%xmm3,%xmm5
	movdqu	%xmm4,-16*2($rptr)
	movdqu	%xmm5,-16*1($rptr)
	sub	\$32,%rdx
	jnz	.Lmulx4x_cond_copy

	mov	%rdx,($tptr)

	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lmulx4x_epilogue:
	ret
.size	bn_mulx4x_mont,.-bn_mulx4x_mont
___
}}}
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<.Lsqr_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lsqr_epilogue
	jae	.Lcommon_seh_tail

	mov	40(%rax),%rax		# pull saved stack pointer

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr8x_mont
	.rva	.LSEH_end_bn_sqr8x_mont
	.rva	.LSEH_info_bn_sqr8x_mont
___
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont
	.rva	.LSEH_end_bn_mulx4x_mont
	.rva	.LSEH_info_bn_mulx4x_mont
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr8x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lsqr8x_body,.Lsqr8x_epilogue	# HandlerData[]
___
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
___
}

print $code;
close STDOUT;