#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives a modest
# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
# than twice (>2x) as fast. The most common case, rsa1024 sign, is
# improved by a respectable 50%. It remains to be seen whether loop
# unrolling and a dedicated squaring routine can provide further
# improvement...

# July 2011.
#
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule the inner loops in such a manner that
# they "fall through" for input lengths of 8, which is critical for
# 1024-bit RSA *sign*. Average performance improvement in comparison
# to the *initial* 2005 version of this module is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# June 2013.
#
# Optimize reduction in the squaring procedure and improve 1024+-bit RSA
# sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually the same on non-Intel processors).

# August 2013.
#
# Add MULX/ADOX/ADCX code path.
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.23);
}

if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.10);
}

if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$addx = ($1>=12);
}

if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
	$addx = ($ver>=3.03);
}
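
# $addx records whether the toolchain can assemble the ADCX/ADOX/MULX
# instructions; it gates emission of the MULX-based code paths below.
# The run-time choice between paths is made separately, on the
# OPENSSL_ia32cap_P capability bits.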

# int bn_mul_mont(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num);
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

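# What follows computes, in whole-number terms, ap*bp*R^-1 mod np with
# R = 2^(64*num), i.e. Montgomery multiplication. As an editorial
# sketch (not used by the build), with Math::BigInt values and writing
# $n0inv for -np^-1 mod R -- the assembly itself only ever needs the
# single word n0 = -np[0]^-1 mod 2^64, folding the reduction in one
# word at a time:
#
#	my $t = $ap * $bp;		# double-width product
#	my $m = ($t * $n0inv) % $R;	# multiple of np to add
#	$t = ($t + $m * $np) / $R;	# exact division by R
#	$t -= $np if $t >= $np;		# final conditional subtraction
#
# The loops below interleave the two multiplications word by word, so
# the temporary vector tp needs only num+1 words plus frame
# bookkeeping.
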
$code=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont
.type	bn_mul_mont,\@function,6
.align	16
bn_mul_mont:
	test	\$3,${num}d
	jnz	.Lmul_enter
	cmp	\$8,${num}d
	jb	.Lmul_enter
___
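
# Dispatch: lengths that are not a multiple of 4, or shorter than 8
# limbs, fall back to the generic loop at .Lmul_enter; num%8==0 with
# bp==ap goes to the dedicated squaring code; everything else takes
# the 4x-unrolled path.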
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
___
$code.=<<___;
	cmp	$ap,$bp
	jne	.Lmul4x_enter
	test	\$7,${num}d
	jz	.Lsqr8x_enter
	jmp	.Lmul4x_enter

.align	16
.Lmul_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	2($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul_body:
	mov	$bp,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$lo0
	mov	($np),%rax

	imulq	$lo0,$m1		# "tp[0]"*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.L1st_enter

.align	16
.L1st:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$lo0,$hi0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.L1st_enter:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	lea	1($j),$j		# j++
	mov	%rdx,$lo0

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.L1st

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1
	mov	$lo0,$hi0

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
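	# tp[num] receives the carry out of the accumulated row: the
	# running value may exceed num words by one bit, and that bit is
	# re-injected ("pull upmost overflow bit") by the next outer
	# iteration and consumed by the final subtraction.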

	lea	1($i),$i		# i++
	jmp	.Louter
.align	16
.Louter:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	$n0,$m1
	mov	(%rsp),$lo0
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$lo0,$m1		# tp[0]*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	8(%rsp),$lo0		# tp[1]
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.Linner_enter

.align	16
.Linner:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.Linner_enter:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	%rdx,$hi0
	adc	\$0,$hi0
	lea	1($j),$j		# j++

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.Linner

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	add	$lo0,$hi1		# pull upmost overflow bit
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	cmp	$num,$i
	jb	.Louter

	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
	mov	$num,$j			# j=num
	jmp	.Lsub
.align	16
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	lea	1($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub

	sbb	\$0,%rax		# handle upmost overflow bit
	xor	$i,$i
	and	%rax,$ap
	not	%rax
	mov	$rp,$np
	and	%rax,$np
	mov	$num,$j			# j=num
	or	$np,$ap			# ap=borrow?tp:rp
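	# Branch-free selection of the source for the copy loop: after
	# "sbb \$0,%rax" the register is all-ones if the subtraction
	# borrowed (result still in tp) and zero otherwise (take the
	# difference already written to rp), so ap = (mask & tp) |
	# (~mask & rp) picks the right vector without a data-dependent
	# branch.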
.align	16
.Lcopy:					# copy or in-place refresh
	mov	($ap,$i,8),%rax
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
	lea	1($i),$i
	sub	\$1,$j
	jnz	.Lcopy

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul_epilogue:
	ret
.size	bn_mul_mont,.-bn_mul_mont
___
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
$code.=<<___;
.type	bn_mul4x_mont,\@function,6
.align	16
bn_mul4x_mont:
.Lmul4x_enter:
___
$code.=<<___ if ($addx);
	and	\$0x80100,%r11d
	cmp	\$0x80100,%r11d
	je	.Lmulx4x_enter
___
$code.=<<___;
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	4($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul4x_body:
	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$A[0]
	mov	($np),%rax

	imulq	$A[0],$m1		# "tp[0]"*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)
	mov	%rdx,$N[0]
	jmp	.L1st4x
.align	16
.L1st4x:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	lea	4($j),$j		# j+=4
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.L1st4x

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
.align	4
.Louter4x:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	(%rsp),$A[0]
	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$A[0],$m1		# tp[0]*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# "$N[0]", discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	add	8(%rsp),$A[1]		# +tp[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)		# tp[j-1]
	mov	%rdx,$N[0]
	jmp	.Linner4x
.align	16
.Linner4x:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	add	8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	4($j),$j		# j+=4
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.Linner4x

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	1($i),$i		# i++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	cmp	$num,$i
	jb	.Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
	mov	16(%rsp,$num,8),$rp	# restore $rp
	mov	0(%rsp),@ri[0]		# tp[0]
	pxor	%xmm0,%xmm0
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$num		# num/=4
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!

	sub	0($np),@ri[0]
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	sbb	8($np),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsub4x
.align	16
.Lsub4x:
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	lea	4($i),$i		# i+=4
	dec	$j			# doesn't affect CF!
	jnz	.Lsub4x

	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	and	@ri[0],$ap
	not	@ri[0]
	mov	$rp,$np
	and	@ri[0],$np
	lea	-1($num),$j
	or	$np,$ap			# ap=borrow?tp:rp

	movdqu	($ap),%xmm1
	movdqa	%xmm0,(%rsp)
	movdqu	%xmm1,($rp)
	jmp	.Lcopy4x
.align	16
.Lcopy4x:				# copy or in-place refresh
	movdqu	16($ap,$i),%xmm2
	movdqu	32($ap,$i),%xmm1
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
	movdqa	%xmm0,32(%rsp,$i)
	movdqu	%xmm1,32($rp,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lcopy4x

	shl	\$2,$num
	movdqu	16($ap,$i),%xmm2
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
___
}
$code.=<<___;
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul4x_epilogue:
	ret
.size	bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
{{{
######################################################################
# void bn_sqr8x_mont(
my $rptr="%rdi";	# const BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, must be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

$code.=<<___ if ($addx);
.extern	bn_sqrx8x_internal		# see x86_64-mont5 module
___
$code.=<<___;
.extern	bn_sqr8x_internal		# see x86_64-mont5 module

.type	bn_sqr8x_mont,\@function,6
.align	32
bn_sqr8x_mont:
.Lsqr8x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,%r10d
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10		# 4*$num
	neg	$num

	##############################################################
	# Ensure that the stack frame doesn't alias with $aptr modulo
	# 4096. This is done to allow the memory disambiguation logic
	# to do its job.
	#
	lea	-64(%rsp,$num,4),%r11
	mov	($n0),$n0		# *n0
	sub	$aptr,%r11
	and	\$4095,%r11
	cmp	%r11,%r10
	jb	.Lsqr8x_sp_alt
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,4),%rsp	# alloca(frame+4*$num)
	jmp	.Lsqr8x_sp_done

.align	32
.Lsqr8x_sp_alt:
	lea	4096-64(,$num,4),%r10	# 4096-frame-4*$num
	lea	-64(%rsp,$num,4),%rsp	# alloca(frame+4*$num)
	sub	%r10,%r11
	mov	\$0,%r10
	cmovc	%r10,%r11
	sub	%r11,%rsp
.Lsqr8x_sp_done:
	and	\$-64,%rsp
	mov	$num,%r10
	neg	$num

	lea	64(%rsp,$num,2),%r11	# copy of modulus
	mov	$n0,  32(%rsp)
	mov	%rax, 40(%rsp)		# save original %rsp
.Lsqr8x_body:

	mov	$num,$i
	movq	%r11, %xmm2		# save pointer to modulus copy
	shr	\$3+2,$i
	mov	OPENSSL_ia32cap_P+8(%rip),%eax
	jmp	.Lsqr8x_copy_n

.align	32
.Lsqr8x_copy_n:
	movq	8*0($nptr),%xmm0
	movq	8*1($nptr),%xmm1
	movq	8*2($nptr),%xmm3
	movq	8*3($nptr),%xmm4
	lea	8*4($nptr),$nptr
	movdqa	%xmm0,16*0(%r11)
	movdqa	%xmm1,16*1(%r11)
	movdqa	%xmm3,16*2(%r11)
	movdqa	%xmm4,16*3(%r11)
	lea	16*4(%r11),%r11
	dec	$i
	jnz	.Lsqr8x_copy_n

	pxor	%xmm0,%xmm0
	movq	$rptr,%xmm1		# save $rptr
	movq	%r10, %xmm3		# -$num
___
$code.=<<___ if ($addx);
	and	\$0x80100,%eax
	cmp	\$0x80100,%eax
	jne	.Lsqr8x_nox

	call	bn_sqrx8x_internal	# see x86_64-mont5 module

	pxor	%xmm0,%xmm0
	lea	48(%rsp),%rax
	lea	64(%rsp,$num,2),%rdx
	shr	\$3+2,$num
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lsqr8x_zero

.align	32
.Lsqr8x_nox:
___
$code.=<<___;
	call	bn_sqr8x_internal	# see x86_64-mont5 module

	pxor	%xmm0,%xmm0
	lea	48(%rsp),%rax
	lea	64(%rsp,$num,2),%rdx
	shr	\$3+2,$num
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lsqr8x_zero

.align	32
.Lsqr8x_zero:
	movdqa	%xmm0,16*0(%rax)	# wipe t
	movdqa	%xmm0,16*1(%rax)
	movdqa	%xmm0,16*2(%rax)
	movdqa	%xmm0,16*3(%rax)
	lea	16*4(%rax),%rax
	movdqa	%xmm0,16*0(%rdx)	# wipe n
	movdqa	%xmm0,16*1(%rdx)
	movdqa	%xmm0,16*2(%rdx)
	movdqa	%xmm0,16*3(%rdx)
	lea	16*4(%rdx),%rdx
	dec	$num
	jnz	.Lsqr8x_zero

	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lsqr8x_epilogue:
	ret
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
___
}}}

if ($addx) {{{
my $bp="%rdx";	# original value

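# Notes on the code path below: MULX (BMI2) is a 64x64->128-bit
# multiply that leaves the flags untouched, while ADCX and ADOX (ADX)
# add with carry through CF and OF respectively. Having two independent
# carry chains is what allows the multiplication and reduction columns
# to be interleaved without saving and restoring flags in between.
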
891 .type bn_mulx4x_mont,\@function,6
892 .align 32
893 bn_mulx4x_mont:
894 .Lmulx4x_enter:
895 mov %rsp,%rax
896 push %rbx
897 push %rbp
898 push %r12
899 push %r13
900 push %r14
901 push %r15
902
903 shl \$3,${num}d # convert $num to bytes
904 .byte 0x67
905 xor %r10,%r10
906 sub $num,%r10 # -$num
907 mov ($n0),$n0 # *n0
908 lea -72(%rsp,%r10),%rsp # alloca(frame+$num+8)
909 lea ($bp,$num),%r10
910 and \$-128,%rsp
911 ##############################################################
912 # Stack layout
913 # +0 num
914 # +8 off-loaded &b[i]
915 # +16 end of b[num]
916 # +24 saved n0
917 # +32 saved rp
918 # +40 saved %rsp
919 # +48 inner counter
920 # +56
921 # +64 tmp[num+1]
922 #
923 mov $num,0(%rsp) # save $num
924 shr \$5,$num
925 mov %r10,16(%rsp) # end of b[num]
926 sub \$1,$num
927 mov $n0, 24(%rsp) # save *n0
928 mov $rp, 32(%rsp) # save $rp
929 mov %rax,40(%rsp) # save original %rsp
930 mov $num,48(%rsp) # inner counter
931 jmp .Lmulx4x_body
932
933 .align 32
934 .Lmulx4x_body:
935 ___
936 my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
937 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
938 my $rptr=$bptr;
939 $code.=<<___;
940 lea 8($bp),$bptr
941 mov ($bp),%rdx # b[0], $bp==%rdx actually
942 lea 64+32(%rsp),$tptr
943 mov %rdx,$bi
944
945 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
946 mulx 1*8($aptr),%r11,%r14 # a[1]*b[0]
947 add %rax,%r11
948 mov $bptr,8(%rsp) # off-load &b[i]
949 mulx 2*8($aptr),%r12,%r13 # ...
950 adc %r14,%r12
951 adc \$0,%r13
952
953 mov $mi,$bptr # borrow $bptr
954 imulq 24(%rsp),$mi # "t[0]"*n0
955 xor $zero,$zero # cf=0, of=0
956
957 mulx 3*8($aptr),%rax,%r14
958 mov $mi,%rdx
959 lea 4*8($aptr),$aptr
960 adcx %rax,%r13
961 adcx $zero,%r14 # cf=0
962
963 mulx 0*8($nptr),%rax,%r10
964 adcx %rax,$bptr # discarded
965 adox %r11,%r10
966 mulx 1*8($nptr),%rax,%r11
967 adcx %rax,%r10
968 adox %r12,%r11
969 .byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 # mulx 2*8($nptr),%rax,%r12
970 mov 48(%rsp),$bptr # counter value
971 mov %r10,-4*8($tptr)
972 adcx %rax,%r11
973 adox %r13,%r12
974 mulx 3*8($nptr),%rax,%r15
975 mov $bi,%rdx
976 mov %r11,-3*8($tptr)
977 adcx %rax,%r12
978 adox $zero,%r15 # of=0
979 lea 4*8($nptr),$nptr
980 mov %r12,-2*8($tptr)
981
982 jmp .Lmulx4x_1st
983
984 .align 32
985 .Lmulx4x_1st:
986 adcx $zero,%r15 # cf=0, modulo-scheduled
987 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
988 adcx %r14,%r10
989 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
990 adcx %rax,%r11
991 mulx 2*8($aptr),%r12,%rax # ...
992 adcx %r14,%r12
993 mulx 3*8($aptr),%r13,%r14
994 .byte 0x67,0x67
995 mov $mi,%rdx
996 adcx %rax,%r13
997 adcx $zero,%r14 # cf=0
998 lea 4*8($aptr),$aptr
999 lea 4*8($tptr),$tptr
1000
1001 adox %r15,%r10
1002 mulx 0*8($nptr),%rax,%r15
1003 adcx %rax,%r10
1004 adox %r15,%r11
1005 mulx 1*8($nptr),%rax,%r15
1006 adcx %rax,%r11
1007 adox %r15,%r12
1008 mulx 2*8($nptr),%rax,%r15
1009 mov %r10,-5*8($tptr)
1010 adcx %rax,%r12
1011 mov %r11,-4*8($tptr)
1012 adox %r15,%r13
1013 mulx 3*8($nptr),%rax,%r15
1014 mov $bi,%rdx
1015 mov %r12,-3*8($tptr)
1016 adcx %rax,%r13
1017 adox $zero,%r15
1018 lea 4*8($nptr),$nptr
1019 mov %r13,-2*8($tptr)
1020
1021 dec $bptr # of=0, pass cf
1022 jnz .Lmulx4x_1st
1023
1024 mov 0(%rsp),$num # load num
1025 mov 8(%rsp),$bptr # re-load &b[i]
1026 adc $zero,%r15 # modulo-scheduled
1027 add %r15,%r14
1028 sbb %r15,%r15 # top-most carry
1029 mov %r14,-1*8($tptr)
1030 jmp .Lmulx4x_outer

.align	32
.Lmulx4x_outer:
	mov	($bptr),%rdx		# b[i]
	lea	8($bptr),$bptr		# b++
	sub	$num,$aptr		# rewind $aptr
	mov	%r15,($tptr)		# save top-most carry
	lea	64+4*8(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr

	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0
	mov	%rdx,$bi
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi
	adcx	%r14,%r11
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	adcx	%r15,%r12
	adox	$zero,%r12
	adcx	$zero,%r13

	mov	$bptr,8(%rsp)		# off-load &b[i]
	.byte	0x67
	mov	$mi,%r15
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	%ebp,%ebp		# xor	$zero,$zero	# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	mov	$mi,%rdx
	adox	-2*8($tptr),%r12
	adcx	%rax,%r13
	adox	-1*8($tptr),%r13
	adcx	$zero,%r14
	lea	4*8($aptr),$aptr
	adox	$zero,%r14

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	lea	4*8($nptr),$nptr
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	mov	48(%rsp),$bptr		# counter value
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_inner

.align	32
.Lmulx4x_inner:
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	adox	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	adox	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	adox	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	mov	$mi,%rdx
	adcx	2*8($tptr),%r12
	adox	%rax,%r13
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-4*8($tptr)
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_inner

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$zero	# pull top-most carry
	adc	%r15,%r14
	mov	-8($nptr),$mi
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)

	cmp	16(%rsp),$bptr
	jne	.Lmulx4x_outer

	sub	%r14,$mi		# compare top-most words
	sbb	$mi,$mi
	or	$mi,%r15

	neg	$num
	xor	%rdx,%rdx
	mov	32(%rsp),$rptr		# restore rp
	lea	64(%rsp),$tptr

	pxor	%xmm0,%xmm0
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	neg	%r8
	jmp	.Lmulx4x_sub_entry

.align	32
.Lmulx4x_sub:
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	not	%r8
.Lmulx4x_sub_entry:
	mov	2*8($nptr,$num),%r10
	not	%r9
	and	%r15,%r8
	mov	3*8($nptr,$num),%r11
	not	%r10
	and	%r15,%r9
	not	%r11
	and	%r15,%r10
	and	%r15,%r11

	neg	%rdx			# mov %rdx,%cf
	adc	0*8($tptr),%r8
	adc	1*8($tptr),%r9
	movdqa	%xmm0,($tptr)
	adc	2*8($tptr),%r10
	adc	3*8($tptr),%r11
	movdqa	%xmm0,16($tptr)
	lea	4*8($tptr),$tptr
	sbb	%rdx,%rdx		# mov %cf,%rdx

	mov	%r8,0*8($rptr)
	mov	%r9,1*8($rptr)
	mov	%r10,2*8($rptr)
	mov	%r11,3*8($rptr)
	lea	4*8($rptr),$rptr

	add	\$32,$num
	jnz	.Lmulx4x_sub

	mov	40(%rsp),%rsi		# restore %rsp
	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lmulx4x_epilogue:
	ret
.size	bn_mulx4x_mont,.-bn_mulx4x_mont
___
}}}
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
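#
# The handlers below unwind this module's custom stack frames:
# HandlerData[] carries the prologue-end and epilogue labels of each
# routine, and a fault between those labels has the callee-saved
# registers recovered from the saved frame before RtlVirtualUnwind
# resumes the normal unwind.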
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	40(%rax),%rax		# pull saved stack pointer

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr8x_mont
	.rva	.LSEH_end_bn_sqr8x_mont
	.rva	.LSEH_info_bn_sqr8x_mont
___
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont
	.rva	.LSEH_end_bn_mulx4x_mont
	.rva	.LSEH_info_bn_mulx4x_mont
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr8x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lsqr8x_body,.Lsqr8x_epilogue	# HandlerData[]
___
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
___
}

print $code;
close STDOUT;