# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for x86_64.
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#
#		IALU/gcc-4.8(*)	AVX(**)		AVX2
# Westmere	1.86/+120%	-
# Sandy Bridge	1.39/+140%	1.10
# Haswell	1.10/+175%	1.11		0.65
# Skylake	1.12/+120%	0.96		0.51
# Silvermont	2.83/+95%	-
# VIA Nano	1.82/+150%	-
# Sledgehammer	1.38/+160%	-
# Bulldozer	2.21/+130%	0.97
#
# (*)	improvement coefficients relative to clang are more modest and
#	are ~50% on most processors, in both cases we are comparing to
#	__int128 code;
# (**)	an SSE2 implementation was attempted, but among non-AVX
#	processors it was faster than integer-only code only on older
#	Intel P4 and Core processors, by 30-50% (less so the newer the
#	processor), while slower on contemporary ones, e.g. almost 2x
#	slower on Atom; as the former are naturally disappearing, SSE2
#	is deemed unnecessary;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=12);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}

open OUT,"| \"$^X\" $xlate $flavour $output";
my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);	# *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
sub poly1305_iteration {
# input:	copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:	$h0-$h2 *= $r0-$r1
	mov	%rax,$h0		# future $h0
	mov	$h2,$h1			# borrow $h1
	imulq	$s1,$h1			# h2*s1
	imulq	$r0,$h2			# h2*r0
	mov	\$-4,%rax		# mask value
	and	$d3,%rax		# last reduction step
########################################################################
# The layout of the opaque area is as follows.
#
#	unsigned __int64 h[3];		# current hash value base 2^64
#	unsigned __int64 r[2];		# key value base 2^64
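#
# An equivalent C view of this area (illustrative only; the code below
# addresses it through raw byte offsets such as 0($ctx) and 24($ctx)):
#
#	struct {
#		unsigned __int64 h[3];	# 0($ctx)..16($ctx)
#		unsigned __int64 r[2];	# 24($ctx), 32($ctx)
#	};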
.extern	OPENSSL_ia32cap_P
.globl	poly1305_blocks

.type	poly1305_init,\@function,3
	mov	%rax,0($ctx)		# initialize hash value
	lea	poly1305_blocks(%rip),%r10
	lea	poly1305_emit(%rip),%r11
___
$code.=<<___	if ($avx);
	mov	OPENSSL_ia32cap_P+4(%rip),%r9
	lea	poly1305_blocks_avx(%rip),%rax
	lea	poly1305_emit_avx(%rip),%rcx
	bt	\$`60-32`,%r9		# AVX?
___
$code.=<<___	if ($avx>1);
	lea	poly1305_blocks_avx2(%rip),%rax
	bt	\$`5+32`,%r9		# AVX2?
___
	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx
.size	poly1305_init,.-poly1305_init
.type	poly1305_blocks,\@function,4
	sub	\$16,$len		# too short?
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
___
	&poly1305_iteration();
$code.=<<___;
	sub	\$16,%r15		# len-=16
	mov	$h0,0($ctx)		# store hash value
.size	poly1305_blocks,.-poly1305_blocks
.type	poly1305_emit,\@function,3
	mov	0($ctx),%r8		# load hash value
	add	\$5,%r8			# compare to modulus
	shr	\$2,%r10		# did 130-bit value overflow?
	add	0($nonce),%rax		# accumulate nonce
	mov	%rax,0($mac)		# write result
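# The "compare to modulus" sequence above is the usual carry trick:
# compute t = h + 5 and, if bit 130 of t is set, h was >= 2^130-5, so t
# (taken mod 2^128 together with the nonce) is the reduced value;
# otherwise keep h. A scalar sketch of what the cmov-s select
# (illustrative only, big-number arithmetic assumed):
#
#	$t = $h + 5;
#	$h = ($t >> 130) ? $t : $h;		# h mod 2^130-5
#	$mac = ($h + $nonce) % 2**128;		# emit low 128 bits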
.size	poly1305_emit,.-poly1305_emit
########################################################################
# The layout of the opaque area is as follows.
#
#	unsigned __int32 h[5];		# current hash value base 2^26
#	unsigned __int32 is_base2_26;
#	unsigned __int64 r[2];		# key value base 2^64
#	unsigned __int64 pad;
#	struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are the base 2^26 digits of powers of the multiplier key.
# There are 5 digits, but the last four are interleaved with multiples
# of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
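#
# The 5*r_n entries exist because reduction mod 2^130-5 folds any bit at
# position 130+k back in as 5*2^k. With base 2^26 digits, products whose
# digit indices sum to 5 or more therefore pick up a factor of 5, and
# precomputing 5*r_n keeps every term a single 32x32->64 multiplication.
# A scalar sketch of the digit split (illustrative only):
#
#	@r5x26  = map { ($r >> 26*$_) & 0x3ffffff } (0..4);	# 5x26-bit digits
#	@five_r = map { 5*$_ } @r5x26[1..4];			# 5*r1..5*r4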
my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    map("%xmm$_",(0..15));
.type	__poly1305_block,\@abi-omnipotent
___
	&poly1305_iteration();
$code.=<<___;
.size	__poly1305_block,.-__poly1305_block
.type	__poly1305_init_avx,\@abi-omnipotent
	lea	48+64($ctx),$ctx	# size optimization
	call	__poly1305_block	# r^2

	mov	\$0x3ffffff,%eax	# save interleaved r^2 and r base 2^26
	mov	%eax,`16*0+0-64`($ctx)
	mov	%edx,`16*0+4-64`($ctx)
	mov	%eax,`16*1+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*1+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*2+0-64`($ctx)
	mov	%edx,`16*2+4-64`($ctx)
	mov	%eax,`16*3+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*3+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*4+0-64`($ctx)
	mov	%edx,`16*4+4-64`($ctx)
	mov	%eax,`16*5+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*5+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*6+0-64`($ctx)
	mov	%edx,`16*6+4-64`($ctx)
	mov	$d1#d,`16*7+0-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d2#d,`16*7+4-64`($ctx)
	lea	($d2,$d2,4),$d2		# *5
	mov	$d1#d,`16*8+0-64`($ctx)
	mov	$d2#d,`16*8+4-64`($ctx)

	call	__poly1305_block	# r^3

	mov	\$0x3ffffff,%eax	# save r^3 base 2^26
	mov	%eax,`16*0+12-64`($ctx)
	mov	%edx,`16*1+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*2+12-64`($ctx)
	mov	%eax,`16*3+12-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%eax,`16*4+12-64`($ctx)
	mov	%edx,`16*5+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*6+12-64`($ctx)
	mov	$d1#d,`16*7+12-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+12-64`($ctx)

	call	__poly1305_block	# r^4

	mov	\$0x3ffffff,%eax	# save r^4 base 2^26
	mov	%eax,`16*0+8-64`($ctx)
	mov	%edx,`16*1+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*2+8-64`($ctx)
	mov	%eax,`16*3+8-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%eax,`16*4+8-64`($ctx)
	mov	%edx,`16*5+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*6+8-64`($ctx)
	mov	$d1#d,`16*7+8-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+8-64`($ctx)

	lea	-48-64($ctx),$ctx	# size [de-]optimization
.size	__poly1305_init_avx,.-__poly1305_init_avx
.type	poly1305_blocks_avx,\@function,4
	mov	20($ctx),%r8d		# is_base2_26
	mov	$len,%r15		# reassign $len
	mov	0($ctx),$d1		# load hash value
	mov	24($ctx),$r0		# load r

	################################# base 2^26 -> base 2^64
	mov	$d2,$r1			# borrow $r1
	adc	\$0,$h2			# can be partially reduced...
	mov	\$-4,$d2		# ... so reduce
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	jz	.Lstore_base2_26_avx

.Lstore_base2_64_avx:
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed
.Lstore_base2_26_avx:
	mov	%rax#d,0($ctx)		# store hash value base 2^26
.Lblocks_avx_epilogue:
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	movl	\$1,20($ctx)		# set is_base2_26
	call	__poly1305_init_avx
.Lbase2_64_avx_epilogue:
	vmovd	4*0($ctx),$H0		# load hash value
___
$code.=<<___	if (!$win64);
___
$code.=<<___	if ($win64);
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
___
$code.=<<___;
	vmovdqu	`16*3`($ctx),$D4	# preload r0^2
	lea	`16*3+64`($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx
	################################################################
	vmovdqu	16*2($inp),$T0
	vmovdqu	16*3($inp),$T1
	vmovdqa	64(%rcx),$MASK		# .Lmask26

	vpsrldq	\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	vpsrlq	\$40,$T4,$T4		# 4
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always
	# expand and copy pre-calculated table to stack
	vmovdqu	`16*1-64`($ctx),$D1
	vmovdqu	`16*2-64`($ctx),$D2
	vpshufd	\$0xEE,$D4,$D3		# 34xx -> 3434
	vpshufd	\$0x44,$D4,$D0		# xx12 -> 1212
	vmovdqa	$D3,-0x90(%r11)
	vmovdqa	$D0,0x00(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vmovdqu	`16*3-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x80(%r11)
	vmovdqa	$D1,0x10(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqu	`16*4-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x70(%r11)
	vmovdqa	$D2,0x20(%rsp)
	vpshufd	\$0xEE,$D0,$D4
	vmovdqu	`16*5-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D4,-0x60(%r11)
	vmovdqa	$D0,0x30(%rsp)
	vpshufd	\$0xEE,$D1,$D3
	vmovdqu	`16*6-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D3,-0x50(%r11)
	vmovdqa	$D1,0x40(%rsp)
	vpshufd	\$0xEE,$D2,$D4
	vmovdqu	`16*7-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D4,-0x40(%r11)
	vmovdqa	$D2,0x50(%rsp)
	vpshufd	\$0xEE,$D0,$D3
	vmovdqu	`16*8-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D3,-0x30(%r11)
	vmovdqa	$D0,0x60(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x20(%r11)
	vmovdqa	$D1,0x70(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x10(%r11)
	vmovdqa	$D2,0x80(%rsp)
	################################################################
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	#   \___________________/
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	#   \___________________/ \____________________/
	#
	# Note that we start with inp[2:3]*r^2. This is because it
	# doesn't depend on the reduction in the previous iteration.
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# though note that $Tx and $Hx are "reversed" in this section,
	# and $D4 is preloaded with r0^2...
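	#
	# A scalar reference for the five formulas above (illustrative
	# only; the vector code evaluates four such sums in parallel):
	#
	#	for my $i (0..4) {
	#		for my $j (0..4) {
	#			my $k = ($i - $j) % 5;		# digit of r
	#			my $w = ($j + $k > 4) ? 5 : 1;	# 2^130 wrap
	#			$d[$i] += $h[$j] * $w * $r[$k];
	#		}
	#	}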
	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vmovdqa		$H2,0x20(%r11)	# offload hash
	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vmovdqa		0x10(%rsp),$H2	# r1^2
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vmovdqa		$H0,0x00(%r11)	#
	vpmuludq	0x20(%rsp),$T4,$H0	# h4*s1
	vmovdqa		$H1,0x10(%r11)	#
	vpmuludq	$T3,$H2,$H1	# h3*r1
	vpaddq		$H0,$D0,$D0	# d0 += h4*s1
	vpaddq		$H1,$D4,$D4	# d4 += h3*r1
	vmovdqa		$H3,0x30(%r11)	#
	vpmuludq	$T2,$H2,$H0	# h2*r1
	vpmuludq	$T1,$H2,$H1	# h1*r1
	vpaddq		$H0,$D3,$D3	# d3 += h2*r1
	vmovdqa		0x30(%rsp),$H3	# r2^2
	vpaddq		$H1,$D2,$D2	# d2 += h1*r1
	vmovdqa		$H4,0x40(%r11)	#
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpmuludq	$T2,$H3,$H0	# h2*r2
	vpaddq		$H2,$D1,$D1	# d1 += h0*r1

	vmovdqa		0x40(%rsp),$H4	# s2^2
	vpaddq		$H0,$D4,$D4	# d4 += h2*r2
	vpmuludq	$T1,$H3,$H1	# h1*r2
	vpmuludq	$T0,$H3,$H3	# h0*r2
	vpaddq		$H1,$D3,$D3	# d3 += h1*r2
	vmovdqa		0x50(%rsp),$H2	# r3^2
	vpaddq		$H3,$D2,$D2	# d2 += h0*r2
	vpmuludq	$T4,$H4,$H0	# h4*s2
	vpmuludq	$T3,$H4,$H4	# h3*s2
	vpaddq		$H0,$D1,$D1	# d1 += h4*s2
	vmovdqa		0x60(%rsp),$H3	# s3^2
	vpaddq		$H4,$D0,$D0	# d0 += h3*s2

	vmovdqa		0x80(%rsp),$H4	# s4^2
	vpmuludq	$T1,$H2,$H1	# h1*r3
	vpmuludq	$T0,$H2,$H2	# h0*r3
	vpaddq		$H1,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$T4,$H3,$H0	# h4*s3
	vpmuludq	$T3,$H3,$H1	# h3*s3
	vpaddq		$H0,$D2,$D2	# d2 += h4*s3
	vmovdqu		16*0($inp),$H0	# load input
	vpaddq		$H1,$D1,$D1	# d1 += h3*s3
	vpmuludq	$T2,$H3,$H3	# h2*s3
	vpmuludq	$T2,$H4,$T2	# h2*s4
	vpaddq		$H3,$D0,$D0	# d0 += h2*s3

	vmovdqu		16*1($inp),$H1	#
	vpaddq		$T2,$D1,$D1	# d1 += h2*s4
	vpmuludq	$T3,$H4,$T3	# h3*s4
	vpmuludq	$T4,$H4,$T4	# h4*s4
	vpsrldq		\$6,$H0,$H2	# splat input
	vpaddq		$T3,$D2,$D2	# d2 += h3*s4
	vpaddq		$T4,$D3,$D3	# d3 += h4*s4
	vpsrldq		\$6,$H1,$H3	#
	vpmuludq	0x70(%rsp),$T0,$T4	# h0*r4
	vpmuludq	$T1,$H4,$T0	# h1*s4
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpaddq		$T4,$D4,$D4	# d4 += h0*r4
	vmovdqa		-0x90(%r11),$T4	# r0^4
	vpaddq		$T0,$D0,$D0	# d0 += h1*s4

	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	#vpsrlq	\$40,$H4,$H4		# 4
	vpsrldq	\$`40/8`,$H4,$H4	# 4
	vpand	$MASK,$H0,$H0		# 0
	vpand	$MASK,$H1,$H1		# 1
	vpand	0(%rcx),$H4,$H4		# .Lmask24
	vpand	$MASK,$H2,$H2		# 2
	vpand	$MASK,$H3,$H3		# 3
	vpor	32(%rcx),$H4,$H4	# padbit, yes, always

	vpaddq	0x00(%r11),$H0,$H0	# add hash value
	vpaddq	0x10(%r11),$H1,$H1
	vpaddq	0x20(%r11),$H2,$H2
	vpaddq	0x30(%r11),$H3,$H3
	vpaddq	0x40(%r11),$H4,$H4
	################################################################
	# Now we accumulate (inp[0:1]+hash)*r^4
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vmovdqa		-0x80(%r11),$T2	# r1^4
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpmuludq	-0x70(%r11),$H4,$T0	# h4*s1
	vpaddq		$T0,$D0,$D0	# d0 += h4*s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq		$T1,$D3,$D3	# d3 += h2*r1
	vmovdqa		-0x60(%r11),$T3	# r2^4
	vpaddq		$T0,$D4,$D4	# d4 += h3*r1
	vpmuludq	$H1,$T2,$T1	# h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq		$T1,$D2,$D2	# d2 += h1*r1
	vpaddq		$T2,$D1,$D1	# d1 += h0*r1

	vmovdqa		-0x50(%r11),$T4	# s2^4
	vpmuludq	$H2,$T3,$T0	# h2*r2
	vpmuludq	$H1,$T3,$T1	# h1*r2
	vpaddq		$T0,$D4,$D4	# d4 += h2*r2
	vpaddq		$T1,$D3,$D3	# d3 += h1*r2
	vmovdqa		-0x40(%r11),$T2	# r3^4
	vpmuludq	$H0,$T3,$T3	# h0*r2
	vpmuludq	$H4,$T4,$T0	# h4*s2
	vpaddq		$T3,$D2,$D2	# d2 += h0*r2
	vpaddq		$T0,$D1,$D1	# d1 += h4*s2
	vmovdqa		-0x30(%r11),$T3	# s3^4
	vpmuludq	$H3,$T4,$T4	# h3*s2
	vpmuludq	$H1,$T2,$T1	# h1*r3
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2

	vmovdqa		-0x10(%r11),$T4	# s4^4
	vpaddq		$T1,$D4,$D4	# d4 += h1*r3
	vpmuludq	$H0,$T2,$T2	# h0*r3
	vpmuludq	$H4,$T3,$T0	# h4*s3
	vpaddq		$T2,$D3,$D3	# d3 += h0*r3
	vpaddq		$T0,$D2,$D2	# d2 += h4*s3
	vmovdqu		16*2($inp),$T0	# load input
	vpmuludq	$H3,$T3,$T2	# h3*s3
	vpmuludq	$H2,$T3,$T3	# h2*s3
	vpaddq		$T2,$D1,$D1	# d1 += h3*s3
	vmovdqu		16*3($inp),$T1	#
	vpaddq		$T3,$D0,$D0	# d0 += h2*s3

	vpmuludq	$H2,$T4,$H2	# h2*s4
	vpmuludq	$H3,$T4,$H3	# h3*s4
	vpsrldq		\$6,$T0,$T2	# splat input
	vpaddq		$H2,$D1,$D1	# d1 += h2*s4
	vpmuludq	$H4,$T4,$H4	# h4*s4
	vpsrldq		\$6,$T1,$T3	#
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*s4
	vpmuludq	-0x20(%r11),$H0,$H4	# h0*r4
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4

	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	#vpsrlq	\$40,$T4,$T4		# 4
	vpsrldq	\$`40/8`,$T4,$T4	# 4
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	0(%rcx),$T4,$T4		# .Lmask24
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always
	################################################################
	# lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
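	#
	# The carry chain below only brings each limb back under ~2^27; it
	# does not fully normalize, which is what makes it "lazy". Note the
	# wrap step: the carry out of h4 re-enters h0 multiplied by 5,
	# since 2^130 == 5 mod 2^130-5. One step in scalar form
	# (illustrative only):
	#
	#	$c = $h[4] >> 26;  $h[4] &= 0x3ffffff;
	#	$h[0] += 5*$c;			# 2^130 wraps to *5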
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpaddq	$D0,$D1,$H1		# h0 -> h1
	vpaddq	$D1,$H2,$H2		# h1 -> h2
	vpaddq	$D0,$H0,$H0		# h4 -> h0
	vpaddq	$D2,$H3,$H3		# h2 -> h3
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	################################################################
	# multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	vpshufd	\$0x10,$D4,$D4		# r0^n, xx12 -> x1x2
	vmovdqa	$H2,0x20(%r11)
	vmovdqa	$H0,0x00(%r11)
	vmovdqa	$H1,0x10(%r11)
	vmovdqa	$H3,0x30(%r11)
	vmovdqa	$H4,0x40(%r11)

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpshufd		\$0x10,`16*1-64`($ctx),$H2	# r1^n
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vpmuludq	$T3,$H2,$H0	# h3*r1
	vpaddq		$H0,$D4,$D4	# d4 += h3*r1
	vpshufd		\$0x10,`16*2-64`($ctx),$H3	# s1^n
	vpmuludq	$T2,$H2,$H1	# h2*r1
	vpaddq		$H1,$D3,$D3	# d3 += h2*r1
	vpshufd		\$0x10,`16*3-64`($ctx),$H4	# r2^n
	vpmuludq	$T1,$H2,$H0	# h1*r1
	vpaddq		$H0,$D2,$D2	# d2 += h1*r1
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpaddq		$H2,$D1,$D1	# d1 += h0*r1
	vpmuludq	$T4,$H3,$H3	# h4*s1
	vpaddq		$H3,$D0,$D0	# d0 += h4*s1

	vpshufd		\$0x10,`16*4-64`($ctx),$H2	# s2^n
	vpmuludq	$T2,$H4,$H1	# h2*r2
	vpaddq		$H1,$D4,$D4	# d4 += h2*r2
	vpmuludq	$T1,$H4,$H0	# h1*r2
	vpaddq		$H0,$D3,$D3	# d3 += h1*r2
	vpshufd		\$0x10,`16*5-64`($ctx),$H3	# r3^n
	vpmuludq	$T0,$H4,$H4	# h0*r2
	vpaddq		$H4,$D2,$D2	# d2 += h0*r2
	vpmuludq	$T4,$H2,$H1	# h4*s2
	vpaddq		$H1,$D1,$D1	# d1 += h4*s2
	vpshufd		\$0x10,`16*6-64`($ctx),$H4	# s3^n
	vpmuludq	$T3,$H2,$H2	# h3*s2
	vpaddq		$H2,$D0,$D0	# d0 += h3*s2

	vpmuludq	$T1,$H3,$H0	# h1*r3
	vpaddq		$H0,$D4,$D4	# d4 += h1*r3
	vpmuludq	$T0,$H3,$H3	# h0*r3
	vpaddq		$H3,$D3,$D3	# d3 += h0*r3
	vpshufd		\$0x10,`16*7-64`($ctx),$H2	# r4^n
	vpmuludq	$T4,$H4,$H1	# h4*s3
	vpaddq		$H1,$D2,$D2	# d2 += h4*s3
	vpshufd		\$0x10,`16*8-64`($ctx),$H3	# s4^n
	vpmuludq	$T3,$H4,$H0	# h3*s3
	vpaddq		$H0,$D1,$D1	# d1 += h3*s3
	vpmuludq	$T2,$H4,$H4	# h2*s3
	vpaddq		$H4,$D0,$D0	# d0 += h2*s3

	vpmuludq	$T0,$H2,$H2	# h0*r4
	vpaddq		$H2,$D4,$D4	# h4 = d4 + h0*r4
	vpmuludq	$T4,$H3,$H1	# h4*s4
	vpaddq		$H1,$D3,$D3	# h3 = d3 + h4*s4
	vpmuludq	$T3,$H3,$H0	# h3*s4
	vpaddq		$H0,$D2,$D2	# h2 = d2 + h3*s4
	vpmuludq	$T2,$H3,$H1	# h2*s4
	vpaddq		$H1,$D1,$D1	# h1 = d1 + h2*s4
	vpmuludq	$T1,$H3,$H3	# h1*s4
	vpaddq		$H3,$D0,$D0	# h0 = d0 + h1*s4

	vmovdqu	16*0($inp),$H0		# load input
	vmovdqu	16*1($inp),$H1

	vpsrldq	\$6,$H0,$H2		# splat input
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	vpsrlq	\$40,$H4,$H4		# 4
	vpand	$MASK,$H0,$H0		# 0
	vpand	$MASK,$H1,$H1		# 1
	vpand	$MASK,$H2,$H2		# 2
	vpand	$MASK,$H3,$H3		# 3
	vpor	32(%rcx),$H4,$H4	# padbit, yes, always

	vpshufd	\$0x32,`16*0-64`($ctx),$T4	# r0^n, 34xx -> x3x4
	vpaddq	0x00(%r11),$H0,$H0	# add hash value
	vpaddq	0x10(%r11),$H1,$H1
	vpaddq	0x20(%r11),$H2,$H2
	vpaddq	0x30(%r11),$H3,$H3
	vpaddq	0x40(%r11),$H4,$H4
	################################################################
	# multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpaddq		$T0,$D0,$D0	# d0 += h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vpaddq		$T1,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpaddq		$T0,$D2,$D2	# d2 += h2*r0
	vpshufd		\$0x32,`16*1-64`($ctx),$T2	# r1^n
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpaddq		$T1,$D3,$D3	# d3 += h3*r0
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpaddq		$T4,$D4,$D4	# d4 += h4*r0

	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq		$T0,$D4,$D4	# d4 += h3*r1
	vpshufd		\$0x32,`16*2-64`($ctx),$T3	# s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpaddq		$T1,$D3,$D3	# d3 += h2*r1
	vpshufd		\$0x32,`16*3-64`($ctx),$T4	# r2
	vpmuludq	$H1,$T2,$T0	# h1*r1
	vpaddq		$T0,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq		$T2,$D1,$D1	# d1 += h0*r1
	vpmuludq	$H4,$T3,$T3	# h4*s1
	vpaddq		$T3,$D0,$D0	# d0 += h4*s1

	vpshufd		\$0x32,`16*4-64`($ctx),$T2	# s2
	vpmuludq	$H2,$T4,$T1	# h2*r2
	vpaddq		$T1,$D4,$D4	# d4 += h2*r2
	vpmuludq	$H1,$T4,$T0	# h1*r2
	vpaddq		$T0,$D3,$D3	# d3 += h1*r2
	vpshufd		\$0x32,`16*5-64`($ctx),$T3	# r3
	vpmuludq	$H0,$T4,$T4	# h0*r2
	vpaddq		$T4,$D2,$D2	# d2 += h0*r2
	vpmuludq	$H4,$T2,$T1	# h4*s2
	vpaddq		$T1,$D1,$D1	# d1 += h4*s2
	vpshufd		\$0x32,`16*6-64`($ctx),$T4	# s3
	vpmuludq	$H3,$T2,$T2	# h3*s2
	vpaddq		$T2,$D0,$D0	# d0 += h3*s2

	vpmuludq	$H1,$T3,$T0	# h1*r3
	vpaddq		$T0,$D4,$D4	# d4 += h1*r3
	vpmuludq	$H0,$T3,$T3	# h0*r3
	vpaddq		$T3,$D3,$D3	# d3 += h0*r3
	vpshufd		\$0x32,`16*7-64`($ctx),$T2	# r4
	vpmuludq	$H4,$T4,$T1	# h4*s3
	vpaddq		$T1,$D2,$D2	# d2 += h4*s3
	vpshufd		\$0x32,`16*8-64`($ctx),$T3	# s4
	vpmuludq	$H3,$T4,$T0	# h3*s3
	vpaddq		$T0,$D1,$D1	# d1 += h3*s3
	vpmuludq	$H2,$T4,$T4	# h2*s3
	vpaddq		$T4,$D0,$D0	# d0 += h2*s3

	vpmuludq	$H0,$T2,$T2	# h0*r4
	vpaddq		$T2,$D4,$D4	# d4 += h0*r4
	vpmuludq	$H4,$T3,$T1	# h4*s4
	vpaddq		$T1,$D3,$D3	# d3 += h4*s4
	vpmuludq	$H3,$T3,$T0	# h3*s4
	vpaddq		$T0,$D2,$D2	# d2 += h3*s4
	vpmuludq	$H2,$T3,$T1	# h2*s4
	vpaddq		$T1,$D1,$D1	# d1 += h2*s4
	vpmuludq	$H1,$T3,$T3	# h1*s4
	vpaddq		$T3,$D0,$D0	# d0 += h1*s4
	################################################################
	# horizontal addition

	################################################################
	vpaddq	$H3,$D4,$D4		# h3 -> h4
	vpaddq	$H0,$D1,$D1		# h0 -> h1
	vpaddq	$H1,$D2,$D2		# h1 -> h2
	vpaddq	$H4,$D0,$D0		# h4 -> h0
	vpaddq	$H2,$D3,$D3		# h2 -> h3
	vpaddq	$H0,$D1,$D1		# h0 -> h1
	vpaddq	$H3,$D4,$D4		# h3 -> h4
	vmovd	$D0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	$D1,`4*1-48-64`($ctx)
	vmovd	$D2,`4*2-48-64`($ctx)
	vmovd	$D3,`4*3-48-64`($ctx)
	vmovd	$D4,`4*4-48-64`($ctx)
___
$code.=<<___	if ($win64);
	vmovdqa	0x50(%r11),%xmm6
	vmovdqa	0x60(%r11),%xmm7
	vmovdqa	0x70(%r11),%xmm8
	vmovdqa	0x80(%r11),%xmm9
	vmovdqa	0x90(%r11),%xmm10
	vmovdqa	0xa0(%r11),%xmm11
	vmovdqa	0xb0(%r11),%xmm12
	vmovdqa	0xc0(%r11),%xmm13
	vmovdqa	0xd0(%r11),%xmm14
	vmovdqa	0xe0(%r11),%xmm15
___
$code.=<<___	if (!$win64);
___
$code.=<<___;
.size	poly1305_blocks_avx,.-poly1305_blocks_avx
.type	poly1305_emit_avx,\@function,3
	cmpl	\$0,20($ctx)		# is_base2_26?
	mov	0($ctx),%eax		# load hash value base 2^26
	shl	\$26,%rcx		# base 2^26 -> base 2^64
	mov	%r10,%rax		# could be partially reduced, so reduce
	add	\$5,%r8			# compare to modulus
	shr	\$2,%r10		# did 130-bit value overflow?
	add	0($nonce),%rax		# accumulate nonce
	mov	%rax,0($mac)		# write result
.size	poly1305_emit_avx,.-poly1305_emit_avx
my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
    map("%ymm$_",(0..15));
my $S4=$MASK;	# alias used for the s4 table entries below

.type	poly1305_blocks_avx2,\@function,4
poly1305_blocks_avx2:
	mov	20($ctx),%r8d		# is_base2_26
	mov	$len,%r15		# reassign $len
	mov	0($ctx),$d1		# load hash value
	mov	24($ctx),$r0		# load r

	################################# base 2^26 -> base 2^64
	mov	$d2,$r1			# borrow $r1
	adc	\$0,$h2			# can be partially reduced...
	mov	\$-4,$d2		# ... so reduce
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

.Lbase2_26_pre_avx2:
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	jnz	.Lbase2_26_pre_avx2

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx2	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	jz	.Lstore_base2_26_avx2

.Lstore_base2_64_avx2:
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed
.Lstore_base2_26_avx2:
	mov	%rax#d,0($ctx)		# store hash value base 2^26
.Lblocks_avx2_epilogue:
.Lbase2_64_avx2_body:
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

.Lbase2_64_pre_avx2:
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	jnz	.Lbase2_64_pre_avx2

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	movl	\$1,20($ctx)		# set is_base2_26
	call	__poly1305_init_avx
.Lbase2_64_avx2_epilogue:
	vmovd	4*0($ctx),%x#$H0	# load hash value base 2^26
	vmovd	4*1($ctx),%x#$H1
	vmovd	4*2($ctx),%x#$H2
	vmovd	4*3($ctx),%x#$H3
	vmovd	4*4($ctx),%x#$H4
___
$code.=<<___	if (!$win64);
___
$code.=<<___	if ($win64);
	lea	-0xf8(%rsp),%r11
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
	lea	48+64($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	# expand and copy pre-calculated table to stack
	vmovdqu	`16*0-64`($ctx),%x#$T2
	vmovdqu	`16*1-64`($ctx),%x#$T3
	vmovdqu	`16*2-64`($ctx),%x#$T4
	vmovdqu	`16*3-64`($ctx),%x#$D0
	vmovdqu	`16*4-64`($ctx),%x#$D1
	vmovdqu	`16*5-64`($ctx),%x#$D2
	vmovdqu	`16*6-64`($ctx),%x#$D3
	vpermq	\$0x15,$T2,$T2		# 00003412 -> 12343434
	vmovdqu	`16*7-64`($ctx),%x#$D4
	vpermq	\$0x15,$T3,$T3
	vpshufd	\$0xc8,$T2,$T2		# 12343434 -> 14243444
	vmovdqu	`16*8-64`($ctx),%x#$MASK
	vpermq	\$0x15,$T4,$T4
	vpshufd	\$0xc8,$T3,$T3
	vmovdqa	$T2,0x00(%rsp)
	vpermq	\$0x15,$D0,$D0
	vpshufd	\$0xc8,$T4,$T4
	vmovdqa	$T3,0x20(%rsp)
	vpermq	\$0x15,$D1,$D1
	vpshufd	\$0xc8,$D0,$D0
	vmovdqa	$T4,0x40(%rsp)
	vpermq	\$0x15,$D2,$D2
	vpshufd	\$0xc8,$D1,$D1
	vmovdqa	$D0,0x60(%rsp)
	vpermq	\$0x15,$D3,$D3
	vpshufd	\$0xc8,$D2,$D2
	vmovdqa	$D1,0x80(%rsp)
	vpermq	\$0x15,$D4,$D4
	vpshufd	\$0xc8,$D3,$D3
	vmovdqa	$D2,0xa0(%rsp)
	vpermq	\$0x15,$MASK,$MASK
	vpshufd	\$0xc8,$D4,$D4
	vmovdqa	$D3,0xc0(%rsp)
	vpshufd	\$0xc8,$MASK,$MASK
	vmovdqa	$D4,0xe0(%rsp)
	vmovdqa	$MASK,0x100(%rsp)
	vmovdqa	64(%rcx),$MASK		# .Lmask26
	################################################################
	vmovdqu	16*0($inp),%x#$T0
	vmovdqu	16*1($inp),%x#$T1
	vinserti128	\$1,16*2($inp),$T0,$T0
	vinserti128	\$1,16*3($inp),$T1,$T1

	vpsrldq	\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T3,$T2,$T2	# 2:3
	vpunpcklqdq	$T1,$T0,$T0	# 0:1

	vpsrlq	\$40,$T4,$T4		# 4
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	lea	0x90(%rsp),%rax		# size optimization
	vpaddq	$H2,$T2,$H2		# accumulate input
	################################################################
	# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
	# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
	# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
	# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
	#   \________/\__________/
	################################################################
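	#
	# In other words, each 64-bit lane of the 256-bit registers carries
	# an independent Horner sum over every 4th block, so one vector
	# multiply by r^4 advances all four sums at once (illustrative
	# scalar form for lane $l over $n blocks):
	#
	#	$lane[$l] = ($lane[$l] + $inp[$l + 4*$_]) * $r4
	#		for (0 .. $n/4 - 1);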
	#vpaddq	$H2,$T2,$H2		# accumulate input
	vmovdqa	`32*0`(%rsp),$T0	# r0^4
	vmovdqa	`32*1`(%rsp),$T1	# r1^4
	vmovdqa	`32*3`(%rsp),$T2	# r2^4
	vmovdqa	`32*6-0x90`(%rax),$T3	# s3^4
	vmovdqa	`32*8-0x90`(%rax),$S4	# s4^4
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# however, as h2 is "chronologically" the first one available,
	# pull the corresponding operations up, so it's
	#
	# d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
	# d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
	# d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
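	#
	# Seeding the accumulators from h2 first means all five d-registers
	# are live as soon as h2 is known, which mirrors the first five
	# vpmuludq below (illustrative grouping only):
	#
	#	($d2,$d3,$d4,$d0,$d1) = map { $h2 * $_ } ($r0,$r1,$r2,$s3,$s4);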
	vpmuludq	$H2,$T0,$D2	# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4	# h0*r1
	vpmuludq	$H1,$T1,$H2	# h1*r1, borrow $H2 as temp
	vpaddq		$T4,$D1,$D1	# d1 += h0*r1
	vpaddq		$H2,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4	# h3*r1
	vpmuludq	`32*2`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4	# d4 += h3*r1
	vpaddq		$H2,$D0,$D0	# d0 += h4*s1
	vmovdqa		`32*4-0x90`(%rax),$T1	# s2

	vpmuludq	$H0,$T0,$T4	# h0*r0
	vpmuludq	$H1,$T0,$H2	# h1*r0
	vpaddq		$T4,$D0,$D0	# d0 += h0*r0
	vpaddq		$H2,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4	# h3*r0
	vpmuludq	$H4,$T0,$H2	# h4*r0
	vmovdqu		16*0($inp),%x#$T0	# load input
	vpaddq		$T4,$D3,$D3	# d3 += h3*r0
	vpaddq		$H2,$D4,$D4	# d4 += h4*r0
	vinserti128	\$1,16*2($inp),$T0,$T0

	vpmuludq	$H3,$T1,$T4	# h3*s2
	vpmuludq	$H4,$T1,$H2	# h4*s2
	vmovdqu		16*1($inp),%x#$T1
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2
	vpaddq		$H2,$D1,$D1	# d1 += h4*s2
	vmovdqa		`32*5-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4	# h1*r2
	vpmuludq	$H0,$T2,$T2	# h0*r2
	vpaddq		$T4,$D3,$D3	# d3 += h1*r2
	vpaddq		$T2,$D2,$D2	# d2 += h0*r2
	vinserti128	\$1,16*3($inp),$T1,$T1

	vpmuludq	$H1,$H2,$T4	# h1*r3
	vpmuludq	$H0,$H2,$H2	# h0*r3
	vpsrldq		\$6,$T0,$T2	# splat input
	vpaddq		$T4,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4	# h3*s3
	vpmuludq	$H4,$T3,$H2	# h4*s3
	vpaddq		$T4,$D1,$D1	# d1 += h3*s3
	vpaddq		$H2,$D2,$D2	# d2 += h4*s3
	vpunpckhqdq	$T1,$T0,$T4	# 4

	vpmuludq	$H3,$S4,$H3	# h3*s4
	vpmuludq	$H4,$S4,$H4	# h4*s4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*r4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*r4
	vpunpcklqdq	$T3,$T2,$T3	# 2:3
	vpmuludq	`32*7-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0	# h1*s4
	vmovdqa		64(%rcx),$MASK	# .Lmask26
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4
	################################################################
	# lazy reduction (interleaved with tail of input splat)

	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpaddq	$D0,$D1,$H1		# h0 -> h1
	vpaddq	$D1,$H2,$H2		# h1 -> h2
	vpaddq	$D4,$H0,$H0		# h4 -> h0
	vpand	$MASK,$T2,$T2		# 2
	vpaddq	$D2,$H3,$H3		# h2 -> h3
	vpaddq	$T2,$H2,$H2		# modulo-scheduled
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpsrlq	\$40,$T4,$T4		# 4
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always
	################################################################
	# while the above multiplications were by r^4 in all lanes, in the
	# last iteration we multiply the least significant lane by r^4 and
	# the most significant one by r, so this is a copy of the above
	# except that references to the precomputed table are displaced
	# by 4...
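	#
	# The +4 displacement selects the neighbouring 32-bit column of the
	# interleaved { r^2, r^1, r^4, r^3 } table, so instead of r^4
	# broadcast to all lanes each lane gets its own finishing power,
	# roughly (hedged reading of the 14243444 shuffle above):
	#
	#	offset+0: (r^4, r^4, r^4, r^4)	# bulk iterations
	#	offset+4: (r^4, r^3, r^2, r^1)	# per-lane final powers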
	#vpaddq	$H2,$T2,$H2		# accumulate input
	vmovdqu	`32*0+4`(%rsp),$T0	# r0^4
	vmovdqu	`32*1+4`(%rsp),$T1	# r1^4
	vmovdqu	`32*3+4`(%rsp),$T2	# r2^4
	vmovdqu	`32*6+4-0x90`(%rax),$T3	# s3^4
	vmovdqu	`32*8+4-0x90`(%rax),$S4	# s4^4

	vpmuludq	$H2,$T0,$D2	# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4	# h0*r1
	vpmuludq	$H1,$T1,$H2	# h1*r1
	vpaddq		$T4,$D1,$D1	# d1 += h0*r1
	vpaddq		$H2,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4	# h3*r1
	vpmuludq	`32*2+4`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4	# d4 += h3*r1
	vpaddq		$H2,$D0,$D0	# d0 += h4*s1

	vpmuludq	$H0,$T0,$T4	# h0*r0
	vpmuludq	$H1,$T0,$H2	# h1*r0
	vpaddq		$T4,$D0,$D0	# d0 += h0*r0
	vmovdqu		`32*4+4-0x90`(%rax),$T1	# s2
	vpaddq		$H2,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4	# h3*r0
	vpmuludq	$H4,$T0,$H2	# h4*r0
	vpaddq		$T4,$D3,$D3	# d3 += h3*r0
	vpaddq		$H2,$D4,$D4	# d4 += h4*r0

	vpmuludq	$H3,$T1,$T4	# h3*s2
	vpmuludq	$H4,$T1,$H2	# h4*s2
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2
	vpaddq		$H2,$D1,$D1	# d1 += h4*s2
	vmovdqu		`32*5+4-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4	# h1*r2
	vpmuludq	$H0,$T2,$T2	# h0*r2
	vpaddq		$T4,$D3,$D3	# d3 += h1*r2
	vpaddq		$T2,$D2,$D2	# d2 += h0*r2

	vpmuludq	$H1,$H2,$T4	# h1*r3
	vpmuludq	$H0,$H2,$H2	# h0*r3
	vpaddq		$T4,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4	# h3*s3
	vpmuludq	$H4,$T3,$H2	# h4*s3
	vpaddq		$T4,$D1,$D1	# d1 += h3*s3
	vpaddq		$H2,$D2,$D2	# d2 += h4*s3

	vpmuludq	$H3,$S4,$H3	# h3*s4
	vpmuludq	$H4,$S4,$H4	# h4*s4
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*r4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*r4
	vpmuludq	`32*7+4-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0	# h1*s4
	vmovdqa		64(%rcx),$MASK	# .Lmask26
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4
	################################################################
	# horizontal addition

	vpermq	\$0x2,$H3,$T3
	vpermq	\$0x2,$H4,$T4
	vpermq	\$0x2,$H0,$T0
	vpermq	\$0x2,$D1,$T1
	vpermq	\$0x2,$H2,$T2

	################################################################
	vpaddq	$D3,$H4,$H4		# h3 -> h4
	vpaddq	$D0,$D1,$H1		# h0 -> h1
	vpaddq	$D1,$H2,$H2		# h1 -> h2
	vpaddq	$D4,$H0,$H0		# h4 -> h0
	vpaddq	$D2,$H3,$H3		# h2 -> h3
	vpaddq	$D0,$H1,$H1		# h0 -> h1
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vmovd	%x#$H0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	%x#$H1,`4*1-48-64`($ctx)
	vmovd	%x#$H2,`4*2-48-64`($ctx)
	vmovd	%x#$H3,`4*3-48-64`($ctx)
	vmovd	%x#$H4,`4*4-48-64`($ctx)
___
$code.=<<___	if ($win64);
	vmovdqa	0x50(%r11),%xmm6
	vmovdqa	0x60(%r11),%xmm7
	vmovdqa	0x70(%r11),%xmm8
	vmovdqa	0x80(%r11),%xmm9
	vmovdqa	0x90(%r11),%xmm10
	vmovdqa	0xa0(%r11),%xmm11
	vmovdqa	0xb0(%r11),%xmm12
	vmovdqa	0xc0(%r11),%xmm13
	vmovdqa	0xd0(%r11),%xmm14
	vmovdqa	0xe0(%r11),%xmm15
___
$code.=<<___	if (!$win64);
___
$code.=<<___;
.size	poly1305_blocks_avx2,.-poly1305_blocks_avx2
.long	0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
.long	1<<24,0,1<<24,0,1<<24,0,1<<24,0
.long	0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
.long	5,0,5,0,5,0,5,0
.asciz	"Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler
.type	avx_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	208($context),%rax	# pull context->R11
	lea	512($context),%rdi	# &context.Xmm6
	.long	0xa548f3fc		# cld; rep movsq

	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	avx_handler,.-avx_handler
	.rva	.LSEH_begin_poly1305_init
	.rva	.LSEH_end_poly1305_init
	.rva	.LSEH_info_poly1305_init

	.rva	.LSEH_begin_poly1305_blocks
	.rva	.LSEH_end_poly1305_blocks
	.rva	.LSEH_info_poly1305_blocks

	.rva	.LSEH_begin_poly1305_emit
	.rva	.LSEH_end_poly1305_emit
	.rva	.LSEH_info_poly1305_emit
___
$code.=<<___	if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1
	.rva	.LSEH_info_poly1305_blocks_avx_2
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3

	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
___
$code.=<<___	if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2
	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3
.LSEH_info_poly1305_init:
	.rva	.LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init

.LSEH_info_poly1305_blocks:
	.rva	.Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit:
	.rva	.LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
___
$code.=<<___	if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]

.LSEH_info_poly1305_emit_avx:
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
___
$code.=<<___	if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]
foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/%r([a-z]+)#d/%e$1/g;
	s/%r([0-9]+)#d/%r$1d/g;