1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # This module implements Poly1305 hash for x86_64.
18 #
19 # March 2015
20 #
21 # Initial release.
22 #
23 # December 2016
24 #
25 # Add AVX512F+VL+BW code path.
26 #
27 # Numbers are cycles per processed byte with poly1305_blocks alone,
28 # measured with rdtsc at fixed clock frequency.
29 #
30 #                  IALU/gcc-4.8(*)  AVX(**)  AVX2
31 # P4               4.46/+120%       -
32 # Core 2           2.41/+90%        -
33 # Westmere         1.88/+120%       -
34 # Sandy Bridge     1.39/+140%       1.10
35 # Haswell          1.14/+175%       1.11     0.65
36 # Skylake          1.13/+120%       0.96     0.51
37 # Silvermont       2.83/+95%        -
38 # Goldmont         1.70/+180%       -
39 # VIA Nano         1.82/+150%       -
40 # Sledgehammer     1.38/+160%       -
41 # Bulldozer        2.30/+130%       0.97
42 #
43 # (*) improvement coefficients relative to clang are more modest and
44 # are ~50% on most processors; in both cases we are comparing to
45 # __int128 code;
46 # (**) an SSE2 implementation was attempted, but among non-AVX processors
47 # it was faster than integer-only code only on older Intel P4 and
48 # Core processors, by 30-50%, with the gain shrinking on newer ones; it
49 # is slower on contemporary ones, for example almost 2x slower on Atom,
50 # and as the former are naturally disappearing, SSE2 is deemed unnecessary;
51
52 $flavour = shift;
53 $output = shift;
54 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
55
56 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
57
58 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
59 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
60 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
61 die "can't locate x86_64-xlate.pl";
62
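# Probe the assembler/compiler below to decide how much SIMD code it can
# handle: $avx=1 enables the AVX path, 2 adds AVX2, and >2 adds AVX512.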
63 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
64 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
65 $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
66 }
67
68 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
69 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
70 $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
71 $avx += 1 if ($1==2.11 && $2>=8);
72 }
73
74 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
75 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
76 $avx = ($1>=10) + ($1>=12);
77 }
78
79 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
80 $avx = ($2>=3.0) + ($2>3.0);
81 }
82
83 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
84 *STDOUT=*OUT;
85
86 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
87 my ($mac,$nonce)=($inp,$len); # *_emit arguments
88 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
89 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
90
91 sub poly1305_iteration {
92 # input: copy of $r1 in %rax, $h0-$h2, $r0-$r1
93 # output: $h0-$h2 *= $r0-$r1
94 $code.=<<___;
95 mulq $h0 # h0*r1
96 mov %rax,$d2
97 mov $r0,%rax
98 mov %rdx,$d3
99
100 mulq $h0 # h0*r0
101 mov %rax,$h0 # future $h0
102 mov $r0,%rax
103 mov %rdx,$d1
104
105 mulq $h1 # h1*r0
106 add %rax,$d2
107 mov $s1,%rax
108 adc %rdx,$d3
109
110 mulq $h1 # h1*s1
111 mov $h2,$h1 # borrow $h1
112 add %rax,$h0
113 adc %rdx,$d1
114
115 imulq $s1,$h1 # h2*s1
116 add $h1,$d2
117 mov $d1,$h1
118 adc \$0,$d3
119
120 imulq $r0,$h2 # h2*r0
121 add $d2,$h1
122 mov \$-4,%rax # mask value
123 adc $h2,$d3
124
125 and $d3,%rax # last reduction step
126 mov $d3,$h2
127 shr \$2,$d3
128 and \$3,$h2
129 add $d3,%rax
130 add %rax,$h0
131 adc \$0,$h1
132 adc \$0,$h2
133 ___
134 }
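
# The iteration above computes h *= r modulo 2^130-5 over 64-bit limbs;
# the "last reduction step" folds the bits at and above 2^130 back into
# the low limbs multiplied by 5 (the -4 mask computes 5*(d3>>2), i.e.
# 5*(h>>130)). A minimal standalone model of this (illustration only,
# assuming Math::BigInt; it is not used by the generator):
#
#	use Math::BigInt;
#	my $P130 = Math::BigInt->bone->blsft(130);	# 2^130
#	sub iteration_model {			# h = h*r mod 2^130-5,
#		my ($h, $r) = @_;		# partially reduced
#		my $d = $h->copy->bmul($r);
#		my $c = $d->copy->brsft(130);	# bits at and above 2^130
#		return $d->bmod($P130)->badd($c->bmul(5));
#	}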
135
136 ########################################################################
137 # The layout of the opaque area is as follows.
138 #
139 # unsigned __int64 h[3]; # current hash value base 2^64
140 # unsigned __int64 r[2]; # key value base 2^64
141
142 $code.=<<___;
143 .text
144
145 .extern OPENSSL_ia32cap_P
146
147 .globl poly1305_init
148 .hidden poly1305_init
149 .globl poly1305_blocks
150 .hidden poly1305_blocks
151 .globl poly1305_emit
152 .hidden poly1305_emit
153
154 .type poly1305_init,\@function,3
155 .align 32
156 poly1305_init:
157 xor %rax,%rax
158 mov %rax,0($ctx) # initialize hash value
159 mov %rax,8($ctx)
160 mov %rax,16($ctx)
161
162 cmp \$0,$inp
163 je .Lno_key
164
165 lea poly1305_blocks(%rip),%r10
166 lea poly1305_emit(%rip),%r11
167 ___
168 $code.=<<___ if ($avx);
169 mov OPENSSL_ia32cap_P+4(%rip),%r9
170 lea poly1305_blocks_avx(%rip),%rax
171 lea poly1305_emit_avx(%rip),%rcx
172 bt \$`60-32`,%r9 # AVX?
173 cmovc %rax,%r10
174 cmovc %rcx,%r11
175 ___
176 $code.=<<___ if ($avx>1);
177 lea poly1305_blocks_avx2(%rip),%rax
178 bt \$`5+32`,%r9 # AVX2?
179 cmovc %rax,%r10
180 ___
181 $code.=<<___;
182 mov \$0x0ffffffc0fffffff,%rax
183 mov \$0x0ffffffc0ffffffc,%rcx
184 and 0($inp),%rax
185 and 8($inp),%rcx
186 mov %rax,24($ctx)
187 mov %rcx,32($ctx)
188 ___
189 $code.=<<___ if ($flavour !~ /elf32/);
190 mov %r10,0(%rdx)
191 mov %r11,8(%rdx)
192 ___
193 $code.=<<___ if ($flavour =~ /elf32/);
194 mov %r10d,0(%rdx)
195 mov %r11d,4(%rdx)
196 ___
197 $code.=<<___;
198 mov \$1,%eax
199 .Lno_key:
200 ret
201 .size poly1305_init,.-poly1305_init
202
203 .type poly1305_blocks,\@function,4
204 .align 32
205 poly1305_blocks:
206 .Lblocks:
207 shr \$4,$len
208 jz .Lno_data # too short
209
210 push %rbx
211 push %rbp
212 push %r12
213 push %r13
214 push %r14
215 push %r15
216 .Lblocks_body:
217
218 mov $len,%r15 # reassign $len
219
220 mov 24($ctx),$r0 # load r
221 mov 32($ctx),$s1
222
223 mov 0($ctx),$h0 # load hash value
224 mov 8($ctx),$h1
225 mov 16($ctx),$h2
226
227 mov $s1,$r1
228 shr \$2,$s1
229 mov $r1,%rax
230 add $r1,$s1 # s1 = r1 + (r1 >> 2)
231 jmp .Loop
232
233 .align 32
234 .Loop:
235 add 0($inp),$h0 # accumulate input
236 adc 8($inp),$h1
237 lea 16($inp),$inp
238 adc $padbit,$h2
239 ___
240 &poly1305_iteration();
241 $code.=<<___;
242 mov $r1,%rax
243 dec %r15 # len-=16
244 jnz .Loop
245
246 mov $h0,0($ctx) # store hash value
247 mov $h1,8($ctx)
248 mov $h2,16($ctx)
249
250 mov 0(%rsp),%r15
251 mov 8(%rsp),%r14
252 mov 16(%rsp),%r13
253 mov 24(%rsp),%r12
254 mov 32(%rsp),%rbp
255 mov 40(%rsp),%rbx
256 lea 48(%rsp),%rsp
257 .Lno_data:
258 .Lblocks_epilogue:
259 ret
260 .size poly1305_blocks,.-poly1305_blocks
261
262 .type poly1305_emit,\@function,3
263 .align 32
264 poly1305_emit:
265 .Lemit:
266 mov 0($ctx),%r8 # load hash value
267 mov 8($ctx),%r9
268 mov 16($ctx),%r10
269
270 mov %r8,%rax
271 add \$5,%r8 # compare to modulus
272 mov %r9,%rcx
273 adc \$0,%r9
274 adc \$0,%r10
275 shr \$2,%r10 # did 130-bit value overflow?
276 cmovnz %r8,%rax
277 cmovnz %r9,%rcx
278
279 add 0($nonce),%rax # accumulate nonce
280 adc 8($nonce),%rcx
281 mov %rax,0($mac) # write result
282 mov %rcx,8($mac)
283
284 ret
285 .size poly1305_emit,.-poly1305_emit
286 ___
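
# For cross-checking, the whole MAC implemented by the three routines
# above has this compact standalone model (illustration only, assuming a
# recent Math::BigInt; $key is the 32-byte r||nonce key, and le128() is a
# hypothetical helper returning the low 128 bits little-endian):
#
#	use Math::BigInt;
#	sub le128 { scalar reverse substr(("\0"x16).$_[0]->to_bytes,-16) }
#	sub poly1305_model {
#		my ($key, $msg) = @_;
#		my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
#		my $r = Math::BigInt->from_bytes(scalar reverse substr($key,0,16));
#		$r->band(Math::BigInt->from_hex("0x0ffffffc0ffffffc0ffffffc0fffffff"));
#		my $h = Math::BigInt->bzero;
#		for (my $i=0; $i<length $msg; $i+=16) {
#			my $blk = substr($msg,$i,16);
#			my $m = Math::BigInt->from_bytes(scalar reverse $blk);
#			$m->badd(Math::BigInt->bone->blsft(8*length $blk)); # padbit
#			$h->badd($m)->bmul($r)->bmod($p);
#		}
#		my $n = Math::BigInt->from_bytes(scalar reverse substr($key,16,16));
#		return le128($h->badd($n));	# tag = ((h mod p)+nonce) mod 2^128
#	}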
287 if ($avx) {
288
289 ########################################################################
290 # The layout of the opaque area is as follows.
291 #
292 # unsigned __int32 h[5]; # current hash value base 2^26
293 # unsigned __int32 is_base2_26;
294 # unsigned __int64 r[2]; # key value base 2^64
295 # unsigned __int64 pad;
296 # struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
297 #
298 # where r^n are the base 2^26 digits of powers of the multiplier key. There
299 # are 5 digits, but the last four are interleaved with their multiples of 5,
300 # totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
301
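# A scalar sketch (illustration only, assuming a 64-bit Perl) of how the
# clamped key r, given as two 64-bit halves, is split into these base 2^26
# digits and interleaved with their multiples of 5 by __poly1305_init_avx:
#
#	sub base26 {
#		my ($lo, $hi) = @_;		# clamped r, low/high 8 bytes
#		return ( $lo & 0x3ffffff,
#			 ($lo >> 26) & 0x3ffffff,
#			 ($lo >> 52) | (($hi & 0x3fff) << 12),
#			 ($hi >> 14) & 0x3ffffff,
#			 $hi >> 40 );		# < 2^24 thanks to clamping
#	}
#	my @d = base26($r0, $r1);	# $r0,$r1: hypothetical key halves
#	my @table = ($d[0], map { ($d[$_], 5*$d[$_]) } 1..4);
#	# i.e. (r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4)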
302 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
303 map("%xmm$_",(0..15));
304
305 $code.=<<___;
306 .type __poly1305_block,\@abi-omnipotent
307 .align 32
308 __poly1305_block:
309 ___
310 &poly1305_iteration();
311 $code.=<<___;
312 ret
313 .size __poly1305_block,.-__poly1305_block
314
315 .type __poly1305_init_avx,\@abi-omnipotent
316 .align 32
317 __poly1305_init_avx:
318 mov $r0,$h0
319 mov $r1,$h1
320 xor $h2,$h2
321
322 lea 48+64($ctx),$ctx # size optimization
323
324 mov $r1,%rax
325 call __poly1305_block # r^2
326
327 mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26
328 mov \$0x3ffffff,%edx
329 mov $h0,$d1
330 and $h0#d,%eax
331 mov $r0,$d2
332 and $r0#d,%edx
333 mov %eax,`16*0+0-64`($ctx)
334 shr \$26,$d1
335 mov %edx,`16*0+4-64`($ctx)
336 shr \$26,$d2
337
338 mov \$0x3ffffff,%eax
339 mov \$0x3ffffff,%edx
340 and $d1#d,%eax
341 and $d2#d,%edx
342 mov %eax,`16*1+0-64`($ctx)
343 lea (%rax,%rax,4),%eax # *5
344 mov %edx,`16*1+4-64`($ctx)
345 lea (%rdx,%rdx,4),%edx # *5
346 mov %eax,`16*2+0-64`($ctx)
347 shr \$26,$d1
348 mov %edx,`16*2+4-64`($ctx)
349 shr \$26,$d2
350
351 mov $h1,%rax
352 mov $r1,%rdx
353 shl \$12,%rax
354 shl \$12,%rdx
355 or $d1,%rax
356 or $d2,%rdx
357 and \$0x3ffffff,%eax
358 and \$0x3ffffff,%edx
359 mov %eax,`16*3+0-64`($ctx)
360 lea (%rax,%rax,4),%eax # *5
361 mov %edx,`16*3+4-64`($ctx)
362 lea (%rdx,%rdx,4),%edx # *5
363 mov %eax,`16*4+0-64`($ctx)
364 mov $h1,$d1
365 mov %edx,`16*4+4-64`($ctx)
366 mov $r1,$d2
367
368 mov \$0x3ffffff,%eax
369 mov \$0x3ffffff,%edx
370 shr \$14,$d1
371 shr \$14,$d2
372 and $d1#d,%eax
373 and $d2#d,%edx
374 mov %eax,`16*5+0-64`($ctx)
375 lea (%rax,%rax,4),%eax # *5
376 mov %edx,`16*5+4-64`($ctx)
377 lea (%rdx,%rdx,4),%edx # *5
378 mov %eax,`16*6+0-64`($ctx)
379 shr \$26,$d1
380 mov %edx,`16*6+4-64`($ctx)
381 shr \$26,$d2
382
383 mov $h2,%rax
384 shl \$24,%rax
385 or %rax,$d1
386 mov $d1#d,`16*7+0-64`($ctx)
387 lea ($d1,$d1,4),$d1 # *5
388 mov $d2#d,`16*7+4-64`($ctx)
389 lea ($d2,$d2,4),$d2 # *5
390 mov $d1#d,`16*8+0-64`($ctx)
391 mov $d2#d,`16*8+4-64`($ctx)
392
393 mov $r1,%rax
394 call __poly1305_block # r^3
395
396 mov \$0x3ffffff,%eax # save r^3 base 2^26
397 mov $h0,$d1
398 and $h0#d,%eax
399 shr \$26,$d1
400 mov %eax,`16*0+12-64`($ctx)
401
402 mov \$0x3ffffff,%edx
403 and $d1#d,%edx
404 mov %edx,`16*1+12-64`($ctx)
405 lea (%rdx,%rdx,4),%edx # *5
406 shr \$26,$d1
407 mov %edx,`16*2+12-64`($ctx)
408
409 mov $h1,%rax
410 shl \$12,%rax
411 or $d1,%rax
412 and \$0x3ffffff,%eax
413 mov %eax,`16*3+12-64`($ctx)
414 lea (%rax,%rax,4),%eax # *5
415 mov $h1,$d1
416 mov %eax,`16*4+12-64`($ctx)
417
418 mov \$0x3ffffff,%edx
419 shr \$14,$d1
420 and $d1#d,%edx
421 mov %edx,`16*5+12-64`($ctx)
422 lea (%rdx,%rdx,4),%edx # *5
423 shr \$26,$d1
424 mov %edx,`16*6+12-64`($ctx)
425
426 mov $h2,%rax
427 shl \$24,%rax
428 or %rax,$d1
429 mov $d1#d,`16*7+12-64`($ctx)
430 lea ($d1,$d1,4),$d1 # *5
431 mov $d1#d,`16*8+12-64`($ctx)
432
433 mov $r1,%rax
434 call __poly1305_block # r^4
435
436 mov \$0x3ffffff,%eax # save r^4 base 2^26
437 mov $h0,$d1
438 and $h0#d,%eax
439 shr \$26,$d1
440 mov %eax,`16*0+8-64`($ctx)
441
442 mov \$0x3ffffff,%edx
443 and $d1#d,%edx
444 mov %edx,`16*1+8-64`($ctx)
445 lea (%rdx,%rdx,4),%edx # *5
446 shr \$26,$d1
447 mov %edx,`16*2+8-64`($ctx)
448
449 mov $h1,%rax
450 shl \$12,%rax
451 or $d1,%rax
452 and \$0x3ffffff,%eax
453 mov %eax,`16*3+8-64`($ctx)
454 lea (%rax,%rax,4),%eax # *5
455 mov $h1,$d1
456 mov %eax,`16*4+8-64`($ctx)
457
458 mov \$0x3ffffff,%edx
459 shr \$14,$d1
460 and $d1#d,%edx
461 mov %edx,`16*5+8-64`($ctx)
462 lea (%rdx,%rdx,4),%edx # *5
463 shr \$26,$d1
464 mov %edx,`16*6+8-64`($ctx)
465
466 mov $h2,%rax
467 shl \$24,%rax
468 or %rax,$d1
469 mov $d1#d,`16*7+8-64`($ctx)
470 lea ($d1,$d1,4),$d1 # *5
471 mov $d1#d,`16*8+8-64`($ctx)
472
473 lea -48-64($ctx),$ctx # size [de-]optimization
474 ret
475 .size __poly1305_init_avx,.-__poly1305_init_avx
476
477 .type poly1305_blocks_avx,\@function,4
478 .align 32
479 poly1305_blocks_avx:
480 mov 20($ctx),%r8d # is_base2_26
481 cmp \$128,$len
482 jae .Lblocks_avx
483 test %r8d,%r8d
484 jz .Lblocks
485
486 .Lblocks_avx:
487 and \$-16,$len
488 jz .Lno_data_avx
489
490 vzeroupper
491
492 test %r8d,%r8d
493 jz .Lbase2_64_avx
494
495 test \$31,$len
496 jz .Leven_avx
497
498 push %rbx
499 push %rbp
500 push %r12
501 push %r13
502 push %r14
503 push %r15
504 .Lblocks_avx_body:
505
506 mov $len,%r15 # reassign $len
507
508 mov 0($ctx),$d1 # load hash value
509 mov 8($ctx),$d2
510 mov 16($ctx),$h2#d
511
512 mov 24($ctx),$r0 # load r
513 mov 32($ctx),$s1
514
515 ################################# base 2^26 -> base 2^64
516 mov $d1#d,$h0#d
517 and \$`-1*(1<<31)`,$d1
518 mov $d2,$r1 # borrow $r1
519 mov $d2#d,$h1#d
520 and \$`-1*(1<<31)`,$d2
521
522 shr \$6,$d1
523 shl \$52,$r1
524 add $d1,$h0
525 shr \$12,$h1
526 shr \$18,$d2
527 add $r1,$h0
528 adc $d2,$h1
529
530 mov $h2,$d1
531 shl \$40,$d1
532 shr \$24,$h2
533 add $d1,$h1
534 adc \$0,$h2 # can be partially reduced...
535
536 mov \$-4,$d2 # ... so reduce
537 mov $h2,$d1
538 and $h2,$d2
539 shr \$2,$d1
540 and \$3,$h2
541 add $d2,$d1 # =*5
542 add $d1,$h0
543 adc \$0,$h1
544 adc \$0,$h2
545
546 mov $s1,$r1
547 mov $s1,%rax
548 shr \$2,$s1
549 add $r1,$s1 # s1 = r1 + (r1 >> 2)
550
551 add 0($inp),$h0 # accumulate input
552 adc 8($inp),$h1
553 lea 16($inp),$inp
554 adc $padbit,$h2
555
556 call __poly1305_block
557
558 test $padbit,$padbit # if $padbit is zero,
559 jz .Lstore_base2_64_avx # store hash in base 2^64 format
560
561 ################################# base 2^64 -> base 2^26
562 mov $h0,%rax
563 mov $h0,%rdx
564 shr \$52,$h0
565 mov $h1,$r0
566 mov $h1,$r1
567 shr \$26,%rdx
568 and \$0x3ffffff,%rax # h[0]
569 shl \$12,$r0
570 and \$0x3ffffff,%rdx # h[1]
571 shr \$14,$h1
572 or $r0,$h0
573 shl \$24,$h2
574 and \$0x3ffffff,$h0 # h[2]
575 shr \$40,$r1
576 and \$0x3ffffff,$h1 # h[3]
577 or $r1,$h2 # h[4]
578
579 sub \$16,%r15
580 jz .Lstore_base2_26_avx
581
582 vmovd %rax#d,$H0
583 vmovd %rdx#d,$H1
584 vmovd $h0#d,$H2
585 vmovd $h1#d,$H3
586 vmovd $h2#d,$H4
587 jmp .Lproceed_avx
588
589 .align 32
590 .Lstore_base2_64_avx:
591 mov $h0,0($ctx)
592 mov $h1,8($ctx)
593 mov $h2,16($ctx) # note that is_base2_26 is zeroed
594 jmp .Ldone_avx
595
596 .align 16
597 .Lstore_base2_26_avx:
598 mov %rax#d,0($ctx) # store hash value base 2^26
599 mov %rdx#d,4($ctx)
600 mov $h0#d,8($ctx)
601 mov $h1#d,12($ctx)
602 mov $h2#d,16($ctx)
603 .align 16
604 .Ldone_avx:
605 mov 0(%rsp),%r15
606 mov 8(%rsp),%r14
607 mov 16(%rsp),%r13
608 mov 24(%rsp),%r12
609 mov 32(%rsp),%rbp
610 mov 40(%rsp),%rbx
611 lea 48(%rsp),%rsp
612 .Lno_data_avx:
613 .Lblocks_avx_epilogue:
614 ret
615
616 .align 32
617 .Lbase2_64_avx:
618 push %rbx
619 push %rbp
620 push %r12
621 push %r13
622 push %r14
623 push %r15
624 .Lbase2_64_avx_body:
625
626 mov $len,%r15 # reassign $len
627
628 mov 24($ctx),$r0 # load r
629 mov 32($ctx),$s1
630
631 mov 0($ctx),$h0 # load hash value
632 mov 8($ctx),$h1
633 mov 16($ctx),$h2#d
634
635 mov $s1,$r1
636 mov $s1,%rax
637 shr \$2,$s1
638 add $r1,$s1 # s1 = r1 + (r1 >> 2)
639
640 test \$31,$len
641 jz .Linit_avx
642
643 add 0($inp),$h0 # accumulate input
644 adc 8($inp),$h1
645 lea 16($inp),$inp
646 adc $padbit,$h2
647 sub \$16,%r15
648
649 call __poly1305_block
650
651 .Linit_avx:
652 ################################# base 2^64 -> base 2^26
653 mov $h0,%rax
654 mov $h0,%rdx
655 shr \$52,$h0
656 mov $h1,$d1
657 mov $h1,$d2
658 shr \$26,%rdx
659 and \$0x3ffffff,%rax # h[0]
660 shl \$12,$d1
661 and \$0x3ffffff,%rdx # h[1]
662 shr \$14,$h1
663 or $d1,$h0
664 shl \$24,$h2
665 and \$0x3ffffff,$h0 # h[2]
666 shr \$40,$d2
667 and \$0x3ffffff,$h1 # h[3]
668 or $d2,$h2 # h[4]
669
670 vmovd %rax#d,$H0
671 vmovd %rdx#d,$H1
672 vmovd $h0#d,$H2
673 vmovd $h1#d,$H3
674 vmovd $h2#d,$H4
675 movl \$1,20($ctx) # set is_base2_26
676
677 call __poly1305_init_avx
678
679 .Lproceed_avx:
680 mov %r15,$len
681
682 mov 0(%rsp),%r15
683 mov 8(%rsp),%r14
684 mov 16(%rsp),%r13
685 mov 24(%rsp),%r12
686 mov 32(%rsp),%rbp
687 mov 40(%rsp),%rbx
688 lea 48(%rsp),%rax
689 lea 48(%rsp),%rsp
690 .Lbase2_64_avx_epilogue:
691 jmp .Ldo_avx
692
693 .align 32
694 .Leven_avx:
695 vmovd 4*0($ctx),$H0 # load hash value
696 vmovd 4*1($ctx),$H1
697 vmovd 4*2($ctx),$H2
698 vmovd 4*3($ctx),$H3
699 vmovd 4*4($ctx),$H4
700
701 .Ldo_avx:
702 ___
703 $code.=<<___ if (!$win64);
704 lea -0x58(%rsp),%r11
705 sub \$0x178,%rsp
706 ___
707 $code.=<<___ if ($win64);
708 lea -0xf8(%rsp),%r11
709 sub \$0x218,%rsp
710 vmovdqa %xmm6,0x50(%r11)
711 vmovdqa %xmm7,0x60(%r11)
712 vmovdqa %xmm8,0x70(%r11)
713 vmovdqa %xmm9,0x80(%r11)
714 vmovdqa %xmm10,0x90(%r11)
715 vmovdqa %xmm11,0xa0(%r11)
716 vmovdqa %xmm12,0xb0(%r11)
717 vmovdqa %xmm13,0xc0(%r11)
718 vmovdqa %xmm14,0xd0(%r11)
719 vmovdqa %xmm15,0xe0(%r11)
720 .Ldo_avx_body:
721 ___
722 $code.=<<___;
723 sub \$64,$len
724 lea -32($inp),%rax
725 cmovc %rax,$inp
726
727 vmovdqu `16*3`($ctx),$D4 # preload r0^2
728 lea `16*3+64`($ctx),$ctx # size optimization
729 lea .Lconst(%rip),%rcx
730
731 ################################################################
732 # load input
733 vmovdqu 16*2($inp),$T0
734 vmovdqu 16*3($inp),$T1
735 vmovdqa 64(%rcx),$MASK # .Lmask26
736
737 vpsrldq \$6,$T0,$T2 # splat input
738 vpsrldq \$6,$T1,$T3
739 vpunpckhqdq $T1,$T0,$T4 # 4
740 vpunpcklqdq $T1,$T0,$T0 # 0:1
741 vpunpcklqdq $T3,$T2,$T3 # 2:3
742
743 vpsrlq \$40,$T4,$T4 # 4
744 vpsrlq \$26,$T0,$T1
745 vpand $MASK,$T0,$T0 # 0
746 vpsrlq \$4,$T3,$T2
747 vpand $MASK,$T1,$T1 # 1
748 vpsrlq \$30,$T3,$T3
749 vpand $MASK,$T2,$T2 # 2
750 vpand $MASK,$T3,$T3 # 3
751 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
752
753 jbe .Lskip_loop_avx
754
755 # expand and copy pre-calculated table to stack
756 vmovdqu `16*1-64`($ctx),$D1
757 vmovdqu `16*2-64`($ctx),$D2
758 vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434
759 vpshufd \$0x44,$D4,$D0 # xx12 -> 1212
760 vmovdqa $D3,-0x90(%r11)
761 vmovdqa $D0,0x00(%rsp)
762 vpshufd \$0xEE,$D1,$D4
763 vmovdqu `16*3-64`($ctx),$D0
764 vpshufd \$0x44,$D1,$D1
765 vmovdqa $D4,-0x80(%r11)
766 vmovdqa $D1,0x10(%rsp)
767 vpshufd \$0xEE,$D2,$D3
768 vmovdqu `16*4-64`($ctx),$D1
769 vpshufd \$0x44,$D2,$D2
770 vmovdqa $D3,-0x70(%r11)
771 vmovdqa $D2,0x20(%rsp)
772 vpshufd \$0xEE,$D0,$D4
773 vmovdqu `16*5-64`($ctx),$D2
774 vpshufd \$0x44,$D0,$D0
775 vmovdqa $D4,-0x60(%r11)
776 vmovdqa $D0,0x30(%rsp)
777 vpshufd \$0xEE,$D1,$D3
778 vmovdqu `16*6-64`($ctx),$D0
779 vpshufd \$0x44,$D1,$D1
780 vmovdqa $D3,-0x50(%r11)
781 vmovdqa $D1,0x40(%rsp)
782 vpshufd \$0xEE,$D2,$D4
783 vmovdqu `16*7-64`($ctx),$D1
784 vpshufd \$0x44,$D2,$D2
785 vmovdqa $D4,-0x40(%r11)
786 vmovdqa $D2,0x50(%rsp)
787 vpshufd \$0xEE,$D0,$D3
788 vmovdqu `16*8-64`($ctx),$D2
789 vpshufd \$0x44,$D0,$D0
790 vmovdqa $D3,-0x30(%r11)
791 vmovdqa $D0,0x60(%rsp)
792 vpshufd \$0xEE,$D1,$D4
793 vpshufd \$0x44,$D1,$D1
794 vmovdqa $D4,-0x20(%r11)
795 vmovdqa $D1,0x70(%rsp)
796 vpshufd \$0xEE,$D2,$D3
797 vmovdqa 0x00(%rsp),$D4 # preload r0^2
798 vpshufd \$0x44,$D2,$D2
799 vmovdqa $D3,-0x10(%r11)
800 vmovdqa $D2,0x80(%rsp)
801
802 jmp .Loop_avx
803
804 .align 32
805 .Loop_avx:
806 ################################################################
807 # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
808 # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
809 # \___________________/
810 # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
811 # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
812 # \___________________/ \____________________/
813 #
814 # Note that we start with inp[2:3]*r^2. This is because it
815 # doesn't depend on the reduction in the previous iteration.
816 ################################################################
817 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
818 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
819 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
820 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
821 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
822 #
823 # though note that $Tx and $Hx are "reversed" in this section,
824 # and $D4 is preloaded with r0^2...
825
826 vpmuludq $T0,$D4,$D0 # d0 = h0*r0
827 vpmuludq $T1,$D4,$D1 # d1 = h1*r0
828 vmovdqa $H2,0x20(%r11) # offload hash
829 vpmuludq $T2,$D4,$D2 # d2 = h2*r0
830 vmovdqa 0x10(%rsp),$H2 # r1^2
831 vpmuludq $T3,$D4,$D3 # d3 = h3*r0
832 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
833
834 vmovdqa $H0,0x00(%r11) #
835 vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
836 vmovdqa $H1,0x10(%r11) #
837 vpmuludq $T3,$H2,$H1 # h3*r1
838 vpaddq $H0,$D0,$D0 # d0 += h4*s1
839 vpaddq $H1,$D4,$D4 # d4 += h3*r1
840 vmovdqa $H3,0x30(%r11) #
841 vpmuludq $T2,$H2,$H0 # h2*r1
842 vpmuludq $T1,$H2,$H1 # h1*r1
843 vpaddq $H0,$D3,$D3 # d3 += h2*r1
844 vmovdqa 0x30(%rsp),$H3 # r2^2
845 vpaddq $H1,$D2,$D2 # d2 += h1*r1
846 vmovdqa $H4,0x40(%r11) #
847 vpmuludq $T0,$H2,$H2 # h0*r1
848 vpmuludq $T2,$H3,$H0 # h2*r2
849 vpaddq $H2,$D1,$D1 # d1 += h0*r1
850
851 vmovdqa 0x40(%rsp),$H4 # s2^2
852 vpaddq $H0,$D4,$D4 # d4 += h2*r2
853 vpmuludq $T1,$H3,$H1 # h1*r2
854 vpmuludq $T0,$H3,$H3 # h0*r2
855 vpaddq $H1,$D3,$D3 # d3 += h1*r2
856 vmovdqa 0x50(%rsp),$H2 # r3^2
857 vpaddq $H3,$D2,$D2 # d2 += h0*r2
858 vpmuludq $T4,$H4,$H0 # h4*s2
859 vpmuludq $T3,$H4,$H4 # h3*s2
860 vpaddq $H0,$D1,$D1 # d1 += h4*s2
861 vmovdqa 0x60(%rsp),$H3 # s3^2
862 vpaddq $H4,$D0,$D0 # d0 += h3*s2
863
864 vmovdqa 0x80(%rsp),$H4 # s4^2
865 vpmuludq $T1,$H2,$H1 # h1*r3
866 vpmuludq $T0,$H2,$H2 # h0*r3
867 vpaddq $H1,$D4,$D4 # d4 += h1*r3
868 vpaddq $H2,$D3,$D3 # d3 += h0*r3
869 vpmuludq $T4,$H3,$H0 # h4*s3
870 vpmuludq $T3,$H3,$H1 # h3*s3
871 vpaddq $H0,$D2,$D2 # d2 += h4*s3
872 vmovdqu 16*0($inp),$H0 # load input
873 vpaddq $H1,$D1,$D1 # d1 += h3*s3
874 vpmuludq $T2,$H3,$H3 # h2*s3
875 vpmuludq $T2,$H4,$T2 # h2*s4
876 vpaddq $H3,$D0,$D0 # d0 += h2*s3
877
878 vmovdqu 16*1($inp),$H1 #
879 vpaddq $T2,$D1,$D1 # d1 += h2*s4
880 vpmuludq $T3,$H4,$T3 # h3*s4
881 vpmuludq $T4,$H4,$T4 # h4*s4
882 vpsrldq \$6,$H0,$H2 # splat input
883 vpaddq $T3,$D2,$D2 # d2 += h3*s4
884 vpaddq $T4,$D3,$D3 # d3 += h4*s4
885 vpsrldq \$6,$H1,$H3 #
886 vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4
887 vpmuludq $T1,$H4,$T0 # h1*s4
888 vpunpckhqdq $H1,$H0,$H4 # 4
889 vpaddq $T4,$D4,$D4 # d4 += h0*r4
890 vmovdqa -0x90(%r11),$T4 # r0^4
891 vpaddq $T0,$D0,$D0 # d0 += h1*s4
892
893 vpunpcklqdq $H1,$H0,$H0 # 0:1
894 vpunpcklqdq $H3,$H2,$H3 # 2:3
895
896 #vpsrlq \$40,$H4,$H4 # 4
897 vpsrldq \$`40/8`,$H4,$H4 # 4
898 vpsrlq \$26,$H0,$H1
899 vpand $MASK,$H0,$H0 # 0
900 vpsrlq \$4,$H3,$H2
901 vpand $MASK,$H1,$H1 # 1
902 vpand 0(%rcx),$H4,$H4 # .Lmask24
903 vpsrlq \$30,$H3,$H3
904 vpand $MASK,$H2,$H2 # 2
905 vpand $MASK,$H3,$H3 # 3
906 vpor 32(%rcx),$H4,$H4 # padbit, yes, always
907
908 vpaddq 0x00(%r11),$H0,$H0 # add hash value
909 vpaddq 0x10(%r11),$H1,$H1
910 vpaddq 0x20(%r11),$H2,$H2
911 vpaddq 0x30(%r11),$H3,$H3
912 vpaddq 0x40(%r11),$H4,$H4
913
914 lea 16*2($inp),%rax
915 lea 16*4($inp),$inp
916 sub \$64,$len
917 cmovc %rax,$inp
918
919 ################################################################
920 # Now we accumulate (inp[0:1]+hash)*r^4
921 ################################################################
922 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
923 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
924 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
925 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
926 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
927
928 vpmuludq $H0,$T4,$T0 # h0*r0
929 vpmuludq $H1,$T4,$T1 # h1*r0
930 vpaddq $T0,$D0,$D0
931 vpaddq $T1,$D1,$D1
932 vmovdqa -0x80(%r11),$T2 # r1^4
933 vpmuludq $H2,$T4,$T0 # h2*r0
934 vpmuludq $H3,$T4,$T1 # h3*r0
935 vpaddq $T0,$D2,$D2
936 vpaddq $T1,$D3,$D3
937 vpmuludq $H4,$T4,$T4 # h4*r0
938 vpmuludq -0x70(%r11),$H4,$T0 # h4*s1
939 vpaddq $T4,$D4,$D4
940
941 vpaddq $T0,$D0,$D0 # d0 += h4*s1
942 vpmuludq $H2,$T2,$T1 # h2*r1
943 vpmuludq $H3,$T2,$T0 # h3*r1
944 vpaddq $T1,$D3,$D3 # d3 += h2*r1
945 vmovdqa -0x60(%r11),$T3 # r2^4
946 vpaddq $T0,$D4,$D4 # d4 += h3*r1
947 vpmuludq $H1,$T2,$T1 # h1*r1
948 vpmuludq $H0,$T2,$T2 # h0*r1
949 vpaddq $T1,$D2,$D2 # d2 += h1*r1
950 vpaddq $T2,$D1,$D1 # d1 += h0*r1
951
952 vmovdqa -0x50(%r11),$T4 # s2^4
953 vpmuludq $H2,$T3,$T0 # h2*r2
954 vpmuludq $H1,$T3,$T1 # h1*r2
955 vpaddq $T0,$D4,$D4 # d4 += h2*r2
956 vpaddq $T1,$D3,$D3 # d3 += h1*r2
957 vmovdqa -0x40(%r11),$T2 # r3^4
958 vpmuludq $H0,$T3,$T3 # h0*r2
959 vpmuludq $H4,$T4,$T0 # h4*s2
960 vpaddq $T3,$D2,$D2 # d2 += h0*r2
961 vpaddq $T0,$D1,$D1 # d1 += h4*s2
962 vmovdqa -0x30(%r11),$T3 # s3^4
963 vpmuludq $H3,$T4,$T4 # h3*s2
964 vpmuludq $H1,$T2,$T1 # h1*r3
965 vpaddq $T4,$D0,$D0 # d0 += h3*s2
966
967 vmovdqa -0x10(%r11),$T4 # s4^4
968 vpaddq $T1,$D4,$D4 # d4 += h1*r3
969 vpmuludq $H0,$T2,$T2 # h0*r3
970 vpmuludq $H4,$T3,$T0 # h4*s3
971 vpaddq $T2,$D3,$D3 # d3 += h0*r3
972 vpaddq $T0,$D2,$D2 # d2 += h4*s3
973 vmovdqu 16*2($inp),$T0 # load input
974 vpmuludq $H3,$T3,$T2 # h3*s3
975 vpmuludq $H2,$T3,$T3 # h2*s3
976 vpaddq $T2,$D1,$D1 # d1 += h3*s3
977 vmovdqu 16*3($inp),$T1 #
978 vpaddq $T3,$D0,$D0 # d0 += h2*s3
979
980 vpmuludq $H2,$T4,$H2 # h2*s4
981 vpmuludq $H3,$T4,$H3 # h3*s4
982 vpsrldq \$6,$T0,$T2 # splat input
983 vpaddq $H2,$D1,$D1 # d1 += h2*s4
984 vpmuludq $H4,$T4,$H4 # h4*s4
985 vpsrldq \$6,$T1,$T3 #
986 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
987 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
988 vpmuludq -0x20(%r11),$H0,$H4 # h0*r4
989 vpmuludq $H1,$T4,$H0
990 vpunpckhqdq $T1,$T0,$T4 # 4
991 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
992 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
993
994 vpunpcklqdq $T1,$T0,$T0 # 0:1
995 vpunpcklqdq $T3,$T2,$T3 # 2:3
996
997 #vpsrlq \$40,$T4,$T4 # 4
998 vpsrldq \$`40/8`,$T4,$T4 # 4
999 vpsrlq \$26,$T0,$T1
1000 vmovdqa 0x00(%rsp),$D4 # preload r0^2
1001 vpand $MASK,$T0,$T0 # 0
1002 vpsrlq \$4,$T3,$T2
1003 vpand $MASK,$T1,$T1 # 1
1004 vpand 0(%rcx),$T4,$T4 # .Lmask24
1005 vpsrlq \$30,$T3,$T3
1006 vpand $MASK,$T2,$T2 # 2
1007 vpand $MASK,$T3,$T3 # 3
1008 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1009
1010 ################################################################
1011 # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
1012 # and P. Schwabe
1013
1014 vpsrlq \$26,$H3,$D3
1015 vpand $MASK,$H3,$H3
1016 vpaddq $D3,$H4,$H4 # h3 -> h4
1017
1018 vpsrlq \$26,$H0,$D0
1019 vpand $MASK,$H0,$H0
1020 vpaddq $D0,$D1,$H1 # h0 -> h1
1021
1022 vpsrlq \$26,$H4,$D0
1023 vpand $MASK,$H4,$H4
1024
1025 vpsrlq \$26,$H1,$D1
1026 vpand $MASK,$H1,$H1
1027 vpaddq $D1,$H2,$H2 # h1 -> h2
1028
1029 vpaddq $D0,$H0,$H0
1030 vpsllq \$2,$D0,$D0
1031 vpaddq $D0,$H0,$H0 # h4 -> h0
1032
1033 vpsrlq \$26,$H2,$D2
1034 vpand $MASK,$H2,$H2
1035 vpaddq $D2,$H3,$H3 # h2 -> h3
1036
1037 vpsrlq \$26,$H0,$D0
1038 vpand $MASK,$H0,$H0
1039 vpaddq $D0,$H1,$H1 # h0 -> h1
1040
1041 vpsrlq \$26,$H3,$D3
1042 vpand $MASK,$H3,$H3
1043 vpaddq $D3,$H4,$H4 # h3 -> h4
1044
1045 ja .Loop_avx
1046
1047 .Lskip_loop_avx:
1048 ################################################################
1049 # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1050
1051 vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2
1052 add \$32,$len
1053 jnz .Long_tail_avx
1054
1055 vpaddq $H2,$T2,$T2
1056 vpaddq $H0,$T0,$T0
1057 vpaddq $H1,$T1,$T1
1058 vpaddq $H3,$T3,$T3
1059 vpaddq $H4,$T4,$T4
1060
1061 .Long_tail_avx:
1062 vmovdqa $H2,0x20(%r11)
1063 vmovdqa $H0,0x00(%r11)
1064 vmovdqa $H1,0x10(%r11)
1065 vmovdqa $H3,0x30(%r11)
1066 vmovdqa $H4,0x40(%r11)
1067
1068 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
1069 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
1070 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1071 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
1072 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1073
1074 vpmuludq $T2,$D4,$D2 # d2 = h2*r0
1075 vpmuludq $T0,$D4,$D0 # d0 = h0*r0
1076 vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n
1077 vpmuludq $T1,$D4,$D1 # d1 = h1*r0
1078 vpmuludq $T3,$D4,$D3 # d3 = h3*r0
1079 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
1080
1081 vpmuludq $T3,$H2,$H0 # h3*r1
1082 vpaddq $H0,$D4,$D4 # d4 += h3*r1
1083 vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n
1084 vpmuludq $T2,$H2,$H1 # h2*r1
1085 vpaddq $H1,$D3,$D3 # d3 += h2*r1
1086 vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n
1087 vpmuludq $T1,$H2,$H0 # h1*r1
1088 vpaddq $H0,$D2,$D2 # d2 += h1*r1
1089 vpmuludq $T0,$H2,$H2 # h0*r1
1090 vpaddq $H2,$D1,$D1 # d1 += h0*r1
1091 vpmuludq $T4,$H3,$H3 # h4*s1
1092 vpaddq $H3,$D0,$D0 # d0 += h4*s1
1093
1094 vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n
1095 vpmuludq $T2,$H4,$H1 # h2*r2
1096 vpaddq $H1,$D4,$D4 # d4 += h2*r2
1097 vpmuludq $T1,$H4,$H0 # h1*r2
1098 vpaddq $H0,$D3,$D3 # d3 += h1*r2
1099 vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n
1100 vpmuludq $T0,$H4,$H4 # h0*r2
1101 vpaddq $H4,$D2,$D2 # d2 += h0*r2
1102 vpmuludq $T4,$H2,$H1 # h4*s2
1103 vpaddq $H1,$D1,$D1 # d1 += h4*s2
1104 vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n
1105 vpmuludq $T3,$H2,$H2 # h3*s2
1106 vpaddq $H2,$D0,$D0 # d0 += h3*s2
1107
1108 vpmuludq $T1,$H3,$H0 # h1*r3
1109 vpaddq $H0,$D4,$D4 # d4 += h1*r3
1110 vpmuludq $T0,$H3,$H3 # h0*r3
1111 vpaddq $H3,$D3,$D3 # d3 += h0*r3
1112 vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n
1113 vpmuludq $T4,$H4,$H1 # h4*s3
1114 vpaddq $H1,$D2,$D2 # d2 += h4*s3
1115 vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n
1116 vpmuludq $T3,$H4,$H0 # h3*s3
1117 vpaddq $H0,$D1,$D1 # d1 += h3*s3
1118 vpmuludq $T2,$H4,$H4 # h2*s3
1119 vpaddq $H4,$D0,$D0 # d0 += h2*s3
1120
1121 vpmuludq $T0,$H2,$H2 # h0*r4
1122 vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4
1123 vpmuludq $T4,$H3,$H1 # h4*s4
1124 vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4
1125 vpmuludq $T3,$H3,$H0 # h3*s4
1126 vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4
1127 vpmuludq $T2,$H3,$H1 # h2*s4
1128 vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4
1129 vpmuludq $T1,$H3,$H3 # h1*s4
1130 vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4
1131
1132 jz .Lshort_tail_avx
1133
1134 vmovdqu 16*0($inp),$H0 # load input
1135 vmovdqu 16*1($inp),$H1
1136
1137 vpsrldq \$6,$H0,$H2 # splat input
1138 vpsrldq \$6,$H1,$H3
1139 vpunpckhqdq $H1,$H0,$H4 # 4
1140 vpunpcklqdq $H1,$H0,$H0 # 0:1
1141 vpunpcklqdq $H3,$H2,$H3 # 2:3
1142
1143 vpsrlq \$40,$H4,$H4 # 4
1144 vpsrlq \$26,$H0,$H1
1145 vpand $MASK,$H0,$H0 # 0
1146 vpsrlq \$4,$H3,$H2
1147 vpand $MASK,$H1,$H1 # 1
1148 vpsrlq \$30,$H3,$H3
1149 vpand $MASK,$H2,$H2 # 2
1150 vpand $MASK,$H3,$H3 # 3
1151 vpor 32(%rcx),$H4,$H4 # padbit, yes, always
1152
1153 vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4
1154 vpaddq 0x00(%r11),$H0,$H0
1155 vpaddq 0x10(%r11),$H1,$H1
1156 vpaddq 0x20(%r11),$H2,$H2
1157 vpaddq 0x30(%r11),$H3,$H3
1158 vpaddq 0x40(%r11),$H4,$H4
1159
1160 ################################################################
1161 # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1162
1163 vpmuludq $H0,$T4,$T0 # h0*r0
1164 vpaddq $T0,$D0,$D0 # d0 += h0*r0
1165 vpmuludq $H1,$T4,$T1 # h1*r0
1166 vpaddq $T1,$D1,$D1 # d1 += h1*r0
1167 vpmuludq $H2,$T4,$T0 # h2*r0
1168 vpaddq $T0,$D2,$D2 # d2 += h2*r0
1169 vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n
1170 vpmuludq $H3,$T4,$T1 # h3*r0
1171 vpaddq $T1,$D3,$D3 # d3 += h3*r0
1172 vpmuludq $H4,$T4,$T4 # h4*r0
1173 vpaddq $T4,$D4,$D4 # d4 += h4*r0
1174
1175 vpmuludq $H3,$T2,$T0 # h3*r1
1176 vpaddq $T0,$D4,$D4 # d4 += h3*r1
1177 vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1
1178 vpmuludq $H2,$T2,$T1 # h2*r1
1179 vpaddq $T1,$D3,$D3 # d3 += h2*r1
1180 vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2
1181 vpmuludq $H1,$T2,$T0 # h1*r1
1182 vpaddq $T0,$D2,$D2 # d2 += h1*r1
1183 vpmuludq $H0,$T2,$T2 # h0*r1
1184 vpaddq $T2,$D1,$D1 # d1 += h0*r1
1185 vpmuludq $H4,$T3,$T3 # h4*s1
1186 vpaddq $T3,$D0,$D0 # d0 += h4*s1
1187
1188 vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2
1189 vpmuludq $H2,$T4,$T1 # h2*r2
1190 vpaddq $T1,$D4,$D4 # d4 += h2*r2
1191 vpmuludq $H1,$T4,$T0 # h1*r2
1192 vpaddq $T0,$D3,$D3 # d3 += h1*r2
1193 vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3
1194 vpmuludq $H0,$T4,$T4 # h0*r2
1195 vpaddq $T4,$D2,$D2 # d2 += h0*r2
1196 vpmuludq $H4,$T2,$T1 # h4*s2
1197 vpaddq $T1,$D1,$D1 # d1 += h4*s2
1198 vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3
1199 vpmuludq $H3,$T2,$T2 # h3*s2
1200 vpaddq $T2,$D0,$D0 # d0 += h3*s2
1201
1202 vpmuludq $H1,$T3,$T0 # h1*r3
1203 vpaddq $T0,$D4,$D4 # d4 += h1*r3
1204 vpmuludq $H0,$T3,$T3 # h0*r3
1205 vpaddq $T3,$D3,$D3 # d3 += h0*r3
1206 vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4
1207 vpmuludq $H4,$T4,$T1 # h4*s3
1208 vpaddq $T1,$D2,$D2 # d2 += h4*s3
1209 vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4
1210 vpmuludq $H3,$T4,$T0 # h3*s3
1211 vpaddq $T0,$D1,$D1 # d1 += h3*s3
1212 vpmuludq $H2,$T4,$T4 # h2*s3
1213 vpaddq $T4,$D0,$D0 # d0 += h2*s3
1214
1215 vpmuludq $H0,$T2,$T2 # h0*r4
1216 vpaddq $T2,$D4,$D4 # d4 += h0*r4
1217 vpmuludq $H4,$T3,$T1 # h4*s4
1218 vpaddq $T1,$D3,$D3 # d3 += h4*s4
1219 vpmuludq $H3,$T3,$T0 # h3*s4
1220 vpaddq $T0,$D2,$D2 # d2 += h3*s4
1221 vpmuludq $H2,$T3,$T1 # h2*s4
1222 vpaddq $T1,$D1,$D1 # d1 += h2*s4
1223 vpmuludq $H1,$T3,$T3 # h1*s4
1224 vpaddq $T3,$D0,$D0 # d0 += h1*s4
1225
1226 .Lshort_tail_avx:
1227 ################################################################
1228 # horizontal addition
1229
1230 vpsrldq \$8,$D4,$T4
1231 vpsrldq \$8,$D3,$T3
1232 vpsrldq \$8,$D1,$T1
1233 vpsrldq \$8,$D0,$T0
1234 vpsrldq \$8,$D2,$T2
1235 vpaddq $T3,$D3,$D3
1236 vpaddq $T4,$D4,$D4
1237 vpaddq $T0,$D0,$D0
1238 vpaddq $T1,$D1,$D1
1239 vpaddq $T2,$D2,$D2
1240
1241 ################################################################
1242 # lazy reduction
1243
1244 vpsrlq \$26,$D3,$H3
1245 vpand $MASK,$D3,$D3
1246 vpaddq $H3,$D4,$D4 # h3 -> h4
1247
1248 vpsrlq \$26,$D0,$H0
1249 vpand $MASK,$D0,$D0
1250 vpaddq $H0,$D1,$D1 # h0 -> h1
1251
1252 vpsrlq \$26,$D4,$H4
1253 vpand $MASK,$D4,$D4
1254
1255 vpsrlq \$26,$D1,$H1
1256 vpand $MASK,$D1,$D1
1257 vpaddq $H1,$D2,$D2 # h1 -> h2
1258
1259 vpaddq $H4,$D0,$D0
1260 vpsllq \$2,$H4,$H4
1261 vpaddq $H4,$D0,$D0 # h4 -> h0
1262
1263 vpsrlq \$26,$D2,$H2
1264 vpand $MASK,$D2,$D2
1265 vpaddq $H2,$D3,$D3 # h2 -> h3
1266
1267 vpsrlq \$26,$D0,$H0
1268 vpand $MASK,$D0,$D0
1269 vpaddq $H0,$D1,$D1 # h0 -> h1
1270
1271 vpsrlq \$26,$D3,$H3
1272 vpand $MASK,$D3,$D3
1273 vpaddq $H3,$D4,$D4 # h3 -> h4
1274
1275 vmovd $D0,`4*0-48-64`($ctx) # save partially reduced
1276 vmovd $D1,`4*1-48-64`($ctx)
1277 vmovd $D2,`4*2-48-64`($ctx)
1278 vmovd $D3,`4*3-48-64`($ctx)
1279 vmovd $D4,`4*4-48-64`($ctx)
1280 ___
1281 $code.=<<___ if ($win64);
1282 vmovdqa 0x50(%r11),%xmm6
1283 vmovdqa 0x60(%r11),%xmm7
1284 vmovdqa 0x70(%r11),%xmm8
1285 vmovdqa 0x80(%r11),%xmm9
1286 vmovdqa 0x90(%r11),%xmm10
1287 vmovdqa 0xa0(%r11),%xmm11
1288 vmovdqa 0xb0(%r11),%xmm12
1289 vmovdqa 0xc0(%r11),%xmm13
1290 vmovdqa 0xd0(%r11),%xmm14
1291 vmovdqa 0xe0(%r11),%xmm15
1292 lea 0xf8(%r11),%rsp
1293 .Ldo_avx_epilogue:
1294 ___
1295 $code.=<<___ if (!$win64);
1296 lea 0x58(%r11),%rsp
1297 ___
1298 $code.=<<___;
1299 vzeroupper
1300 ret
1301 .size poly1305_blocks_avx,.-poly1305_blocks_avx
1302
1303 .type poly1305_emit_avx,\@function,3
1304 .align 32
1305 poly1305_emit_avx:
1306 cmpl \$0,20($ctx) # is_base2_26?
1307 je .Lemit
1308
1309 mov 0($ctx),%eax # load hash value base 2^26
1310 mov 4($ctx),%ecx
1311 mov 8($ctx),%r8d
1312 mov 12($ctx),%r11d
1313 mov 16($ctx),%r10d
1314
1315 shl \$26,%rcx # base 2^26 -> base 2^64
1316 mov %r8,%r9
1317 shl \$52,%r8
1318 add %rcx,%rax
1319 shr \$12,%r9
1320 add %rax,%r8 # h0
1321 adc \$0,%r9
1322
1323 shl \$14,%r11
1324 mov %r10,%rax
1325 shr \$24,%r10
1326 add %r11,%r9
1327 shl \$40,%rax
1328 add %rax,%r9 # h1
1329 adc \$0,%r10 # h2
1330
1331 mov %r10,%rax # could be partially reduced, so reduce
1332 mov %r10,%rcx
1333 and \$3,%r10
1334 shr \$2,%rax
1335 and \$-4,%rcx
1336 add %rcx,%rax
1337 add %rax,%r8
1338 adc \$0,%r9
1339 adc \$0,%r10
1340
1341 mov %r8,%rax
1342 add \$5,%r8 # compare to modulus
1343 mov %r9,%rcx
1344 adc \$0,%r9
1345 adc \$0,%r10
1346 shr \$2,%r10 # did 130-bit value overflow?
1347 cmovnz %r8,%rax
1348 cmovnz %r9,%rcx
1349
1350 add 0($nonce),%rax # accumulate nonce
1351 adc 8($nonce),%rcx
1352 mov %rax,0($mac) # write result
1353 mov %rcx,8($mac)
1354
1355 ret
1356 .size poly1305_emit_avx,.-poly1305_emit_avx
1357 ___
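
# Both vector paths evaluate the d0..d4 sums shown in the comments above,
# with s_k = 5*r_k folding the 2^130 wrap into the low digits, and then
# resolve carries lazily. A scalar model of one multiply-and-carry round
# (illustration only, assuming 64-bit Perl; digits in @$h, @$r are < 2^26):
#
#	sub mul_base26 {
#		my ($h, $r) = @_;		# refs to 5-digit arrays
#		my @s = map { 5 * $_ } @$r;	# s[k] = 5*r[k]
#		my @d = (0) x 5;
#		for my $i (0..4) {
#			for my $j (0..4) {
#				$d[$i] += $h->[$j] *
#					  ($j <= $i ? $r->[$i-$j] : $s[5+$i-$j]);
#			}
#		}
#		my $M = 0x3ffffff;		# lazy reduction, same order
#		my $c;				# as the vector code above
#		$c = $d[3]>>26; $d[3] &= $M; $d[4] += $c;	# h3 -> h4
#		$c = $d[0]>>26; $d[0] &= $M; $d[1] += $c;	# h0 -> h1
#		$c = $d[4]>>26; $d[4] &= $M; $d[0] += 5*$c;	# h4 -> h0
#		$c = $d[1]>>26; $d[1] &= $M; $d[2] += $c;	# h1 -> h2
#		$c = $d[2]>>26; $d[2] &= $M; $d[3] += $c;	# h2 -> h3
#		$c = $d[0]>>26; $d[0] &= $M; $d[1] += $c;	# h0 -> h1
#		$c = $d[3]>>26; $d[3] &= $M; $d[4] += $c;	# h3 -> h4
#		return @d;
#	}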
1358
1359 if ($avx>1) {
1360 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1361 map("%ymm$_",(0..15));
1362 my $S4=$MASK;
1363
1364 $code.=<<___;
1365 .type poly1305_blocks_avx2,\@function,4
1366 .align 32
1367 poly1305_blocks_avx2:
1368 mov 20($ctx),%r8d # is_base2_26
1369 cmp \$128,$len
1370 jae .Lblocks_avx2
1371 test %r8d,%r8d
1372 jz .Lblocks
1373
1374 .Lblocks_avx2:
1375 and \$-16,$len
1376 jz .Lno_data_avx2
1377
1378 vzeroupper
1379
1380 test %r8d,%r8d
1381 jz .Lbase2_64_avx2
1382
1383 test \$63,$len
1384 jz .Leven_avx2
1385
1386 push %rbx
1387 push %rbp
1388 push %r12
1389 push %r13
1390 push %r14
1391 push %r15
1392 .Lblocks_avx2_body:
1393
1394 mov $len,%r15 # reassign $len
1395
1396 mov 0($ctx),$d1 # load hash value
1397 mov 8($ctx),$d2
1398 mov 16($ctx),$h2#d
1399
1400 mov 24($ctx),$r0 # load r
1401 mov 32($ctx),$s1
1402
1403 ################################# base 2^26 -> base 2^64
1404 mov $d1#d,$h0#d
1405 and \$`-1*(1<<31)`,$d1
1406 mov $d2,$r1 # borrow $r1
1407 mov $d2#d,$h1#d
1408 and \$`-1*(1<<31)`,$d2
1409
1410 shr \$6,$d1
1411 shl \$52,$r1
1412 add $d1,$h0
1413 shr \$12,$h1
1414 shr \$18,$d2
1415 add $r1,$h0
1416 adc $d2,$h1
1417
1418 mov $h2,$d1
1419 shl \$40,$d1
1420 shr \$24,$h2
1421 add $d1,$h1
1422 adc \$0,$h2 # can be partially reduced...
1423
1424 mov \$-4,$d2 # ... so reduce
1425 mov $h2,$d1
1426 and $h2,$d2
1427 shr \$2,$d1
1428 and \$3,$h2
1429 add $d2,$d1 # =*5
1430 add $d1,$h0
1431 adc \$0,$h1
1432 adc \$0,$h2
1433
1434 mov $s1,$r1
1435 mov $s1,%rax
1436 shr \$2,$s1
1437 add $r1,$s1 # s1 = r1 + (r1 >> 2)
1438
1439 .Lbase2_26_pre_avx2:
1440 add 0($inp),$h0 # accumulate input
1441 adc 8($inp),$h1
1442 lea 16($inp),$inp
1443 adc $padbit,$h2
1444 sub \$16,%r15
1445
1446 call __poly1305_block
1447 mov $r1,%rax
1448
1449 test \$63,%r15
1450 jnz .Lbase2_26_pre_avx2
1451
1452 test $padbit,$padbit # if $padbit is zero,
1453 jz .Lstore_base2_64_avx2 # store hash in base 2^64 format
1454
1455 ################################# base 2^64 -> base 2^26
1456 mov $h0,%rax
1457 mov $h0,%rdx
1458 shr \$52,$h0
1459 mov $h1,$r0
1460 mov $h1,$r1
1461 shr \$26,%rdx
1462 and \$0x3ffffff,%rax # h[0]
1463 shl \$12,$r0
1464 and \$0x3ffffff,%rdx # h[1]
1465 shr \$14,$h1
1466 or $r0,$h0
1467 shl \$24,$h2
1468 and \$0x3ffffff,$h0 # h[2]
1469 shr \$40,$r1
1470 and \$0x3ffffff,$h1 # h[3]
1471 or $r1,$h2 # h[4]
1472
1473 test %r15,%r15
1474 jz .Lstore_base2_26_avx2
1475
1476 vmovd %rax#d,%x#$H0
1477 vmovd %rdx#d,%x#$H1
1478 vmovd $h0#d,%x#$H2
1479 vmovd $h1#d,%x#$H3
1480 vmovd $h2#d,%x#$H4
1481 jmp .Lproceed_avx2
1482
1483 .align 32
1484 .Lstore_base2_64_avx2:
1485 mov $h0,0($ctx)
1486 mov $h1,8($ctx)
1487 mov $h2,16($ctx) # note that is_base2_26 is zeroed
1488 jmp .Ldone_avx2
1489
1490 .align 16
1491 .Lstore_base2_26_avx2:
1492 mov %rax#d,0($ctx) # store hash value base 2^26
1493 mov %rdx#d,4($ctx)
1494 mov $h0#d,8($ctx)
1495 mov $h1#d,12($ctx)
1496 mov $h2#d,16($ctx)
1497 .align 16
1498 .Ldone_avx2:
1499 mov 0(%rsp),%r15
1500 mov 8(%rsp),%r14
1501 mov 16(%rsp),%r13
1502 mov 24(%rsp),%r12
1503 mov 32(%rsp),%rbp
1504 mov 40(%rsp),%rbx
1505 lea 48(%rsp),%rsp
1506 .Lno_data_avx2:
1507 .Lblocks_avx2_epilogue:
1508 ret
1509
1510 .align 32
1511 .Lbase2_64_avx2:
1512 push %rbx
1513 push %rbp
1514 push %r12
1515 push %r13
1516 push %r14
1517 push %r15
1518 .Lbase2_64_avx2_body:
1519
1520 mov $len,%r15 # reassign $len
1521
1522 mov 24($ctx),$r0 # load r
1523 mov 32($ctx),$s1
1524
1525 mov 0($ctx),$h0 # load hash value
1526 mov 8($ctx),$h1
1527 mov 16($ctx),$h2#d
1528
1529 mov $s1,$r1
1530 mov $s1,%rax
1531 shr \$2,$s1
1532 add $r1,$s1 # s1 = r1 + (r1 >> 2)
1533
1534 test \$63,$len
1535 jz .Linit_avx2
1536
1537 .Lbase2_64_pre_avx2:
1538 add 0($inp),$h0 # accumulate input
1539 adc 8($inp),$h1
1540 lea 16($inp),$inp
1541 adc $padbit,$h2
1542 sub \$16,%r15
1543
1544 call __poly1305_block
1545 mov $r1,%rax
1546
1547 test \$63,%r15
1548 jnz .Lbase2_64_pre_avx2
1549
1550 .Linit_avx2:
1551 ################################# base 2^64 -> base 2^26
1552 mov $h0,%rax
1553 mov $h0,%rdx
1554 shr \$52,$h0
1555 mov $h1,$d1
1556 mov $h1,$d2
1557 shr \$26,%rdx
1558 and \$0x3ffffff,%rax # h[0]
1559 shl \$12,$d1
1560 and \$0x3ffffff,%rdx # h[1]
1561 shr \$14,$h1
1562 or $d1,$h0
1563 shl \$24,$h2
1564 and \$0x3ffffff,$h0 # h[2]
1565 shr \$40,$d2
1566 and \$0x3ffffff,$h1 # h[3]
1567 or $d2,$h2 # h[4]
1568
1569 vmovd %rax#d,%x#$H0
1570 vmovd %rdx#d,%x#$H1
1571 vmovd $h0#d,%x#$H2
1572 vmovd $h1#d,%x#$H3
1573 vmovd $h2#d,%x#$H4
1574 movl \$1,20($ctx) # set is_base2_26
1575
1576 call __poly1305_init_avx
1577
1578 .Lproceed_avx2:
1579 mov %r15,$len # restore $len
1580 mov OPENSSL_ia32cap_P+8(%rip),%r10d
1581 mov \$`(1<<31|1<<30|1<<16)`,%r11d
1582
1583 mov 0(%rsp),%r15
1584 mov 8(%rsp),%r14
1585 mov 16(%rsp),%r13
1586 mov 24(%rsp),%r12
1587 mov 32(%rsp),%rbp
1588 mov 40(%rsp),%rbx
1589 lea 48(%rsp),%rax
1590 lea 48(%rsp),%rsp
1591 .Lbase2_64_avx2_epilogue:
1592 jmp .Ldo_avx2
1593
1594 .align 32
1595 .Leven_avx2:
1596 mov OPENSSL_ia32cap_P+8(%rip),%r10d
1597 mov \$`(1<<31|1<<30|1<<16)`,%r11d
1598 vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
1599 vmovd 4*1($ctx),%x#$H1
1600 vmovd 4*2($ctx),%x#$H2
1601 vmovd 4*3($ctx),%x#$H3
1602 vmovd 4*4($ctx),%x#$H4
1603
1604 .Ldo_avx2:
1605 ___
1606 $code.=<<___ if ($avx>2);
1607 cmp \$512,$len
1608 jb .Lskip_avx512
1609 and %r11d,%r10d
1610 cmp %r11d,%r10d # check for AVX512F+BW+VL
1611 je .Lblocks_avx512
1612 .Lskip_avx512:
1613 ___
1614 $code.=<<___ if (!$win64);
1615 lea -8(%rsp),%r11
1616 sub \$0x128,%rsp
1617 ___
1618 $code.=<<___ if ($win64);
1619 lea -0xf8(%rsp),%r11
1620 sub \$0x1c8,%rsp
1621 vmovdqa %xmm6,0x50(%r11)
1622 vmovdqa %xmm7,0x60(%r11)
1623 vmovdqa %xmm8,0x70(%r11)
1624 vmovdqa %xmm9,0x80(%r11)
1625 vmovdqa %xmm10,0x90(%r11)
1626 vmovdqa %xmm11,0xa0(%r11)
1627 vmovdqa %xmm12,0xb0(%r11)
1628 vmovdqa %xmm13,0xc0(%r11)
1629 vmovdqa %xmm14,0xd0(%r11)
1630 vmovdqa %xmm15,0xe0(%r11)
1631 .Ldo_avx2_body:
1632 ___
1633 $code.=<<___;
1634 lea 48+64($ctx),$ctx # size optimization
1635 lea .Lconst(%rip),%rcx
1636
1637 # expand and copy pre-calculated table to stack
1638 vmovdqu `16*0-64`($ctx),%x#$T2
1639 and \$-512,%rsp
1640 vmovdqu `16*1-64`($ctx),%x#$T3
1641 vmovdqu `16*2-64`($ctx),%x#$T4
1642 vmovdqu `16*3-64`($ctx),%x#$D0
1643 vmovdqu `16*4-64`($ctx),%x#$D1
1644 vmovdqu `16*5-64`($ctx),%x#$D2
1645 vmovdqu `16*6-64`($ctx),%x#$D3
1646 vpermq \$0x15,$T2,$T2 # 00003412 -> 12343434
1647 vmovdqu `16*7-64`($ctx),%x#$D4
1648 vpermq \$0x15,$T3,$T3
1649 vpshufd \$0xc8,$T2,$T2 # 12343434 -> 14243444
1650 vmovdqu `16*8-64`($ctx),%x#$MASK
1651 vpermq \$0x15,$T4,$T4
1652 vpshufd \$0xc8,$T3,$T3
1653 vmovdqa $T2,0x00(%rsp)
1654 vpermq \$0x15,$D0,$D0
1655 vpshufd \$0xc8,$T4,$T4
1656 vmovdqa $T3,0x20(%rsp)
1657 vpermq \$0x15,$D1,$D1
1658 vpshufd \$0xc8,$D0,$D0
1659 vmovdqa $T4,0x40(%rsp)
1660 vpermq \$0x15,$D2,$D2
1661 vpshufd \$0xc8,$D1,$D1
1662 vmovdqa $D0,0x60(%rsp)
1663 vpermq \$0x15,$D3,$D3
1664 vpshufd \$0xc8,$D2,$D2
1665 vmovdqa $D1,0x80(%rsp)
1666 vpermq \$0x15,$D4,$D4
1667 vpshufd \$0xc8,$D3,$D3
1668 vmovdqa $D2,0xa0(%rsp)
1669 vpermq \$0x15,$MASK,$MASK
1670 vpshufd \$0xc8,$D4,$D4
1671 vmovdqa $D3,0xc0(%rsp)
1672 vpshufd \$0xc8,$MASK,$MASK
1673 vmovdqa $D4,0xe0(%rsp)
1674 vmovdqa $MASK,0x100(%rsp)
1675 vmovdqa 64(%rcx),$MASK # .Lmask26
1676
1677 ################################################################
1678 # load input
1679 vmovdqu 16*0($inp),%x#$T0
1680 vmovdqu 16*1($inp),%x#$T1
1681 vinserti128 \$1,16*2($inp),$T0,$T0
1682 vinserti128 \$1,16*3($inp),$T1,$T1
1683 lea 16*4($inp),$inp
1684
1685 vpsrldq \$6,$T0,$T2 # splat input
1686 vpsrldq \$6,$T1,$T3
1687 vpunpckhqdq $T1,$T0,$T4 # 4
1688 vpunpcklqdq $T3,$T2,$T2 # 2:3
1689 vpunpcklqdq $T1,$T0,$T0 # 0:1
1690
1691 vpsrlq \$30,$T2,$T3
1692 vpsrlq \$4,$T2,$T2
1693 vpsrlq \$26,$T0,$T1
1694 vpsrlq \$40,$T4,$T4 # 4
1695 vpand $MASK,$T2,$T2 # 2
1696 vpand $MASK,$T0,$T0 # 0
1697 vpand $MASK,$T1,$T1 # 1
1698 vpand $MASK,$T3,$T3 # 3
1699 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1700
1701 lea 0x90(%rsp),%rax # size optimization
1702 vpaddq $H2,$T2,$H2 # accumulate input
1703 sub \$64,$len
1704 jz .Ltail_avx2
1705 jmp .Loop_avx2
1706
1707 .align 32
1708 .Loop_avx2:
1709 ################################################################
1710 # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1711 # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1712 # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1713 # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1714 # \________/\__________/
1715 ################################################################
1716 #vpaddq $H2,$T2,$H2 # accumulate input
1717 vpaddq $H0,$T0,$H0
1718 vmovdqa `32*0`(%rsp),$T0 # r0^4
1719 vpaddq $H1,$T1,$H1
1720 vmovdqa `32*1`(%rsp),$T1 # r1^4
1721 vpaddq $H3,$T3,$H3
1722 vmovdqa `32*3`(%rsp),$T2 # r2^4
1723 vpaddq $H4,$T4,$H4
1724 vmovdqa `32*6-0x90`(%rax),$T3 # s3^4
1725 vmovdqa `32*8-0x90`(%rax),$S4 # s4^4
1726
1727 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
1728 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
1729 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1730 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
1731 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1732 #
1733 # however, as h2 is "chronologically" the first one available, we pull
1734 # the corresponding operations up, so it's
1735 #
1736 # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
1737 # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
1738 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1739 # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
1740 # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
1741
1742 vpmuludq $H2,$T0,$D2 # d2 = h2*r0
1743 vpmuludq $H2,$T1,$D3 # d3 = h2*r1
1744 vpmuludq $H2,$T2,$D4 # d4 = h2*r2
1745 vpmuludq $H2,$T3,$D0 # d0 = h2*s3
1746 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
1747
1748 vpmuludq $H0,$T1,$T4 # h0*r1
1749 vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp
1750 vpaddq $T4,$D1,$D1 # d1 += h0*r1
1751 vpaddq $H2,$D2,$D2 # d2 += h1*r1
1752 vpmuludq $H3,$T1,$T4 # h3*r1
1753 vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
1754 vpaddq $T4,$D4,$D4 # d4 += h3*r1
1755 vpaddq $H2,$D0,$D0 # d0 += h4*s1
1756 vmovdqa `32*4-0x90`(%rax),$T1 # s2
1757
1758 vpmuludq $H0,$T0,$T4 # h0*r0
1759 vpmuludq $H1,$T0,$H2 # h1*r0
1760 vpaddq $T4,$D0,$D0 # d0 += h0*r0
1761 vpaddq $H2,$D1,$D1 # d1 += h1*r0
1762 vpmuludq $H3,$T0,$T4 # h3*r0
1763 vpmuludq $H4,$T0,$H2 # h4*r0
1764 vmovdqu 16*0($inp),%x#$T0 # load input
1765 vpaddq $T4,$D3,$D3 # d3 += h3*r0
1766 vpaddq $H2,$D4,$D4 # d4 += h4*r0
1767 vinserti128 \$1,16*2($inp),$T0,$T0
1768
1769 vpmuludq $H3,$T1,$T4 # h3*s2
1770 vpmuludq $H4,$T1,$H2 # h4*s2
1771 vmovdqu 16*1($inp),%x#$T1
1772 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1773 vpaddq $H2,$D1,$D1 # d1 += h4*s2
1774 vmovdqa `32*5-0x90`(%rax),$H2 # r3
1775 vpmuludq $H1,$T2,$T4 # h1*r2
1776 vpmuludq $H0,$T2,$T2 # h0*r2
1777 vpaddq $T4,$D3,$D3 # d3 += h1*r2
1778 vpaddq $T2,$D2,$D2 # d2 += h0*r2
1779 vinserti128 \$1,16*3($inp),$T1,$T1
1780 lea 16*4($inp),$inp
1781
1782 vpmuludq $H1,$H2,$T4 # h1*r3
1783 vpmuludq $H0,$H2,$H2 # h0*r3
1784 vpsrldq \$6,$T0,$T2 # splat input
1785 vpaddq $T4,$D4,$D4 # d4 += h1*r3
1786 vpaddq $H2,$D3,$D3 # d3 += h0*r3
1787 vpmuludq $H3,$T3,$T4 # h3*s3
1788 vpmuludq $H4,$T3,$H2 # h4*s3
1789 vpsrldq \$6,$T1,$T3
1790 vpaddq $T4,$D1,$D1 # d1 += h3*s3
1791 vpaddq $H2,$D2,$D2 # d2 += h4*s3
1792 vpunpckhqdq $T1,$T0,$T4 # 4
1793
1794 vpmuludq $H3,$S4,$H3 # h3*s4
1795 vpmuludq $H4,$S4,$H4 # h4*s4
1796 vpunpcklqdq $T1,$T0,$T0 # 0:1
1797 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
1798 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
1799 vpunpcklqdq $T3,$T2,$T3 # 2:3
1800 vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4
1801 vpmuludq $H1,$S4,$H0 # h1*s4
1802 vmovdqa 64(%rcx),$MASK # .Lmask26
1803 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
1804 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
1805
1806 ################################################################
1807 # lazy reduction (interleaved with tail of input splat)
1808
1809 vpsrlq \$26,$H3,$D3
1810 vpand $MASK,$H3,$H3
1811 vpaddq $D3,$H4,$H4 # h3 -> h4
1812
1813 vpsrlq \$26,$H0,$D0
1814 vpand $MASK,$H0,$H0
1815 vpaddq $D0,$D1,$H1 # h0 -> h1
1816
1817 vpsrlq \$26,$H4,$D4
1818 vpand $MASK,$H4,$H4
1819
1820 vpsrlq \$4,$T3,$T2
1821
1822 vpsrlq \$26,$H1,$D1
1823 vpand $MASK,$H1,$H1
1824 vpaddq $D1,$H2,$H2 # h1 -> h2
1825
1826 vpaddq $D4,$H0,$H0
1827 vpsllq \$2,$D4,$D4
1828 vpaddq $D4,$H0,$H0 # h4 -> h0
1829
1830 vpand $MASK,$T2,$T2 # 2
1831 vpsrlq \$26,$T0,$T1
1832
1833 vpsrlq \$26,$H2,$D2
1834 vpand $MASK,$H2,$H2
1835 vpaddq $D2,$H3,$H3 # h2 -> h3
1836
1837 vpaddq $T2,$H2,$H2 # modulo-scheduled
1838 vpsrlq \$30,$T3,$T3
1839
1840 vpsrlq \$26,$H0,$D0
1841 vpand $MASK,$H0,$H0
1842 vpaddq $D0,$H1,$H1 # h0 -> h1
1843
1844 vpsrlq \$40,$T4,$T4 # 4
1845
1846 vpsrlq \$26,$H3,$D3
1847 vpand $MASK,$H3,$H3
1848 vpaddq $D3,$H4,$H4 # h3 -> h4
1849
1850 vpand $MASK,$T0,$T0 # 0
1851 vpand $MASK,$T1,$T1 # 1
1852 vpand $MASK,$T3,$T3 # 3
1853 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1854
1855 sub \$64,$len
1856 jnz .Loop_avx2
1857
1858 .byte 0x66,0x90
1859 .Ltail_avx2:
1860 ################################################################
1861 # while the above multiplications were by r^4 in all lanes, in the last
1862 # iteration we multiply the least significant lane by r^4 and the most
1863 # significant one by r, so this is a copy of the above except that
1864 # references to the precomputed table are displaced by 4...
1865
1866 #vpaddq $H2,$T2,$H2 # accumulate input
1867 vpaddq $H0,$T0,$H0
1868 vmovdqu `32*0+4`(%rsp),$T0 # r0^4
1869 vpaddq $H1,$T1,$H1
1870 vmovdqu `32*1+4`(%rsp),$T1 # r1^4
1871 vpaddq $H3,$T3,$H3
1872 vmovdqu `32*3+4`(%rsp),$T2 # r2^4
1873 vpaddq $H4,$T4,$H4
1874 vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
1875 vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4
1876
1877 vpmuludq $H2,$T0,$D2 # d2 = h2*r0
1878 vpmuludq $H2,$T1,$D3 # d3 = h2*r1
1879 vpmuludq $H2,$T2,$D4 # d4 = h2*r2
1880 vpmuludq $H2,$T3,$D0 # d0 = h2*s3
1881 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
1882
1883 vpmuludq $H0,$T1,$T4 # h0*r1
1884 vpmuludq $H1,$T1,$H2 # h1*r1
1885 vpaddq $T4,$D1,$D1 # d1 += h0*r1
1886 vpaddq $H2,$D2,$D2 # d2 += h1*r1
1887 vpmuludq $H3,$T1,$T4 # h3*r1
1888 vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
1889 vpaddq $T4,$D4,$D4 # d4 += h3*r1
1890 vpaddq $H2,$D0,$D0 # d0 += h4*s1
1891
1892 vpmuludq $H0,$T0,$T4 # h0*r0
1893 vpmuludq $H1,$T0,$H2 # h1*r0
1894 vpaddq $T4,$D0,$D0 # d0 += h0*r0
1895 vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
1896 vpaddq $H2,$D1,$D1 # d1 += h1*r0
1897 vpmuludq $H3,$T0,$T4 # h3*r0
1898 vpmuludq $H4,$T0,$H2 # h4*r0
1899 vpaddq $T4,$D3,$D3 # d3 += h3*r0
1900 vpaddq $H2,$D4,$D4 # d4 += h4*r0
1901
1902 vpmuludq $H3,$T1,$T4 # h3*s2
1903 vpmuludq $H4,$T1,$H2 # h4*s2
1904 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1905 vpaddq $H2,$D1,$D1 # d1 += h4*s2
1906 vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
1907 vpmuludq $H1,$T2,$T4 # h1*r2
1908 vpmuludq $H0,$T2,$T2 # h0*r2
1909 vpaddq $T4,$D3,$D3 # d3 += h1*r2
1910 vpaddq $T2,$D2,$D2 # d2 += h0*r2
1911
1912 vpmuludq $H1,$H2,$T4 # h1*r3
1913 vpmuludq $H0,$H2,$H2 # h0*r3
1914 vpaddq $T4,$D4,$D4 # d4 += h1*r3
1915 vpaddq $H2,$D3,$D3 # d3 += h0*r3
1916 vpmuludq $H3,$T3,$T4 # h3*s3
1917 vpmuludq $H4,$T3,$H2 # h4*s3
1918 vpaddq $T4,$D1,$D1 # d1 += h3*s3
1919 vpaddq $H2,$D2,$D2 # d2 += h4*s3
1920
1921 vpmuludq $H3,$S4,$H3 # h3*s4
1922 vpmuludq $H4,$S4,$H4 # h4*s4
1923 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
1924 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
1925 vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4
1926 vpmuludq $H1,$S4,$H0 # h1*s4
1927 vmovdqa 64(%rcx),$MASK # .Lmask26
1928 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
1929 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
1930
1931 ################################################################
1932 # horizontal addition
1933
1934 vpsrldq \$8,$D1,$T1
1935 vpsrldq \$8,$H2,$T2
1936 vpsrldq \$8,$H3,$T3
1937 vpsrldq \$8,$H4,$T4
1938 vpsrldq \$8,$H0,$T0
1939 vpaddq $T1,$D1,$D1
1940 vpaddq $T2,$H2,$H2
1941 vpaddq $T3,$H3,$H3
1942 vpaddq $T4,$H4,$H4
1943 vpaddq $T0,$H0,$H0
1944
1945 vpermq \$0x2,$H3,$T3
1946 vpermq \$0x2,$H4,$T4
1947 vpermq \$0x2,$H0,$T0
1948 vpermq \$0x2,$D1,$T1
1949 vpermq \$0x2,$H2,$T2
1950 vpaddq $T3,$H3,$H3
1951 vpaddq $T4,$H4,$H4
1952 vpaddq $T0,$H0,$H0
1953 vpaddq $T1,$D1,$D1
1954 vpaddq $T2,$H2,$H2
1955
1956 ################################################################
1957 # lazy reduction
1958
1959 vpsrlq \$26,$H3,$D3
1960 vpand $MASK,$H3,$H3
1961 vpaddq $D3,$H4,$H4 # h3 -> h4
1962
1963 vpsrlq \$26,$H0,$D0
1964 vpand $MASK,$H0,$H0
1965 vpaddq $D0,$D1,$H1 # h0 -> h1
1966
1967 vpsrlq \$26,$H4,$D4
1968 vpand $MASK,$H4,$H4
1969
1970 vpsrlq \$26,$H1,$D1
1971 vpand $MASK,$H1,$H1
1972 vpaddq $D1,$H2,$H2 # h1 -> h2
1973
1974 vpaddq $D4,$H0,$H0
1975 vpsllq \$2,$D4,$D4
1976 vpaddq $D4,$H0,$H0 # h4 -> h0
1977
1978 vpsrlq \$26,$H2,$D2
1979 vpand $MASK,$H2,$H2
1980 vpaddq $D2,$H3,$H3 # h2 -> h3
1981
1982 vpsrlq \$26,$H0,$D0
1983 vpand $MASK,$H0,$H0
1984 vpaddq $D0,$H1,$H1 # h0 -> h1
1985
1986 vpsrlq \$26,$H3,$D3
1987 vpand $MASK,$H3,$H3
1988 vpaddq $D3,$H4,$H4 # h3 -> h4
1989
1990 vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
1991 vmovd %x#$H1,`4*1-48-64`($ctx)
1992 vmovd %x#$H2,`4*2-48-64`($ctx)
1993 vmovd %x#$H3,`4*3-48-64`($ctx)
1994 vmovd %x#$H4,`4*4-48-64`($ctx)
1995 ___
1996 $code.=<<___ if ($win64);
1997 vmovdqa 0x50(%r11),%xmm6
1998 vmovdqa 0x60(%r11),%xmm7
1999 vmovdqa 0x70(%r11),%xmm8
2000 vmovdqa 0x80(%r11),%xmm9
2001 vmovdqa 0x90(%r11),%xmm10
2002 vmovdqa 0xa0(%r11),%xmm11
2003 vmovdqa 0xb0(%r11),%xmm12
2004 vmovdqa 0xc0(%r11),%xmm13
2005 vmovdqa 0xd0(%r11),%xmm14
2006 vmovdqa 0xe0(%r11),%xmm15
2007 lea 0xf8(%r11),%rsp
2008 .Ldo_avx2_epilogue:
2009 ___
2010 $code.=<<___ if (!$win64);
2011 lea 8(%r11),%rsp
2012 ___
2013 $code.=<<___;
2014 vzeroupper
2015 ret
2016 .size poly1305_blocks_avx2,.-poly1305_blocks_avx2
2017 ___
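
# The four-lane schedule in .Loop_avx2/.Ltail_avx2 above is the serial
# Horner evaluation regrouped: every lane is repeatedly multiplied by r^4,
# and the final pass weights lane i by r^(4-i). A BigInt model of that
# regrouping (illustration only; @m holds the 4*k accumulated block values):
#
#	use Math::BigInt;
#	sub horner_by4 {
#		my ($p, $r, @m) = @_;		# modulus, key, block values
#		my @lane = map { Math::BigInt->bzero } 0..3;
#		my $r4 = $r->copy->bpow(4)->bmod($p);
#		while (@m) {
#			$lane[$_]->bmul($r4)->badd(shift @m)->bmod($p) for 0..3;
#		}
#		my $h = Math::BigInt->bzero;
#		for my $i (0..3) {		# lane i carries weight r^(4-i)
#			my $w = $r->copy->bpow(4-$i)->bmod($p);
#			$h->badd($lane[$i]->bmul($w))->bmod($p);
#		}
#		return $h;	# equals serial h=0; h=(h+m)*r per block
#	}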
2018 #######################################################################
2019 if ($avx>2) {
2020 # On entry we have the input length divisible by 64. But since the inner
2021 # loop processes 128 bytes per iteration, cases when the length is not a
2022 # multiple of 128 are handled by passing the tail 64 bytes to .Ltail_avx2.
2023 # For this reason the stack layout is kept identical to poly1305_blocks_avx2.
2024 # If not for this tail, we wouldn't even have to allocate a stack frame...
2025
2026 my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%ymm$_",(16..24));
2027 my ($M0,$M1,$M2,$M3,$M4) = map("%ymm$_",(25..29));
2028 my $PADBIT="%zmm30";
2029 my $GATHER="%ymm31";
2030
2031 $code.=<<___;
2032 .type poly1305_blocks_avx512,\@function,4
2033 .align 32
2034 poly1305_blocks_avx512:
2035 .Lblocks_avx512:
2036 vzeroupper
2037 ___
2038 $code.=<<___ if (!$win64);
2039 lea -8(%rsp),%r11
2040 sub \$0x128,%rsp
2041 ___
2042 $code.=<<___ if ($win64);
2043 lea -0xf8(%rsp),%r11
2044 sub \$0x1c8,%rsp
2045 vmovdqa %xmm6,0x50(%r11)
2046 vmovdqa %xmm7,0x60(%r11)
2047 vmovdqa %xmm8,0x70(%r11)
2048 vmovdqa %xmm9,0x80(%r11)
2049 vmovdqa %xmm10,0x90(%r11)
2050 vmovdqa %xmm11,0xa0(%r11)
2051 vmovdqa %xmm12,0xb0(%r11)
2052 vmovdqa %xmm13,0xc0(%r11)
2053 vmovdqa %xmm14,0xd0(%r11)
2054 vmovdqa %xmm15,0xe0(%r11)
2055 .Ldo_avx512_body:
2056 ___
2057 $code.=<<___;
2058 lea 48+64($ctx),$ctx # size optimization
2059 lea .Lconst(%rip),%rcx
2060
2061 # expand pre-calculated table
2062 vmovdqu32 `16*0-64`($ctx),%x#$R0
2063 and \$-512,%rsp
2064 vmovdqu32 `16*1-64`($ctx),%x#$R1
2065 vmovdqu32 `16*2-64`($ctx),%x#$S1
2066 vmovdqu32 `16*3-64`($ctx),%x#$R2
2067 vmovdqu32 `16*4-64`($ctx),%x#$S2
2068 vmovdqu32 `16*5-64`($ctx),%x#$R3
2069 vmovdqu32 `16*6-64`($ctx),%x#$S3
2070 vmovdqu32 `16*7-64`($ctx),%x#$R4
2071 vmovdqu32 `16*8-64`($ctx),%x#$S4
2072 vpermq \$0x15,$R0,$R0 # 00003412 -> 12343434
2073 vmovdqa64 64(%rcx),$MASK # .Lmask26
2074 vpermq \$0x15,$R1,$R1
2075 vmovdqa32 128(%rcx),$GATHER # .Lgather
2076 vpermq \$0x15,$S1,$S1
2077 vpshufd \$0xc8,$R0,$R0 # 12343434 -> 14243444
2078 vpermq \$0x15,$R2,$R2
2079 vpshufd \$0xc8,$R1,$R1
2080 vmovdqa32 $R0,0x00(%rsp) # save in case $len%128 != 0
2081 vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304
2082 vpermq \$0x15,$S2,$S2
2083 vpshufd \$0xc8,$S1,$S1
2084 vmovdqa32 $R1,0x20(%rsp)
2085 vpsrlq \$32,$R1,$T1
2086 vpermq \$0x15,$R3,$R3
2087 vpshufd \$0xc8,$R2,$R2
2088 vmovdqa32 $S1,0x40(%rsp)
2089 vpermq \$0x15,$S3,$S3
2090 vpshufd \$0xc8,$S2,$S2
2091 vpermq \$0x15,$R4,$R4
2092 vpshufd \$0xc8,$R3,$R3
2093 vmovdqa32 $R2,0x60(%rsp)
2094 vpermq \$0x15,$S4,$S4
2095 vpshufd \$0xc8,$S3,$S3
2096 vmovdqa32 $S2,0x80(%rsp)
2097 vpshufd \$0xc8,$R4,$R4
2098 vpshufd \$0xc8,$S4,$S4
2099 vmovdqa32 $R3,0xa0(%rsp)
2100 vmovdqa32 $S3,0xc0(%rsp)
2101 vmovdqa32 $R4,0xe0(%rsp)
2102 vmovdqa32 $S4,0x100(%rsp)
2103
2104 ################################################################
2105 # calculate 5th through 8th powers of the key
2106 #
2107 # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
2108 # d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
2109 # d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3
2110 # d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4
2111 # d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0
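#
# (sketch) why the s = 5*r terms work: with p = 2^130-5 and
# r = r0 + r1*2^26 + r2*2^52 + r3*2^78 + r4*2^104, a partial product
# whose weight reaches 2^130 folds back multiplied by 5, since
# 2^130 = 5 (mod p), e.g.
#
#	(r1*2^26)*(r4*2^104) = r1*r4*2^130 = r1*(5*r4) (mod p)
#
# which is where the r1'*5*r4 term in d0 comes from.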
2112
2113 vpmuludq $T0,$R0,$D0 # d0 = r0'*r0
2114 vpmuludq $T0,$R1,$D1 # d1 = r0'*r1
2115 vpmuludq $T0,$R2,$D2 # d2 = r0'*r2
2116 vpmuludq $T0,$R3,$D3 # d3 = r0'*r3
2117 vpmuludq $T0,$R4,$D4 # d4 = r0'*r4
2118 vpsrlq \$32,$R2,$T2
2119
2120 vpmuludq $T1,$S4,$M0
2121 vpmuludq $T1,$R0,$M1
2122 vpmuludq $T1,$R1,$M2
2123 vpmuludq $T1,$R2,$M3
2124 vpmuludq $T1,$R3,$M4
2125 vpsrlq \$32,$R3,$T3
2126 vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4
2127 vpaddq $M1,$D1,$D1 # d1 += r1'*r0
2128 vpaddq $M2,$D2,$D2 # d2 += r1'*r1
2129 vpaddq $M3,$D3,$D3 # d3 += r1'*r2
2130 vpaddq $M4,$D4,$D4 # d4 += r1'*r3
2131
2132 vpmuludq $T2,$S3,$M0
2133 vpmuludq $T2,$S4,$M1
2134 vpmuludq $T2,$R1,$M3
2135 vpmuludq $T2,$R2,$M4
2136 vpmuludq $T2,$R0,$M2
2137 vpsrlq \$32,$R4,$T4
2138 vpaddq $M0,$D0,$D0 # d0 += r2'*5*r3
2139 vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4
2140 vpaddq $M3,$D3,$D3 # d3 += r2'*r1
2141 vpaddq $M4,$D4,$D4 # d4 += r2'*r2
2142 vpaddq $M2,$D2,$D2 # d2 += r2'*r0
2143
2144 vpmuludq $T3,$S2,$M0
2145 vpmuludq $T3,$R0,$M3
2146 vpmuludq $T3,$R1,$M4
2147 vpmuludq $T3,$S3,$M1
2148 vpmuludq $T3,$S4,$M2
2149 vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2
2150 vpaddq $M3,$D3,$D3 # d3 += r3'*r0
2151 vpaddq $M4,$D4,$D4 # d4 += r3'*r1
2152 vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3
2153 vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4
2154
2155 vpmuludq $T4,$S4,$M3
2156 vpmuludq $T4,$R0,$M4
2157 vpmuludq $T4,$S1,$M0
2158 vpmuludq $T4,$S2,$M1
2159 vpmuludq $T4,$S3,$M2
2160 vpaddq $M3,$D3,$D3 # d3 += r4'*5*r4
2161 vpaddq $M4,$D4,$D4 # d4 += r4'*r0
2162 vpaddq $M0,$D0,$D0 # d0 += r4'*5*r1
2163 vpaddq $M1,$D1,$D1 # d1 += r4'*5*r2
2164 vpaddq $M2,$D2,$D2 # d2 += r4'*5*r3
2165
2166 ################################################################
2167 # load input
2168 vmovdqu64 16*0($inp),%x#$T0
2169 vmovdqu64 16*1($inp),%x#$T1
2170 vinserti64x2 \$1,16*2($inp),$T0,$T0
2171 vinserti64x2 \$1,16*3($inp),$T1,$T1
2172
2173 ################################################################
2174 # lazy reduction
2175
2176 vpsrlq \$26,$D3,$M3
2177 vpandq $MASK,$D3,$D3
2178 vpaddq $M3,$D4,$D4 # d3 -> d4
2179
2180 vpsrlq \$26,$D0,$M0
2181 vpandq $MASK,$D0,$D0
2182 vpaddq $M0,$D1,$D1 # d0 -> d1
2183
2184 vpsrlq \$26,$D4,$M4
2185 vpandq $MASK,$D4,$D4
2186
2187 vpsrlq \$26,$D1,$M1
2188 vpandq $MASK,$D1,$D1
2189 vpaddq $M1,$D2,$D2 # d1 -> d2
2190
2191 vpaddq $M4,$D0,$D0
2192 vpsllq \$2,$M4,$M4
2193 vpaddq $M4,$D0,$D0 # d4 -> d0
2194
2195 vpsrlq \$26,$D2,$M2
2196 vpandq $MASK,$D2,$D2
2197 vpaddq $M2,$D3,$D3 # d2 -> d3
2198
2199 vpsrlq \$26,$D0,$M0
2200 vpandq $MASK,$D0,$D0
2201 vpaddq $M0,$D1,$D1 # d0 -> d1
2202
2203 vpsrlq \$26,$D3,$M3
2204 vpandq $MASK,$D3,$D3
2205 vpaddq $M3,$D4,$D4 # d3 -> d4
2206
2207 ___
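# A scalar Perl model (illustration only) of the lazy reduction above;
# the vector code interleaves two such carry chains, and the top carry
# folds back into d0 as *5 because 2^130 = 5 (mod 2^130-5):
sub lazy_reduce_sketch {
    my @d = @_;				# five 26-bit limb accumulators
    my $mask = (1<<26) - 1;		# .Lmask26 equivalent
    for my $i (0 .. 3) {		# d0 -> d1 -> d2 -> d3 -> d4
	$d[$i+1] += $d[$i] >> 26;
	$d[$i]   &= $mask;
    }
    my $c = $d[4] >> 26;		# d4 -> d0, folded as c + 4*c = 5*c
    $d[4] &= $mask;
    $d[0] += $c + ($c << 2);
    return @d;				# limbs left slightly above 26 bits
}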
2208 map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));
2209 map(s/%y/%z/,($M4,$M0,$M1,$M2,$M3));
2210 map(s/%y/%z/,($MASK));
2211 $code.=<<___;
2212 ################################################################
2213 # load more input
2214 vinserti64x2 \$2,16*4($inp),$T0,$T0
2215 vinserti64x2 \$2,16*5($inp),$T1,$T1
2216 vinserti64x2 \$3,16*6($inp),$T0,$T0
2217 vinserti64x2 \$3,16*7($inp),$T1,$T1
2218 lea 16*8($inp),$inp
2219
2220 vpbroadcastq %x#$MASK,$MASK
2221 vpbroadcastq 32(%rcx),$PADBIT
2222
2223 ################################################################
2224 # at this point we have 14243444 in $R0-$S4 and 05060708 in
2225 # $D0-$D4, and the goal is 1828384858687888 in $R0-$S4
2226
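# (sketch) 0x5555 = 0b0101...01 in %k3 flags the even dword lane of
# every qword, so the vpblendmd below takes those lanes from the
# broadcast 8th-power vectors and the odd ones from the shifted table,
# interleaving the two sets of powers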
2227 mov \$0x5555,%eax
2228 vpbroadcastq %x#$D0,$M0 # 0808080808080808
2229 vpbroadcastq %x#$D1,$M1
2230 vpbroadcastq %x#$D2,$M2
2231 vpbroadcastq %x#$D3,$M3
2232 vpbroadcastq %x#$D4,$M4
2233 kmovw %eax,%k3
2234 vpsllq \$32,$D0,$D0 # 05060708 -> 50607080
2235 vpsllq \$32,$D1,$D1
2236 vpsllq \$32,$D2,$D2
2237 vpsllq \$32,$D3,$D3
2238 vpsllq \$32,$D4,$D4
2239 ___
2240 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
2241 $code.=<<___;
2242 vinserti64x4 \$1,$R0,$D0,$D0 # 1424344450607080
2243 vinserti64x4 \$1,$R1,$D1,$D1
2244 vinserti64x4 \$1,$R2,$D2,$D2
2245 vinserti64x4 \$1,$R3,$D3,$D3
2246 vinserti64x4 \$1,$R4,$D4,$D4
2247 ___
2248 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
2249 map(s/%y/%z/,($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4));
2250 $code.=<<___;
2251 vpblendmd $M0,$D0,${R0}{%k3} # 1828384858687888
2252 vpblendmd $M1,$D1,${R1}{%k3}
2253 vpblendmd $M2,$D2,${R2}{%k3}
2254 vpblendmd $M3,$D3,${R3}{%k3}
2255 vpblendmd $M4,$D4,${R4}{%k3}
2256
2257 vpslld \$2,$R1,$S1 # *5
2258 vpslld \$2,$R2,$S2
2259 vpslld \$2,$R3,$S3
2260 vpslld \$2,$R4,$S4
2261 vpaddd $R1,$S1,$S1
2262 vpaddd $R2,$S2,$S2
2263 vpaddd $R3,$S3,$S3
2264 vpaddd $R4,$S4,$S4
2265
2266 vpsrldq \$6,$T0,$T2 # splat input
2267 vpsrldq \$6,$T1,$T3
2268 vpunpckhqdq $T1,$T0,$T4 # 4
2269 vpunpcklqdq $T3,$T2,$T2 # 2:3
2270 vpunpcklqdq $T1,$T0,$T0 # 0:1
2271
2272 vpsrlq \$30,$T2,$T3
2273 vpsrlq \$4,$T2,$T2
2274 vpsrlq \$26,$T0,$T1
2275 vpsrlq \$40,$T4,$T4 # 4
2276 vpandq $MASK,$T2,$T2 # 2
2277 vpandq $MASK,$T0,$T0 # 0
2278 #vpandq $MASK,$T1,$T1 # 1
2279 #vpandq $MASK,$T3,$T3 # 3
2280 #vporq $PADBIT,$T4,$T4 # padbit, yes, always
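# (sketch) per 16-byte block m = lo + hi*2^64, the splat above computes
# (with some of the masking deferred into the loop):
#	t0 =  lo         & mask26
#	t1 = (lo >> 26)  & mask26
#	t2 = ((lo >> 52) | (hi << 12)) & mask26
#	t3 = (hi >> 14)  & mask26
#	t4 = (hi >> 40)  | 2^24		# padbit = bit 128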
2281
2282 vpaddq $H2,$T2,$H2 # accumulate input
2283 mov \$0x0f,%eax
2284 sub \$192,$len
2285 jbe .Ltail_avx512
2286
2287 .Loop_avx512:
2288 ################################################################
2289 # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
2290 # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
2291 # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
2292 # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
2293 # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
2294 # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
2295 # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
2296 # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
2297 # \________/\___________/
2298 ################################################################
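#	in other words, each lane i runs the same Horner step
#	h[i] = (h[i] + inp[8*k+i])*r^8 per 128-byte iteration; the
#	differing final powers r^8..r^1 are applied in .Ltail_avx512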
2299 #vpaddq $H2,$T2,$H2 # accumulate input
2300
2301 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
2302 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
2303 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
2304 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
2305 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
2306 #
2307 # however, as h2 is "chronologically" the first one available, pull the
2308 # corresponding operations up, so it's
2309 #
2310 # d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4
2311 # d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0
2312 # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
2313 # d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2
2314 # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3
2315
2316 vpmuludq $H2,$R1,$D3 # d3 = h2*r1
2317 vpaddq $H0,$T0,$H0
2318 vmovdqu64 16*0($inp),%x#$M0 # load input
2319 vpmuludq $H2,$R2,$D4 # d4 = h2*r2
2320 vpandq $MASK,$T1,$T1 # 1, modulo-scheduled
2321 vmovdqu64 16*1($inp),%x#$M1
2322 vpmuludq $H2,$S3,$D0 # d0 = h2*s3
2323 vpandq $MASK,$T3,$T3 # 3
2324 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
2325 vporq $PADBIT,$T4,$T4 # padbit, yes, always
2326 vpmuludq $H2,$R0,$D2 # d2 = h2*r0
2327 vpaddq $H1,$T1,$H1 # accumulate input
2328 vpaddq $H3,$T3,$H3
2329 vpaddq $H4,$T4,$H4
2330
2331 vinserti64x2 \$1,16*2($inp),$M0,$T0
2332 vinserti64x2 \$1,16*3($inp),$M1,$T1
2333 vpmuludq $H0,$R3,$M3
2334 vpmuludq $H0,$R4,$M4
2335 vpmuludq $H0,$R0,$M0
2336 vpmuludq $H0,$R1,$M1
2337 vpaddq $M3,$D3,$D3 # d3 += h0*r3
2338 vpaddq $M4,$D4,$D4 # d4 += h0*r4
2339 vpaddq $M0,$D0,$D0 # d0 += h0*r0
2340 vpaddq $M1,$D1,$D1 # d1 += h0*r1
2341
2342 vinserti64x2 \$2,16*4($inp),$T0,$T0
2343 vinserti64x2 \$2,16*5($inp),$T1,$T1
2344 vpmuludq $H1,$R2,$M3
2345 vpmuludq $H1,$R3,$M4
2346 vpmuludq $H1,$S4,$M0
2347 vpmuludq $H0,$R2,$M2
2348 vpaddq $M3,$D3,$D3 # d3 += h1*r2
2349 vpaddq $M4,$D4,$D4 # d4 += h1*r3
2350 vpaddq $M0,$D0,$D0 # d0 += h1*s4
2351 vpaddq $M2,$D2,$D2 # d2 += h0*r2
2352
2353 vinserti64x2 \$3,16*6($inp),$T0,$T0
2354 vinserti64x2 \$3,16*7($inp),$T1,$T1
2355 vpmuludq $H3,$R0,$M3
2356 vpmuludq $H3,$R1,$M4
2357 vpmuludq $H1,$R0,$M1
2358 vpmuludq $H1,$R1,$M2
2359 vpaddq $M3,$D3,$D3 # d3 += h3*r0
2360 vpaddq $M4,$D4,$D4 # d4 += h3*r1
2361 vpaddq $M1,$D1,$D1 # d1 += h1*r0
2362 vpaddq $M2,$D2,$D2 # d2 += h1*r1
2363
2364 vpsrldq \$6,$T0,$T2 # splat input
2365 vpsrldq \$6,$T1,$T3
2366 vpunpckhqdq $T1,$T0,$T4 # 4
2367 vpmuludq $H4,$S4,$M3
2368 vpmuludq $H4,$R0,$M4
2369 vpmuludq $H3,$S2,$M0
2370 vpmuludq $H3,$S3,$M1
2371 vpaddq $M3,$D3,$D3 # d3 += h4*s4
2372 vpmuludq $H3,$S4,$M2
2373 vpaddq $M4,$D4,$D4 # d4 += h4*r0
2374 vpaddq $M0,$D0,$D0 # d0 += h3*s2
2375 vpaddq $M1,$D1,$D1 # d1 += h3*s3
2376 vpaddq $M2,$D2,$D2 # d2 += h3*s4
2377
2378 vpunpcklqdq $T1,$T0,$T0 # 0:1
2379 vpunpcklqdq $T3,$T2,$T3 # 2:3
2380 lea 16*8($inp),$inp
2381 vpmuludq $H4,$S1,$M0
2382 vpmuludq $H4,$S2,$M1
2383 vpmuludq $H4,$S3,$M2
2384 vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
2385 vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
2386 vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3
2387
2388 ################################################################
2389 # lazy reduction (interleaved with tail of input splat)
2390
2391 vpsrlq \$26,$D3,$H3
2392 vpandq $MASK,$D3,$D3
2393 vpaddq $H3,$D4,$H4 # h3 -> h4
2394
2395 vpsrlq \$26,$H0,$D0
2396 vpandq $MASK,$H0,$H0
2397 vpaddq $D0,$H1,$H1 # h0 -> h1
2398
2399 vpsrlq \$26,$H4,$D4
2400 vpandq $MASK,$H4,$H4
2401
2402 vpsrlq \$4,$T3,$T2
2403
2404 vpsrlq \$26,$H1,$D1
2405 vpandq $MASK,$H1,$H1
2406 vpaddq $D1,$H2,$H2 # h1 -> h2
2407
2408 vpaddq $D4,$H0,$H0
2409 vpsllq \$2,$D4,$D4
2410 vpaddq $D4,$H0,$H0 # h4 -> h0
2411
2412 vpandq $MASK,$T2,$T2 # 2
2413 vpsrlq \$26,$T0,$T1
2414
2415 vpsrlq \$26,$H2,$D2
2416 vpandq $MASK,$H2,$H2
2417 vpaddq $D2,$D3,$H3 # h2 -> h3
2418
2419 vpaddq $T2,$H2,$H2 # modulo-scheduled
2420 vpsrlq \$30,$T3,$T3
2421
2422 vpsrlq \$26,$H0,$D0
2423 vpandq $MASK,$H0,$H0
2424 vpaddq $D0,$H1,$H1 # h0 -> h1
2425
2426 vpsrlq \$40,$T4,$T4 # 4
2427
2428 vpsrlq \$26,$H3,$D3
2429 vpandq $MASK,$H3,$H3
2430 vpaddq $D3,$H4,$H4 # h3 -> h4
2431
2432 vpandq $MASK,$T0,$T0 # 0
2433 #vpandq $MASK,$T1,$T1 # 1
2434 #vpandq $MASK,$T3,$T3 # 3
2435 #vporq $PADBIT,$T4,$T4 # padbit, yes, always
2436
2437 sub \$128,$len
2438 ja .Loop_avx512
2439
2440 .Ltail_avx512:
2441 ################################################################
2442 # while the multiplications above were by r^8 in all lanes, in the last
2443 # iteration we multiply the least significant lane by r^8 and the most
2444 # significant one by r, which is why the table gets shifted...
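#
# (sketch) with per-lane accumulators L0..L7 the tail computes
#
#	h = L0*r^8 + L1*r^7 + ... + L6*r^2 + L7*r^1
#
# so lane i needs r^(8-i); the 32-bit shifts below realign the
# 1828384858687888 table to 0102030405060708 accordingly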
2445
2446 vpsrlq \$32,$R0,$R0 # 0102030405060708
2447 vpsrlq \$32,$R1,$R1
2448 vpsrlq \$32,$R2,$R2
2449 vpsrlq \$32,$S3,$S3
2450 vpsrlq \$32,$S4,$S4
2451 vpsrlq \$32,$R3,$R3
2452 vpsrlq \$32,$R4,$R4
2453 vpsrlq \$32,$S1,$S1
2454 vpsrlq \$32,$S2,$S2
2455
2456 ################################################################
2457 # load either the next or the last 64 bytes of input
2458 lea ($inp,$len),$inp
2459
2460 #vpaddq $H2,$T2,$H2 # accumulate input
2461 vpaddq $H0,$T0,$H0
2462
2463 vpmuludq $H2,$R1,$D3 # d3 = h2*r1
2464 vpmuludq $H2,$R2,$D4 # d4 = h2*r2
2465 vpmuludq $H2,$S3,$D0 # d0 = h2*s3
2466 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
2467 vpmuludq $H2,$R0,$D2 # d2 = h2*r0
2468 vpandq $MASK,$T1,$T1 # 1, modulo-scheduled
2469 vpandq $MASK,$T3,$T3 # 3
2470 vporq $PADBIT,$T4,$T4 # padbit, yes, always
2471 vpaddq $H1,$T1,$H1 # accumulate input
2472 vpaddq $H3,$T3,$H3
2473 vpaddq $H4,$T4,$H4
2474
2475 vmovdqu64 16*0($inp),%x#$T0
2476 vpmuludq $H0,$R3,$M3
2477 vpmuludq $H0,$R4,$M4
2478 vpmuludq $H0,$R0,$M0
2479 vpmuludq $H0,$R1,$M1
2480 vpaddq $M3,$D3,$D3 # d3 += h0*r3
2481 vpaddq $M4,$D4,$D4 # d4 += h0*r4
2482 vpaddq $M0,$D0,$D0 # d0 += h0*r0
2483 vpaddq $M1,$D1,$D1 # d1 += h0*r1
2484
2485 vmovdqu64 16*1($inp),%x#$T1
2486 vpmuludq $H1,$R2,$M3
2487 vpmuludq $H1,$R3,$M4
2488 vpmuludq $H1,$S4,$M0
2489 vpmuludq $H0,$R2,$M2
2490 vpaddq $M3,$D3,$D3 # d3 += h1*r2
2491 vpaddq $M4,$D4,$D4 # d4 += h1*r3
2492 vpaddq $M0,$D0,$D0 # d0 += h1*s4
2493 vpaddq $M2,$D2,$D2 # d2 += h0*r2
2494
2495 vinserti64x2 \$1,16*2($inp),$T0,$T0
2496 vpmuludq $H3,$R0,$M3
2497 vpmuludq $H3,$R1,$M4
2498 vpmuludq $H1,$R0,$M1
2499 vpmuludq $H1,$R1,$M2
2500 vpaddq $M3,$D3,$D3 # d3 += h3*r0
2501 vpaddq $M4,$D4,$D4 # d4 += h3*r1
2502 vpaddq $M1,$D1,$D1 # d1 += h1*r0
2503 vpaddq $M2,$D2,$D2 # d2 += h1*r1
2504
2505 vinserti64x2 \$1,16*3($inp),$T1,$T1
2506 vpmuludq $H4,$S4,$M3
2507 vpmuludq $H4,$R0,$M4
2508 vpmuludq $H3,$S2,$M0
2509 vpmuludq $H3,$S3,$M1
2510 vpmuludq $H3,$S4,$M2
2511 vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4
2512 vpaddq $M4,$D4,$D4 # d4 += h4*r0
2513 vpaddq $M0,$D0,$D0 # d0 += h3*s2
2514 vpaddq $M1,$D1,$D1 # d1 += h3*s3
2515 vpaddq $M2,$D2,$D2 # d2 += h3*s4
2516
2517 vpmuludq $H4,$S1,$M0
2518 vpmuludq $H4,$S2,$M1
2519 vpmuludq $H4,$S3,$M2
2520 vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
2521 vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
2522 vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3
2523
2524 ################################################################
2525 # horizontal addition
2526
2527 mov \$1,%eax
2528 vpsrldq \$8,$H3,$D3
2529 vpsrldq \$8,$D4,$H4
2530 vpsrldq \$8,$H0,$D0
2531 vpsrldq \$8,$H1,$D1
2532 vpsrldq \$8,$H2,$D2
2533 vpaddq $D3,$H3,$H3
2534 vpaddq $D4,$H4,$H4
2535 vpaddq $D0,$H0,$H0
2536 vpaddq $D1,$H1,$H1
2537 vpaddq $D2,$H2,$H2
2538
2539 kmovw %eax,%k3
2540 vpermq \$0x2,$H3,$D3
2541 vpermq \$0x2,$H4,$D4
2542 vpermq \$0x2,$H0,$D0
2543 vpermq \$0x2,$H1,$D1
2544 vpermq \$0x2,$H2,$D2
2545 vpaddq $D3,$H3,$H3
2546 vpaddq $D4,$H4,$H4
2547 vpaddq $D0,$H0,$H0
2548 vpaddq $D1,$H1,$H1
2549 vpaddq $D2,$H2,$H2
2550
2551 vextracti64x4 \$0x1,$H3,%y#$D3
2552 vextracti64x4 \$0x1,$H4,%y#$D4
2553 vextracti64x4 \$0x1,$H0,%y#$D0
2554 vextracti64x4 \$0x1,$H1,%y#$D1
2555 vextracti64x4 \$0x1,$H2,%y#$D2
2556 vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case
2557 vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2
2558 vpaddq $D0,$H0,${H0}{%k3}{z}
2559 vpaddq $D1,$H1,${H1}{%k3}{z}
2560 vpaddq $D2,$H2,${H2}{%k3}{z}
2561 ___
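# A scalar Perl analogue (illustration only) of the halving folds above:
# vpsrldq, vpermq and vextracti64x4 each add one half of the vector to
# the other, log2(8) = 3 times in total.
sub hadd_sketch {
    my @v = @_;				# eight per-lane accumulators
    while (@v > 1) {
	my $half = @v/2;
	@v = map { $v[$_] + $v[$_+$half] } 0 .. $half-1;
    }
    return $v[0];
}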
2562 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
2563 map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
2564 $code.=<<___;
2565 ################################################################
2566 # lazy reduction (interleaved with input splat)
2567
2568 vpsrlq \$26,$H3,$D3
2569 vpandq $MASK,$H3,$H3
2570 vpsrldq \$6,$T0,$T2 # splat input
2571 vpsrldq \$6,$T1,$T3
2572 vpunpckhqdq $T1,$T0,$T4 # 4
2573 vpaddq $D3,$H4,$H4 # h3 -> h4
2574
2575 vpsrlq \$26,$H0,$D0
2576 vpandq $MASK,$H0,$H0
2577 vpunpcklqdq $T3,$T2,$T2 # 2:3
2578 vpunpcklqdq $T1,$T0,$T0 # 0:1
2579 vpaddq $D0,$H1,$H1 # h0 -> h1
2580
2581 vpsrlq \$26,$H4,$D4
2582 vpandq $MASK,$H4,$H4
2583
2584 vpsrlq \$26,$H1,$D1
2585 vpandq $MASK,$H1,$H1
2586 vpsrlq \$30,$T2,$T3
2587 vpsrlq \$4,$T2,$T2
2588 vpaddq $D1,$H2,$H2 # h1 -> h2
2589
2590 vpaddq $D4,$H0,$H0
2591 vpsllq \$2,$D4,$D4
2592 vpsrlq \$26,$T0,$T1
2593 vpsrlq \$40,$T4,$T4 # 4
2594 vpaddq $D4,$H0,$H0 # h4 -> h0
2595
2596 vpsrlq \$26,$H2,$D2
2597 vpandq $MASK,$H2,$H2
2598 vpandq $MASK,$T2,$T2 # 2
2599 vpandq $MASK,$T0,$T0 # 0
2600 vpaddq $D2,$H3,$H3 # h2 -> h3
2601
2602 vpsrlq \$26,$H0,$D0
2603 vpandq $MASK,$H0,$H0
2604 vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2
2605 vpandq $MASK,$T1,$T1 # 1
2606 vpaddq $D0,$H1,$H1 # h0 -> h1
2607
2608 vpsrlq \$26,$H3,$D3
2609 vpandq $MASK,$H3,$H3
2610 vpandq $MASK,$T3,$T3 # 3
2611 vporq $PADBIT,$T4,$T4 # padbit, yes, always
2612 vpaddq $D3,$H4,$H4 # h3 -> h4
2613
2614 lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2
2615 add \$64,$len
2616 jnz .Ltail_avx2
2617
2618 vpsubq $T2,$H2,$H2 # undo input accumulation
2619 vmovd %x#$H0,`4*0-48-64`($ctx) # save partially reduced
2620 vmovd %x#$H1,`4*1-48-64`($ctx)
2621 vmovd %x#$H2,`4*2-48-64`($ctx)
2622 vmovd %x#$H3,`4*3-48-64`($ctx)
2623 vmovd %x#$H4,`4*4-48-64`($ctx)
2624 ___
2625 $code.=<<___ if ($win64);
2626 vmovdqa 0x50(%r11),%xmm6
2627 vmovdqa 0x60(%r11),%xmm7
2628 vmovdqa 0x70(%r11),%xmm8
2629 vmovdqa 0x80(%r11),%xmm9
2630 vmovdqa 0x90(%r11),%xmm10
2631 vmovdqa 0xa0(%r11),%xmm11
2632 vmovdqa 0xb0(%r11),%xmm12
2633 vmovdqa 0xc0(%r11),%xmm13
2634 vmovdqa 0xd0(%r11),%xmm14
2635 vmovdqa 0xe0(%r11),%xmm15
2636 lea 0xf8(%r11),%rsp
2637 .Ldo_avx512_epilogue:
2638 ___
2639 $code.=<<___ if (!$win64);
2640 lea 8(%r11),%rsp
2641 ___
2642 $code.=<<___;
2643 vzeroupper
2644 ret
2645 .size poly1305_blocks_avx512,.-poly1305_blocks_avx512
2646 ___
2647 } }
2648 $code.=<<___;
2649 .align 64
2650 .Lconst:
2651 .Lmask24:
2652 .long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
2653 .L129:
2654 .long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
2655 .Lmask26:
2656 .long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
2657 .Lfive:
2658 .long 5,0,5,0,5,0,5,0
2659 .Lgather:
2660 .long 0,8, 32,40, 64,72, 96,104
2661 ___
2662 }
2663
2664 $code.=<<___;
2665 .asciz "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
2666 .align 16
2667 ___
2668
2669 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2670 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
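# Rough Perl sketch (illustration only) of the Rip range check that both
# handlers below perform: only between the body and epilogue labels is
# the frame fully materialized and worth unwinding by hand.
sub in_frame_sketch {
    my ($rip, $base, $body_rva, $epilogue_rva) = @_;
    return $rip >= $base + $body_rva		# HandlerData[0]
	&& $rip <  $base + $epilogue_rva;	# HandlerData[1]
}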
2671 if ($win64) {
2672 $rec="%rcx";
2673 $frame="%rdx";
2674 $context="%r8";
2675 $disp="%r9";
2676
2677 $code.=<<___;
2678 .extern __imp_RtlVirtualUnwind
2679 .type se_handler,\@abi-omnipotent
2680 .align 16
2681 se_handler:
2682 push %rsi
2683 push %rdi
2684 push %rbx
2685 push %rbp
2686 push %r12
2687 push %r13
2688 push %r14
2689 push %r15
2690 pushfq
2691 sub \$64,%rsp
2692
2693 mov 120($context),%rax # pull context->Rax
2694 mov 248($context),%rbx # pull context->Rip
2695
2696 mov 8($disp),%rsi # disp->ImageBase
2697 mov 56($disp),%r11 # disp->HandlerData
2698
2699 mov 0(%r11),%r10d # HandlerData[0]
2700 lea (%rsi,%r10),%r10 # prologue label
2701 cmp %r10,%rbx # context->Rip<.Lprologue
2702 jb .Lcommon_seh_tail
2703
2704 mov 152($context),%rax # pull context->Rsp
2705
2706 mov 4(%r11),%r10d # HandlerData[1]
2707 lea (%rsi,%r10),%r10 # epilogue label
2708 cmp %r10,%rbx # context->Rip>=.Lepilogue
2709 jae .Lcommon_seh_tail
2710
2711 lea 48(%rax),%rax
2712
2713 mov -8(%rax),%rbx
2714 mov -16(%rax),%rbp
2715 mov -24(%rax),%r12
2716 mov -32(%rax),%r13
2717 mov -40(%rax),%r14
2718 mov -48(%rax),%r15
2719 mov %rbx,144($context) # restore context->Rbx
2720 mov %rbp,160($context) # restore context->Rbp
2721 mov %r12,216($context) # restore context->R12
2722 mov %r13,224($context) # restore context->R13
2723 mov %r14,232($context) # restore context->R14
2724 mov %r15,240($context) # restore context->R15
2725
2726 jmp .Lcommon_seh_tail
2727 .size se_handler,.-se_handler
2728
2729 .type avx_handler,\@abi-omnipotent
2730 .align 16
2731 avx_handler:
2732 push %rsi
2733 push %rdi
2734 push %rbx
2735 push %rbp
2736 push %r12
2737 push %r13
2738 push %r14
2739 push %r15
2740 pushfq
2741 sub \$64,%rsp
2742
2743 mov 120($context),%rax # pull context->Rax
2744 mov 248($context),%rbx # pull context->Rip
2745
2746 mov 8($disp),%rsi # disp->ImageBase
2747 mov 56($disp),%r11 # disp->HandlerData
2748
2749 mov 0(%r11),%r10d # HandlerData[0]
2750 lea (%rsi,%r10),%r10 # prologue label
2751 cmp %r10,%rbx # context->Rip<prologue label
2752 jb .Lcommon_seh_tail
2753
2754 mov 152($context),%rax # pull context->Rsp
2755
2756 mov 4(%r11),%r10d # HandlerData[1]
2757 lea (%rsi,%r10),%r10 # epilogue label
2758 cmp %r10,%rbx # context->Rip>=epilogue label
2759 jae .Lcommon_seh_tail
2760
2761 mov 208($context),%rax # pull context->R11
2762
2763 lea 0x50(%rax),%rsi # saved xmm6-xmm15 area
2764 lea 0xf8(%rax),%rax # %rsp at function entry
2765 lea 512($context),%rdi # &context.Xmm6
2766 mov \$20,%ecx # 10 xmm registers = 20 qwords
2767 .long 0xa548f3fc # cld; rep movsq
2768
2769 .Lcommon_seh_tail:
2770 mov 8(%rax),%rdi
2771 mov 16(%rax),%rsi
2772 mov %rax,152($context) # restore context->Rsp
2773 mov %rsi,168($context) # restore context->Rsi
2774 mov %rdi,176($context) # restore context->Rdi
2775
2776 mov 40($disp),%rdi # disp->ContextRecord
2777 mov $context,%rsi # context
2778 mov \$154,%ecx # sizeof(CONTEXT)/8, in qwords
2779 .long 0xa548f3fc # cld; rep movsq
2780
2781 mov $disp,%rsi
2782 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
2783 mov 8(%rsi),%rdx # arg2, disp->ImageBase
2784 mov 0(%rsi),%r8 # arg3, disp->ControlPc
2785 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
2786 mov 40(%rsi),%r10 # disp->ContextRecord
2787 lea 56(%rsi),%r11 # &disp->HandlerData
2788 lea 24(%rsi),%r12 # &disp->EstablisherFrame
2789 mov %r10,32(%rsp) # arg5
2790 mov %r11,40(%rsp) # arg6
2791 mov %r12,48(%rsp) # arg7
2792 mov %rcx,56(%rsp) # arg8, (NULL)
2793 call *__imp_RtlVirtualUnwind(%rip)
2794
2795 mov \$1,%eax # ExceptionContinueSearch
2796 add \$64,%rsp
2797 popfq
2798 pop %r15
2799 pop %r14
2800 pop %r13
2801 pop %r12
2802 pop %rbp
2803 pop %rbx
2804 pop %rdi
2805 pop %rsi
2806 ret
2807 .size avx_handler,.-avx_handler
2808
2809 .section .pdata
2810 .align 4
2811 .rva .LSEH_begin_poly1305_init
2812 .rva .LSEH_end_poly1305_init
2813 .rva .LSEH_info_poly1305_init
2814
2815 .rva .LSEH_begin_poly1305_blocks
2816 .rva .LSEH_end_poly1305_blocks
2817 .rva .LSEH_info_poly1305_blocks
2818
2819 .rva .LSEH_begin_poly1305_emit
2820 .rva .LSEH_end_poly1305_emit
2821 .rva .LSEH_info_poly1305_emit
2822 ___
2823 $code.=<<___ if ($avx);
2824 .rva .LSEH_begin_poly1305_blocks_avx
2825 .rva .Lbase2_64_avx
2826 .rva .LSEH_info_poly1305_blocks_avx_1
2827
2828 .rva .Lbase2_64_avx
2829 .rva .Leven_avx
2830 .rva .LSEH_info_poly1305_blocks_avx_2
2831
2832 .rva .Leven_avx
2833 .rva .LSEH_end_poly1305_blocks_avx
2834 .rva .LSEH_info_poly1305_blocks_avx_3
2835
2836 .rva .LSEH_begin_poly1305_emit_avx
2837 .rva .LSEH_end_poly1305_emit_avx
2838 .rva .LSEH_info_poly1305_emit_avx
2839 ___
2840 $code.=<<___ if ($avx>1);
2841 .rva .LSEH_begin_poly1305_blocks_avx2
2842 .rva .Lbase2_64_avx2
2843 .rva .LSEH_info_poly1305_blocks_avx2_1
2844
2845 .rva .Lbase2_64_avx2
2846 .rva .Leven_avx2
2847 .rva .LSEH_info_poly1305_blocks_avx2_2
2848
2849 .rva .Leven_avx2
2850 .rva .LSEH_end_poly1305_blocks_avx2
2851 .rva .LSEH_info_poly1305_blocks_avx2_3
2852 ___
2853 $code.=<<___ if ($avx>2);
2854 .rva .LSEH_begin_poly1305_blocks_avx512
2855 .rva .LSEH_end_poly1305_blocks_avx512
2856 .rva .LSEH_info_poly1305_blocks_avx512
2857 ___
2858 $code.=<<___;
2859 .section .xdata
2860 .align 8
2861 .LSEH_info_poly1305_init:
2862 .byte 9,0,0,0
2863 .rva se_handler
2864 .rva .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
2865
2866 .LSEH_info_poly1305_blocks:
2867 .byte 9,0,0,0
2868 .rva se_handler
2869 .rva .Lblocks_body,.Lblocks_epilogue
2870
2871 .LSEH_info_poly1305_emit:
2872 .byte 9,0,0,0
2873 .rva se_handler
2874 .rva .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
2875 ___
2876 $code.=<<___ if ($avx);
2877 .LSEH_info_poly1305_blocks_avx_1:
2878 .byte 9,0,0,0
2879 .rva se_handler
2880 .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[]
2881
2882 .LSEH_info_poly1305_blocks_avx_2:
2883 .byte 9,0,0,0
2884 .rva se_handler
2885 .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[]
2886
2887 .LSEH_info_poly1305_blocks_avx_3:
2888 .byte 9,0,0,0
2889 .rva avx_handler
2890 .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[]
2891
2892 .LSEH_info_poly1305_emit_avx:
2893 .byte 9,0,0,0
2894 .rva se_handler
2895 .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
2896 ___
2897 $code.=<<___ if ($avx>1);
2898 .LSEH_info_poly1305_blocks_avx2_1:
2899 .byte 9,0,0,0
2900 .rva se_handler
2901 .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[]
2902
2903 .LSEH_info_poly1305_blocks_avx2_2:
2904 .byte 9,0,0,0
2905 .rva se_handler
2906 .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[]
2907
2908 .LSEH_info_poly1305_blocks_avx2_3:
2909 .byte 9,0,0,0
2910 .rva avx_handler
2911 .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[]
2912 ___
2913 $code.=<<___ if ($avx>2);
2914 .LSEH_info_poly1305_blocks_avx512:
2915 .byte 9,0,0,0
2916 .rva avx_handler
2917 .rva .Ldo_avx512_body,.Ldo_avx512_epilogue # HandlerData[]
2918 ___
2919 }
2920
2921 foreach (split('\n',$code)) {
2922 s/\`([^\`]*)\`/eval($1)/ge;
2923 s/%r([a-z]+)#d/%e$1/g;
2924 s/%r([0-9]+)#d/%r$1d/g;
2925 s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;
2926
2927 print $_,"\n";
2928 }
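# The substitutions above translate the #-suffixed perlasm notation into
# real register names before printing, e.g. (illustration): "%rax#d" ->
# "%eax", "%r11#d" -> "%r11d", "%x#%ymm9" -> "%xmm9".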
2929 close STDOUT;