#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for x86_64.
#
# March 2015
#
# Initial release.
#
# December 2016
#
# Add AVX512F+VL+BW code path.
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#
#		IALU/gcc-4.8(*)	AVX(**)		AVX2
# P4		4.46/+120%	-
# Core 2	2.41/+90%	-
# Westmere	1.88/+120%	-
# Sandy Bridge	1.39/+140%	1.10
# Haswell	1.14/+175%	1.11		0.65
# Skylake	1.13/+120%	0.96		0.51
# Silvermont	2.83/+95%	-
# Goldmont	1.70/+180%	-
# VIA Nano	1.82/+150%	-
# Sledgehammer	1.38/+160%	-
# Bulldozer	2.30/+130%	0.97
#
# (*)	improvement coefficients relative to clang are more modest and
#	are ~50% on most processors; in both cases we are comparing to
#	__int128 code;
# (**)	an SSE2 implementation was attempted, but among non-AVX
#	processors it was faster than integer-only code only on older
#	Intel P4 and Core processors, by 30-50% (the newer the
#	processor, the smaller the gain), and slower on contemporary
#	ones, for example almost 2x slower on Atom; as the former are
#	naturally disappearing, SSE2 is deemed unnecessary;

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
	$avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
	$avx += 1 if ($1==2.11 && $2>=8);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=12);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
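
# $avx grades how capable the assembler is: 0 = emit integer code only,
# 1 = AVX, 2 = AVX2, 3 = AVX512F+VL+BW, one point per version threshold
# met above.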

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);	# *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");

sub poly1305_iteration {
# input:	copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:	$h0-$h2 *= $r0-$r1
$code.=<<___;
	mulq	$h0			# h0*r1
	mov	%rax,$d2
	mov	$r0,%rax
	mov	%rdx,$d3

	mulq	$h0			# h0*r0
	mov	%rax,$h0		# future $h0
	mov	$r0,%rax
	mov	%rdx,$d1

	mulq	$h1			# h1*r0
	add	%rax,$d2
	mov	$s1,%rax
	adc	%rdx,$d3

	mulq	$h1			# h1*s1
	mov	$h2,$h1			# borrow $h1
	add	%rax,$h0
	adc	%rdx,$d1

	imulq	$s1,$h1			# h2*s1
	add	$h1,$d2
	mov	$d1,$h1
	adc	\$0,$d3

	imulq	$r0,$h2			# h2*r0
	add	$d2,$h1
	mov	\$-4,%rax		# mask value
	adc	$h2,$d3

	and	$d3,%rax		# last reduction step
	mov	$d3,$h2
	shr	\$2,$d3
	and	\$3,$h2
	add	$d3,%rax
	add	%rax,$h0
	adc	\$0,$h1
	adc	\$0,$h2
___
}
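
# For reference, each iteration computes h = h*r mod 2^130-5, with h in
# two full 64-bit limbs plus a small top limb. s1 = r1 + (r1>>2) equals
# 5*(r1>>2), which is r1*2^128 reduced mod p (possible because r1's low
# two bits are clamped to zero), so products crossing 2^130 fold back
# into the low limbs. A minimal Perl sketch of the same math (an
# illustration only, not used by the generator):
#
#	use Math::BigInt;
#	sub poly1305_iteration_ref {
#	    my ($h, $r) = @_;		# Math::BigInt values
#	    my $p = Math::BigInt->new(1)->blsft(130)->bsub(5);
#	    return $h->bmul($r)->bmod($p);
#	}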

########################################################################
# Layout of the opaque area is as follows.
#
#	unsigned __int64 h[3];		# current hash value base 2^64
#	unsigned __int64 r[2];		# key value base 2^64

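# Viewed from Perl, the context is a flat little-endian byte string:
# h occupies bytes 0..23 and the clamped key bytes 24..39 (a hedged
# illustration; $blob is a hypothetical copy of the opaque area):
#
#	my ($h0,$h1,$h2,$r0,$r1) = unpack("Q<5", substr($blob, 0, 40));
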
$code.=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	poly1305_init
.hidden	poly1305_init
.globl	poly1305_blocks
.hidden	poly1305_blocks
.globl	poly1305_emit
.hidden	poly1305_emit

.type	poly1305_init,\@function,3
.align	32
poly1305_init:
	xor	%rax,%rax
	mov	%rax,0($ctx)		# initialize hash value
	mov	%rax,8($ctx)
	mov	%rax,16($ctx)

	cmp	\$0,$inp
	je	.Lno_key

	lea	poly1305_blocks(%rip),%r10
	lea	poly1305_emit(%rip),%r11
___
$code.=<<___	if ($avx);
	mov	OPENSSL_ia32cap_P+4(%rip),%r9
	lea	poly1305_blocks_avx(%rip),%rax
	lea	poly1305_emit_avx(%rip),%rcx
	bt	\$`60-32`,%r9		# AVX?
	cmovc	%rax,%r10
	cmovc	%rcx,%r11
___
$code.=<<___	if ($avx>1);
	lea	poly1305_blocks_avx2(%rip),%rax
	bt	\$`5+32`,%r9		# AVX2?
	cmovc	%rax,%r10
___
$code.=<<___;
	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx
	and	0($inp),%rax
	and	8($inp),%rcx
	mov	%rax,24($ctx)
	mov	%rcx,32($ctx)
___
$code.=<<___	if ($flavour !~ /elf32/);
	mov	%r10,0(%rdx)
	mov	%r11,8(%rdx)
___
$code.=<<___	if ($flavour =~ /elf32/);
	mov	%r10d,0(%rdx)
	mov	%r11d,4(%rdx)
___
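# The two masks above implement the standard Poly1305 clamping of r
# (RFC 7539); in Perl terms (an illustration, assuming $key holds the
# 16-byte r portion of the key):
#
#	my $r0 = unpack("Q<", substr($key, 0, 8)) & 0x0ffffffc0fffffff;
#	my $r1 = unpack("Q<", substr($key, 8, 8)) & 0x0ffffffc0ffffffc;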
$code.=<<___;
	mov	\$1,%eax
.Lno_key:
	ret
.size	poly1305_init,.-poly1305_init

.type	poly1305_blocks,\@function,4
.align	32
poly1305_blocks:
.Lblocks:
	shr	\$4,$len
	jz	.Lno_data		# too short

	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
.Lblocks_body:

	mov	$len,%r15		# reassign $len

	mov	24($ctx),$r0		# load r
	mov	32($ctx),$s1

	mov	0($ctx),$h0		# load hash value
	mov	8($ctx),$h1
	mov	16($ctx),$h2

	mov	$s1,$r1
	shr	\$2,$s1
	mov	$r1,%rax
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	jmp	.Loop

.align	32
.Loop:
	add	0($inp),$h0		# accumulate input
	adc	8($inp),$h1
	lea	16($inp),$inp
	adc	$padbit,$h2
___
&poly1305_iteration();
$code.=<<___;
	mov	$r1,%rax
	dec	%r15			# len-=16
	jnz	.Loop

	mov	$h0,0($ctx)		# store hash value
	mov	$h1,8($ctx)
	mov	$h2,16($ctx)

	mov	0(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r13
	mov	24(%rsp),%r12
	mov	32(%rsp),%rbp
	mov	40(%rsp),%rbx
	lea	48(%rsp),%rsp
.Lno_data:
.Lblocks_epilogue:
	ret
.size	poly1305_blocks,.-poly1305_blocks

.type	poly1305_emit,\@function,3
.align	32
poly1305_emit:
.Lemit:
	mov	0($ctx),%r8	# load hash value
	mov	8($ctx),%r9
	mov	16($ctx),%r10

	mov	%r8,%rax
	add	\$5,%r8		# compare to modulus
	mov	%r9,%rcx
	adc	\$0,%r9
	adc	\$0,%r10
	shr	\$2,%r10	# did 130-bit value overflow?
	cmovnz	%r8,%rax
	cmovnz	%r9,%rcx

	add	0($nonce),%rax	# accumulate nonce
	adc	8($nonce),%rcx
	mov	%rax,0($mac)	# write result
	mov	%rcx,8($mac)

	ret
.size	poly1305_emit,.-poly1305_emit
___
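# poly1305_emit's final step, restated in Perl (a hedged sketch using
# Math::BigInt, not part of the generated code): fully reduce h mod
# 2^130-5, then add the nonce modulo 2^128.
#
#	sub poly1305_emit_ref {
#	    my ($h, $nonce) = @_;	# Math::BigInt values
#	    my $p = Math::BigInt->new(1)->blsft(130)->bsub(5);
#	    my $tag = $h->copy()->bmod($p)->badd($nonce);
#	    return $tag->bmod(Math::BigInt->new(1)->blsft(128));
#	}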
if ($avx) {

########################################################################
# Layout of the opaque area is as follows.
#
#	unsigned __int32 h[5];		# current hash value base 2^26
#	unsigned __int32 is_base2_26;
#	unsigned __int64 r[2];		# key value base 2^64
#	unsigned __int64 pad;
#	struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are base 2^26 digits of the powers of the multiplier key.
# There are 5 digits, but the last four are interleaved with multiples
# of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.

my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    map("%xmm$_",(0..15));

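# A Perl sketch of the base 2^26 split applied to each power (a hedged
# illustration; $v is a Math::BigInt value of r^n):
#
#	my @d = map { ($v->copy()->brsft(26*$_) & 0x3ffffff)->numify() }
#		(0..4);				# five 26-bit digits
#	my @tbl = ($d[0], map { ($d[$_], 5*$d[$_]) } (1..4));
#	# tbl = (r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4)
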
$code.=<<___;
.type	__poly1305_block,\@abi-omnipotent
.align	32
__poly1305_block:
___
&poly1305_iteration();
$code.=<<___;
	ret
.size	__poly1305_block,.-__poly1305_block

.type	__poly1305_init_avx,\@abi-omnipotent
.align	32
__poly1305_init_avx:
	mov	$r0,$h0
	mov	$r1,$h1
	xor	$h2,$h2

	lea	48+64($ctx),$ctx	# size optimization

	mov	$r1,%rax
	call	__poly1305_block	# r^2

	mov	\$0x3ffffff,%eax	# save interleaved r^2 and r base 2^26
	mov	\$0x3ffffff,%edx
	mov	$h0,$d1
	and	$h0#d,%eax
	mov	$r0,$d2
	and	$r0#d,%edx
	mov	%eax,`16*0+0-64`($ctx)
	shr	\$26,$d1
	mov	%edx,`16*0+4-64`($ctx)
	shr	\$26,$d2

	mov	\$0x3ffffff,%eax
	mov	\$0x3ffffff,%edx
	and	$d1#d,%eax
	and	$d2#d,%edx
	mov	%eax,`16*1+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*1+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*2+0-64`($ctx)
	shr	\$26,$d1
	mov	%edx,`16*2+4-64`($ctx)
	shr	\$26,$d2

	mov	$h1,%rax
	mov	$r1,%rdx
	shl	\$12,%rax
	shl	\$12,%rdx
	or	$d1,%rax
	or	$d2,%rdx
	and	\$0x3ffffff,%eax
	and	\$0x3ffffff,%edx
	mov	%eax,`16*3+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*3+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*4+0-64`($ctx)
	mov	$h1,$d1
	mov	%edx,`16*4+4-64`($ctx)
	mov	$r1,$d2

	mov	\$0x3ffffff,%eax
	mov	\$0x3ffffff,%edx
	shr	\$14,$d1
	shr	\$14,$d2
	and	$d1#d,%eax
	and	$d2#d,%edx
	mov	%eax,`16*5+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*5+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*6+0-64`($ctx)
	shr	\$26,$d1
	mov	%edx,`16*6+4-64`($ctx)
	shr	\$26,$d2

	mov	$h2,%rax
	shl	\$24,%rax
	or	%rax,$d1
	mov	$d1#d,`16*7+0-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d2#d,`16*7+4-64`($ctx)
	lea	($d2,$d2,4),$d2		# *5
	mov	$d1#d,`16*8+0-64`($ctx)
	mov	$d2#d,`16*8+4-64`($ctx)

	mov	$r1,%rax
	call	__poly1305_block	# r^3

	mov	\$0x3ffffff,%eax	# save r^3 base 2^26
	mov	$h0,$d1
	and	$h0#d,%eax
	shr	\$26,$d1
	mov	%eax,`16*0+12-64`($ctx)

	mov	\$0x3ffffff,%edx
	and	$d1#d,%edx
	mov	%edx,`16*1+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	shr	\$26,$d1
	mov	%edx,`16*2+12-64`($ctx)

	mov	$h1,%rax
	shl	\$12,%rax
	or	$d1,%rax
	and	\$0x3ffffff,%eax
	mov	%eax,`16*3+12-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	$h1,$d1
	mov	%eax,`16*4+12-64`($ctx)

	mov	\$0x3ffffff,%edx
	shr	\$14,$d1
	and	$d1#d,%edx
	mov	%edx,`16*5+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	shr	\$26,$d1
	mov	%edx,`16*6+12-64`($ctx)

	mov	$h2,%rax
	shl	\$24,%rax
	or	%rax,$d1
	mov	$d1#d,`16*7+12-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+12-64`($ctx)

	mov	$r1,%rax
	call	__poly1305_block	# r^4

	mov	\$0x3ffffff,%eax	# save r^4 base 2^26
	mov	$h0,$d1
	and	$h0#d,%eax
	shr	\$26,$d1
	mov	%eax,`16*0+8-64`($ctx)

	mov	\$0x3ffffff,%edx
	and	$d1#d,%edx
	mov	%edx,`16*1+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	shr	\$26,$d1
	mov	%edx,`16*2+8-64`($ctx)

	mov	$h1,%rax
	shl	\$12,%rax
	or	$d1,%rax
	and	\$0x3ffffff,%eax
	mov	%eax,`16*3+8-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	$h1,$d1
	mov	%eax,`16*4+8-64`($ctx)

	mov	\$0x3ffffff,%edx
	shr	\$14,$d1
	and	$d1#d,%edx
	mov	%edx,`16*5+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	shr	\$26,$d1
	mov	%edx,`16*6+8-64`($ctx)

	mov	$h2,%rax
	shl	\$24,%rax
	or	%rax,$d1
	mov	$d1#d,`16*7+8-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+8-64`($ctx)

	lea	-48-64($ctx),$ctx	# size [de-]optimization
	ret
.size	__poly1305_init_avx,.-__poly1305_init_avx

.type	poly1305_blocks_avx,\@function,4
.align	32
poly1305_blocks_avx:
	mov	20($ctx),%r8d		# is_base2_26
	cmp	\$128,$len
	jae	.Lblocks_avx
	test	%r8d,%r8d
	jz	.Lblocks

.Lblocks_avx:
	and	\$-16,$len
	jz	.Lno_data_avx

	vzeroupper

	test	%r8d,%r8d
	jz	.Lbase2_64_avx

	test	\$31,$len
	jz	.Leven_avx

	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
.Lblocks_avx_body:

	mov	$len,%r15	# reassign $len

	mov	0($ctx),$d1	# load hash value
	mov	8($ctx),$d2
	mov	16($ctx),$h2#d

	mov	24($ctx),$r0	# load r
	mov	32($ctx),$s1

	################################# base 2^26 -> base 2^64
	mov	$d1#d,$h0#d
	and	\$`-1*(1<<31)`,$d1
	mov	$d2,$r1			# borrow $r1
	mov	$d2#d,$h1#d
	and	\$`-1*(1<<31)`,$d2

	shr	\$6,$d1
	shl	\$52,$r1
	add	$d1,$h0
	shr	\$12,$h1
	shr	\$18,$d2
	add	$r1,$h0
	adc	$d2,$h1

	mov	$h2,$d1
	shl	\$40,$d1
	shr	\$24,$h2
	add	$d1,$h1
	adc	\$0,$h2			# can be partially reduced...

	mov	\$-4,$d2		# ... so reduce
	mov	$h2,$d1
	and	$h2,$d2
	shr	\$2,$d1
	and	\$3,$h2
	add	$d2,$d1			# =*5
	add	$d1,$h0
	adc	\$0,$h1
	adc	\$0,$h2

	mov	$s1,$r1
	mov	$s1,%rax
	shr	\$2,$s1
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

	add	0($inp),$h0		# accumulate input
	adc	8($inp),$h1
	lea	16($inp),$inp
	adc	$padbit,$h2

	call	__poly1305_block

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	mov	$h0,%rax
	mov	$h0,%rdx
	shr	\$52,$h0
	mov	$h1,$r0
	mov	$h1,$r1
	shr	\$26,%rdx
	and	\$0x3ffffff,%rax	# h[0]
	shl	\$12,$r0
	and	\$0x3ffffff,%rdx	# h[1]
	shr	\$14,$h1
	or	$r0,$h0
	shl	\$24,$h2
	and	\$0x3ffffff,$h0		# h[2]
	shr	\$40,$r1
	and	\$0x3ffffff,$h1		# h[3]
	or	$r1,$h2			# h[4]

	sub	\$16,%r15
	jz	.Lstore_base2_26_avx

	vmovd	%rax#d,$H0
	vmovd	%rdx#d,$H1
	vmovd	$h0#d,$H2
	vmovd	$h1#d,$H3
	vmovd	$h2#d,$H4
	jmp	.Lproceed_avx

.align	32
.Lstore_base2_64_avx:
	mov	$h0,0($ctx)
	mov	$h1,8($ctx)
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed
	jmp	.Ldone_avx

.align	16
.Lstore_base2_26_avx:
	mov	%rax#d,0($ctx)		# store hash value base 2^26
	mov	%rdx#d,4($ctx)
	mov	$h0#d,8($ctx)
	mov	$h1#d,12($ctx)
	mov	$h2#d,16($ctx)
.align	16
.Ldone_avx:
	mov	0(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r13
	mov	24(%rsp),%r12
	mov	32(%rsp),%rbp
	mov	40(%rsp),%rbx
	lea	48(%rsp),%rsp
.Lno_data_avx:
.Lblocks_avx_epilogue:
	ret

.align	32
.Lbase2_64_avx:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
.Lbase2_64_avx_body:

	mov	$len,%r15	# reassign $len

	mov	24($ctx),$r0	# load r
	mov	32($ctx),$s1

	mov	0($ctx),$h0	# load hash value
	mov	8($ctx),$h1
	mov	16($ctx),$h2#d

	mov	$s1,$r1
	mov	$s1,%rax
	shr	\$2,$s1
	add	$r1,$s1		# s1 = r1 + (r1 >> 2)

	test	\$31,$len
	jz	.Linit_avx

	add	0($inp),$h0	# accumulate input
	adc	8($inp),$h1
	lea	16($inp),$inp
	adc	$padbit,$h2
	sub	\$16,%r15

	call	__poly1305_block

.Linit_avx:
	################################# base 2^64 -> base 2^26
	mov	$h0,%rax
	mov	$h0,%rdx
	shr	\$52,$h0
	mov	$h1,$d1
	mov	$h1,$d2
	shr	\$26,%rdx
	and	\$0x3ffffff,%rax	# h[0]
	shl	\$12,$d1
	and	\$0x3ffffff,%rdx	# h[1]
	shr	\$14,$h1
	or	$d1,$h0
	shl	\$24,$h2
	and	\$0x3ffffff,$h0		# h[2]
	shr	\$40,$d2
	and	\$0x3ffffff,$h1		# h[3]
	or	$d2,$h2			# h[4]

	vmovd	%rax#d,$H0
	vmovd	%rdx#d,$H1
	vmovd	$h0#d,$H2
	vmovd	$h1#d,$H3
	vmovd	$h2#d,$H4
	movl	\$1,20($ctx)		# set is_base2_26

	call	__poly1305_init_avx

.Lproceed_avx:
	mov	%r15,$len

	mov	0(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r13
	mov	24(%rsp),%r12
	mov	32(%rsp),%rbp
	mov	40(%rsp),%rbx
	lea	48(%rsp),%rax
	lea	48(%rsp),%rsp
.Lbase2_64_avx_epilogue:
	jmp	.Ldo_avx

.align	32
.Leven_avx:
	vmovd	4*0($ctx),$H0		# load hash value
	vmovd	4*1($ctx),$H1
	vmovd	4*2($ctx),$H2
	vmovd	4*3($ctx),$H3
	vmovd	4*4($ctx),$H4

.Ldo_avx:
___
$code.=<<___	if (!$win64);
	lea	-0x58(%rsp),%r11
	sub	\$0x178,%rsp
___
$code.=<<___	if ($win64);
	lea	-0xf8(%rsp),%r11
	sub	\$0x218,%rsp
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
.Ldo_avx_body:
___
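# Win64 treats %xmm6-%xmm15 as non-volatile, hence the vmovdqa spills
# above and the matching restores in the .Ldo_avx epilogue; the SysV
# path only needs to reserve stack space.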
$code.=<<___;
	sub	\$64,$len
	lea	-32($inp),%rax
	cmovc	%rax,$inp

	vmovdqu	`16*3`($ctx),$D4	# preload r0^2
	lea	`16*3+64`($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	################################################################
	# load input
	vmovdqu	16*2($inp),$T0
	vmovdqu	16*3($inp),$T1
	vmovdqa	64(%rcx),$MASK		# .Lmask26

	vpsrldq	\$6,$T0,$T2		# splat input
	vpsrldq	\$6,$T1,$T3
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	vpsrlq	\$40,$T4,$T4		# 4
	vpsrlq	\$26,$T0,$T1
	vpand	$MASK,$T0,$T0		# 0
	vpsrlq	\$4,$T3,$T2
	vpand	$MASK,$T1,$T1		# 1
	vpsrlq	\$30,$T3,$T3
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	jbe	.Lskip_loop_avx

	# expand and copy pre-calculated table to stack
	vmovdqu	`16*1-64`($ctx),$D1
	vmovdqu	`16*2-64`($ctx),$D2
	vpshufd	\$0xEE,$D4,$D3		# 34xx -> 3434
	vpshufd	\$0x44,$D4,$D0		# xx12 -> 1212
	vmovdqa	$D3,-0x90(%r11)
	vmovdqa	$D0,0x00(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vmovdqu	`16*3-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x80(%r11)
	vmovdqa	$D1,0x10(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqu	`16*4-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x70(%r11)
	vmovdqa	$D2,0x20(%rsp)
	vpshufd	\$0xEE,$D0,$D4
	vmovdqu	`16*5-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D4,-0x60(%r11)
	vmovdqa	$D0,0x30(%rsp)
	vpshufd	\$0xEE,$D1,$D3
	vmovdqu	`16*6-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D3,-0x50(%r11)
	vmovdqa	$D1,0x40(%rsp)
	vpshufd	\$0xEE,$D2,$D4
	vmovdqu	`16*7-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D4,-0x40(%r11)
	vmovdqa	$D2,0x50(%rsp)
	vpshufd	\$0xEE,$D0,$D3
	vmovdqu	`16*8-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D3,-0x30(%r11)
	vmovdqa	$D0,0x60(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x20(%r11)
	vmovdqa	$D1,0x70(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x10(%r11)
	vmovdqa	$D2,0x80(%rsp)

	jmp	.Loop_avx

.align	32
.Loop_avx:
	################################################################
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	#   \___________________/
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	#   \___________________/ \____________________/
	#
	# Note that we start with inp[2:3]*r^2. This is because it
	# doesn't depend on reduction in previous iteration.
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# though note that $Tx and $Hx are "reversed" in this section,
	# and $D4 is preloaded with r0^2...
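	#
	# The 5*r_k multiples come from 2^130 ≡ 5 (mod 2^130-5): a
	# product h_i*r_j landing at digit i+j >= 5 wraps to digit
	# i+j-5 scaled by 5, e.g. h4*r1 contributes h4*5*r1 to d0.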

	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vmovdqa	$H2,0x20(%r11)		# offload hash
	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vmovdqa	0x10(%rsp),$H2		# r1^2
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vmovdqa	$H0,0x00(%r11)		#
	vpmuludq	0x20(%rsp),$T4,$H0	# h4*s1
	vmovdqa	$H1,0x10(%r11)		#
	vpmuludq	$T3,$H2,$H1	# h3*r1
	vpaddq	$H0,$D0,$D0		# d0 += h4*s1
	vpaddq	$H1,$D4,$D4		# d4 += h3*r1
	vmovdqa	$H3,0x30(%r11)		#
	vpmuludq	$T2,$H2,$H0	# h2*r1
	vpmuludq	$T1,$H2,$H1	# h1*r1
	vpaddq	$H0,$D3,$D3		# d3 += h2*r1
	vmovdqa	0x30(%rsp),$H3		# r2^2
	vpaddq	$H1,$D2,$D2		# d2 += h1*r1
	vmovdqa	$H4,0x40(%r11)		#
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpmuludq	$T2,$H3,$H0	# h2*r2
	vpaddq	$H2,$D1,$D1		# d1 += h0*r1

	vmovdqa	0x40(%rsp),$H4		# s2^2
	vpaddq	$H0,$D4,$D4		# d4 += h2*r2
	vpmuludq	$T1,$H3,$H1	# h1*r2
	vpmuludq	$T0,$H3,$H3	# h0*r2
	vpaddq	$H1,$D3,$D3		# d3 += h1*r2
	vmovdqa	0x50(%rsp),$H2		# r3^2
	vpaddq	$H3,$D2,$D2		# d2 += h0*r2
	vpmuludq	$T4,$H4,$H0	# h4*s2
	vpmuludq	$T3,$H4,$H4	# h3*s2
	vpaddq	$H0,$D1,$D1		# d1 += h4*s2
	vmovdqa	0x60(%rsp),$H3		# s3^2
	vpaddq	$H4,$D0,$D0		# d0 += h3*s2

	vmovdqa	0x80(%rsp),$H4		# s4^2
	vpmuludq	$T1,$H2,$H1	# h1*r3
	vpmuludq	$T0,$H2,$H2	# h0*r3
	vpaddq	$H1,$D4,$D4		# d4 += h1*r3
	vpaddq	$H2,$D3,$D3		# d3 += h0*r3
	vpmuludq	$T4,$H3,$H0	# h4*s3
	vpmuludq	$T3,$H3,$H1	# h3*s3
	vpaddq	$H0,$D2,$D2		# d2 += h4*s3
	vmovdqu	16*0($inp),$H0		# load input
	vpaddq	$H1,$D1,$D1		# d1 += h3*s3
	vpmuludq	$T2,$H3,$H3	# h2*s3
	vpmuludq	$T2,$H4,$T2	# h2*s4
	vpaddq	$H3,$D0,$D0		# d0 += h2*s3

	vmovdqu	16*1($inp),$H1		#
	vpaddq	$T2,$D1,$D1		# d1 += h2*s4
	vpmuludq	$T3,$H4,$T3	# h3*s4
	vpmuludq	$T4,$H4,$T4	# h4*s4
	vpsrldq	\$6,$H0,$H2		# splat input
	vpaddq	$T3,$D2,$D2		# d2 += h3*s4
	vpaddq	$T4,$D3,$D3		# d3 += h4*s4
	vpsrldq	\$6,$H1,$H3		#
	vpmuludq	0x70(%rsp),$T0,$T4	# h0*r4
	vpmuludq	$T1,$H4,$T0	# h1*s4
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpaddq	$T4,$D4,$D4		# d4 += h0*r4
	vmovdqa	-0x90(%r11),$T4		# r0^4
	vpaddq	$T0,$D0,$D0		# d0 += h1*s4

	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	#vpsrlq	\$40,$H4,$H4		# 4
	vpsrldq	\$`40/8`,$H4,$H4	# 4
	vpsrlq	\$26,$H0,$H1
	vpand	$MASK,$H0,$H0		# 0
	vpsrlq	\$4,$H3,$H2
	vpand	$MASK,$H1,$H1		# 1
	vpand	0(%rcx),$H4,$H4		# .Lmask24
	vpsrlq	\$30,$H3,$H3
	vpand	$MASK,$H2,$H2		# 2
	vpand	$MASK,$H3,$H3		# 3
	vpor	32(%rcx),$H4,$H4	# padbit, yes, always

	vpaddq	0x00(%r11),$H0,$H0	# add hash value
	vpaddq	0x10(%r11),$H1,$H1
	vpaddq	0x20(%r11),$H2,$H2
	vpaddq	0x30(%r11),$H3,$H3
	vpaddq	0x40(%r11),$H4,$H4

	lea	16*2($inp),%rax
	lea	16*4($inp),$inp
	sub	\$64,$len
	cmovc	%rax,$inp

	################################################################
	# Now we accumulate (inp[0:1]+hash)*r^4
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vpaddq	$T0,$D0,$D0
	vpaddq	$T1,$D1,$D1
	vmovdqa	-0x80(%r11),$T2		# r1^4
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpaddq	$T0,$D2,$D2
	vpaddq	$T1,$D3,$D3
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpmuludq	-0x70(%r11),$H4,$T0	# h4*s1
	vpaddq	$T4,$D4,$D4

	vpaddq	$T0,$D0,$D0		# d0 += h4*s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq	$T1,$D3,$D3		# d3 += h2*r1
	vmovdqa	-0x60(%r11),$T3		# r2^4
	vpaddq	$T0,$D4,$D4		# d4 += h3*r1
	vpmuludq	$H1,$T2,$T1	# h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq	$T1,$D2,$D2		# d2 += h1*r1
	vpaddq	$T2,$D1,$D1		# d1 += h0*r1

	vmovdqa	-0x50(%r11),$T4		# s2^4
	vpmuludq	$H2,$T3,$T0	# h2*r2
	vpmuludq	$H1,$T3,$T1	# h1*r2
	vpaddq	$T0,$D4,$D4		# d4 += h2*r2
	vpaddq	$T1,$D3,$D3		# d3 += h1*r2
	vmovdqa	-0x40(%r11),$T2		# r3^4
	vpmuludq	$H0,$T3,$T3	# h0*r2
	vpmuludq	$H4,$T4,$T0	# h4*s2
	vpaddq	$T3,$D2,$D2		# d2 += h0*r2
	vpaddq	$T0,$D1,$D1		# d1 += h4*s2
	vmovdqa	-0x30(%r11),$T3		# s3^4
	vpmuludq	$H3,$T4,$T4	# h3*s2
	vpmuludq	$H1,$T2,$T1	# h1*r3
	vpaddq	$T4,$D0,$D0		# d0 += h3*s2

	vmovdqa	-0x10(%r11),$T4		# s4^4
	vpaddq	$T1,$D4,$D4		# d4 += h1*r3
	vpmuludq	$H0,$T2,$T2	# h0*r3
	vpmuludq	$H4,$T3,$T0	# h4*s3
	vpaddq	$T2,$D3,$D3		# d3 += h0*r3
	vpaddq	$T0,$D2,$D2		# d2 += h4*s3
	vmovdqu	16*2($inp),$T0		# load input
	vpmuludq	$H3,$T3,$T2	# h3*s3
	vpmuludq	$H2,$T3,$T3	# h2*s3
	vpaddq	$T2,$D1,$D1		# d1 += h3*s3
	vmovdqu	16*3($inp),$T1		#
	vpaddq	$T3,$D0,$D0		# d0 += h2*s3

	vpmuludq	$H2,$T4,$H2	# h2*s4
	vpmuludq	$H3,$T4,$H3	# h3*s4
	vpsrldq	\$6,$T0,$T2		# splat input
	vpaddq	$H2,$D1,$D1		# d1 += h2*s4
	vpmuludq	$H4,$T4,$H4	# h4*s4
	vpsrldq	\$6,$T1,$T3		#
	vpaddq	$H3,$D2,$H2		# h2 = d2 + h3*s4
	vpaddq	$H4,$D3,$H3		# h3 = d3 + h4*s4
	vpmuludq	-0x20(%r11),$H0,$H4	# h0*r4
	vpmuludq	$H1,$T4,$H0	# h1*s4
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpaddq	$H4,$D4,$H4		# h4 = d4 + h0*r4
	vpaddq	$H0,$D0,$H0		# h0 = d0 + h1*s4

	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	#vpsrlq	\$40,$T4,$T4		# 4
	vpsrldq	\$`40/8`,$T4,$T4	# 4
	vpsrlq	\$26,$T0,$T1
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpand	$MASK,$T0,$T0		# 0
	vpsrlq	\$4,$T3,$T2
	vpand	$MASK,$T1,$T1		# 1
	vpand	0(%rcx),$T4,$T4		# .Lmask24
	vpsrlq	\$30,$T3,$T3
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	################################################################
	# lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
	# and P. Schwabe
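	#
	# Rather than carrying every digit to completion, two staggered
	# carry chains leave each digit only slightly above 26 bits,
	# which is enough headroom for the next round of multiplications;
	# the h4 -> h0 carry is multiplied by 5 (an add plus a
	# left-shift-by-2 add), again because 2^130 ≡ 5 (mod 2^130-5).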

	vpsrlq	\$26,$H3,$D3
	vpand	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vpsrlq	\$26,$H0,$D0
	vpand	$MASK,$H0,$H0
	vpaddq	$D0,$D1,$H1		# h0 -> h1

	vpsrlq	\$26,$H4,$D0
	vpand	$MASK,$H4,$H4

	vpsrlq	\$26,$H1,$D1
	vpand	$MASK,$H1,$H1
	vpaddq	$D1,$H2,$H2		# h1 -> h2

	vpaddq	$D0,$H0,$H0
	vpsllq	\$2,$D0,$D0
	vpaddq	$D0,$H0,$H0		# h4 -> h0

	vpsrlq	\$26,$H2,$D2
	vpand	$MASK,$H2,$H2
	vpaddq	$D2,$H3,$H3		# h2 -> h3

	vpsrlq	\$26,$H0,$D0
	vpand	$MASK,$H0,$H0
	vpaddq	$D0,$H1,$H1		# h0 -> h1

	vpsrlq	\$26,$H3,$D3
	vpand	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	ja	.Loop_avx

.Lskip_loop_avx:
	################################################################
	# multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	vpshufd	\$0x10,$D4,$D4		# r0^n, xx12 -> x1x2
	add	\$32,$len
	jnz	.Long_tail_avx

	vpaddq	$H2,$T2,$T2
	vpaddq	$H0,$T0,$T0
	vpaddq	$H1,$T1,$T1
	vpaddq	$H3,$T3,$T3
	vpaddq	$H4,$T4,$T4

.Long_tail_avx:
	vmovdqa	$H2,0x20(%r11)
	vmovdqa	$H0,0x00(%r11)
	vmovdqa	$H1,0x10(%r11)
	vmovdqa	$H3,0x30(%r11)
	vmovdqa	$H4,0x40(%r11)

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpshufd	\$0x10,`16*1-64`($ctx),$H2	# r1^n
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vpmuludq	$T3,$H2,$H0	# h3*r1
	vpaddq	$H0,$D4,$D4		# d4 += h3*r1
	vpshufd	\$0x10,`16*2-64`($ctx),$H3	# s1^n
	vpmuludq	$T2,$H2,$H1	# h2*r1
	vpaddq	$H1,$D3,$D3		# d3 += h2*r1
	vpshufd	\$0x10,`16*3-64`($ctx),$H4	# r2^n
	vpmuludq	$T1,$H2,$H0	# h1*r1
	vpaddq	$H0,$D2,$D2		# d2 += h1*r1
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpaddq	$H2,$D1,$D1		# d1 += h0*r1
	vpmuludq	$T4,$H3,$H3	# h4*s1
	vpaddq	$H3,$D0,$D0		# d0 += h4*s1

	vpshufd	\$0x10,`16*4-64`($ctx),$H2	# s2^n
	vpmuludq	$T2,$H4,$H1	# h2*r2
	vpaddq	$H1,$D4,$D4		# d4 += h2*r2
	vpmuludq	$T1,$H4,$H0	# h1*r2
	vpaddq	$H0,$D3,$D3		# d3 += h1*r2
	vpshufd	\$0x10,`16*5-64`($ctx),$H3	# r3^n
	vpmuludq	$T0,$H4,$H4	# h0*r2
	vpaddq	$H4,$D2,$D2		# d2 += h0*r2
	vpmuludq	$T4,$H2,$H1	# h4*s2
	vpaddq	$H1,$D1,$D1		# d1 += h4*s2
	vpshufd	\$0x10,`16*6-64`($ctx),$H4	# s3^n
	vpmuludq	$T3,$H2,$H2	# h3*s2
	vpaddq	$H2,$D0,$D0		# d0 += h3*s2

	vpmuludq	$T1,$H3,$H0	# h1*r3
	vpaddq	$H0,$D4,$D4		# d4 += h1*r3
	vpmuludq	$T0,$H3,$H3	# h0*r3
	vpaddq	$H3,$D3,$D3		# d3 += h0*r3
	vpshufd	\$0x10,`16*7-64`($ctx),$H2	# r4^n
	vpmuludq	$T4,$H4,$H1	# h4*s3
	vpaddq	$H1,$D2,$D2		# d2 += h4*s3
	vpshufd	\$0x10,`16*8-64`($ctx),$H3	# s4^n
	vpmuludq	$T3,$H4,$H0	# h3*s3
	vpaddq	$H0,$D1,$D1		# d1 += h3*s3
	vpmuludq	$T2,$H4,$H4	# h2*s3
	vpaddq	$H4,$D0,$D0		# d0 += h2*s3

	vpmuludq	$T0,$H2,$H2	# h0*r4
	vpaddq	$H2,$D4,$D4		# h4 = d4 + h0*r4
	vpmuludq	$T4,$H3,$H1	# h4*s4
	vpaddq	$H1,$D3,$D3		# h3 = d3 + h4*s4
	vpmuludq	$T3,$H3,$H0	# h3*s4
	vpaddq	$H0,$D2,$D2		# h2 = d2 + h3*s4
	vpmuludq	$T2,$H3,$H1	# h2*s4
	vpaddq	$H1,$D1,$D1		# h1 = d1 + h2*s4
	vpmuludq	$T1,$H3,$H3	# h1*s4
	vpaddq	$H3,$D0,$D0		# h0 = d0 + h1*s4

	jz	.Lshort_tail_avx

	vmovdqu	16*0($inp),$H0		# load input
	vmovdqu	16*1($inp),$H1

	vpsrldq	\$6,$H0,$H2		# splat input
	vpsrldq	\$6,$H1,$H3
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	vpsrlq	\$40,$H4,$H4		# 4
	vpsrlq	\$26,$H0,$H1
	vpand	$MASK,$H0,$H0		# 0
	vpsrlq	\$4,$H3,$H2
	vpand	$MASK,$H1,$H1		# 1
	vpsrlq	\$30,$H3,$H3
	vpand	$MASK,$H2,$H2		# 2
	vpand	$MASK,$H3,$H3		# 3
	vpor	32(%rcx),$H4,$H4	# padbit, yes, always

	vpshufd	\$0x32,`16*0-64`($ctx),$T4	# r0^n, 34xx -> x3x4
	vpaddq	0x00(%r11),$H0,$H0
	vpaddq	0x10(%r11),$H1,$H1
	vpaddq	0x20(%r11),$H2,$H2
	vpaddq	0x30(%r11),$H3,$H3
	vpaddq	0x40(%r11),$H4,$H4

	################################################################
	# multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpaddq	$T0,$D0,$D0		# d0 += h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vpaddq	$T1,$D1,$D1		# d1 += h1*r0
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpaddq	$T0,$D2,$D2		# d2 += h2*r0
	vpshufd	\$0x32,`16*1-64`($ctx),$T2	# r1^n
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpaddq	$T1,$D3,$D3		# d3 += h3*r0
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpaddq	$T4,$D4,$D4		# d4 += h4*r0

	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq	$T0,$D4,$D4		# d4 += h3*r1
	vpshufd	\$0x32,`16*2-64`($ctx),$T3	# s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpaddq	$T1,$D3,$D3		# d3 += h2*r1
	vpshufd	\$0x32,`16*3-64`($ctx),$T4	# r2
	vpmuludq	$H1,$T2,$T0	# h1*r1
	vpaddq	$T0,$D2,$D2		# d2 += h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq	$T2,$D1,$D1		# d1 += h0*r1
	vpmuludq	$H4,$T3,$T3	# h4*s1
	vpaddq	$T3,$D0,$D0		# d0 += h4*s1

	vpshufd	\$0x32,`16*4-64`($ctx),$T2	# s2
	vpmuludq	$H2,$T4,$T1	# h2*r2
	vpaddq	$T1,$D4,$D4		# d4 += h2*r2
	vpmuludq	$H1,$T4,$T0	# h1*r2
	vpaddq	$T0,$D3,$D3		# d3 += h1*r2
	vpshufd	\$0x32,`16*5-64`($ctx),$T3	# r3
	vpmuludq	$H0,$T4,$T4	# h0*r2
	vpaddq	$T4,$D2,$D2		# d2 += h0*r2
	vpmuludq	$H4,$T2,$T1	# h4*s2
	vpaddq	$T1,$D1,$D1		# d1 += h4*s2
	vpshufd	\$0x32,`16*6-64`($ctx),$T4	# s3
	vpmuludq	$H3,$T2,$T2	# h3*s2
	vpaddq	$T2,$D0,$D0		# d0 += h3*s2

	vpmuludq	$H1,$T3,$T0	# h1*r3
	vpaddq	$T0,$D4,$D4		# d4 += h1*r3
	vpmuludq	$H0,$T3,$T3	# h0*r3
	vpaddq	$T3,$D3,$D3		# d3 += h0*r3
	vpshufd	\$0x32,`16*7-64`($ctx),$T2	# r4
	vpmuludq	$H4,$T4,$T1	# h4*s3
	vpaddq	$T1,$D2,$D2		# d2 += h4*s3
	vpshufd	\$0x32,`16*8-64`($ctx),$T3	# s4
	vpmuludq	$H3,$T4,$T0	# h3*s3
	vpaddq	$T0,$D1,$D1		# d1 += h3*s3
	vpmuludq	$H2,$T4,$T4	# h2*s3
	vpaddq	$T4,$D0,$D0		# d0 += h2*s3

	vpmuludq	$H0,$T2,$T2	# h0*r4
	vpaddq	$T2,$D4,$D4		# d4 += h0*r4
	vpmuludq	$H4,$T3,$T1	# h4*s4
	vpaddq	$T1,$D3,$D3		# d3 += h4*s4
	vpmuludq	$H3,$T3,$T0	# h3*s4
	vpaddq	$T0,$D2,$D2		# d2 += h3*s4
	vpmuludq	$H2,$T3,$T1	# h2*s4
	vpaddq	$T1,$D1,$D1		# d1 += h2*s4
	vpmuludq	$H1,$T3,$T3	# h1*s4
	vpaddq	$T3,$D0,$D0		# d0 += h1*s4

.Lshort_tail_avx:
	################################################################
	# horizontal addition

	vpsrldq	\$8,$D4,$T4
	vpsrldq	\$8,$D3,$T3
	vpsrldq	\$8,$D1,$T1
	vpsrldq	\$8,$D0,$T0
	vpsrldq	\$8,$D2,$T2
	vpaddq	$T3,$D3,$D3
	vpaddq	$T4,$D4,$D4
	vpaddq	$T0,$D0,$D0
	vpaddq	$T1,$D1,$D1
	vpaddq	$T2,$D2,$D2

	################################################################
	# lazy reduction

	vpsrlq	\$26,$D3,$H3
	vpand	$MASK,$D3,$D3
	vpaddq	$H3,$D4,$D4		# h3 -> h4

	vpsrlq	\$26,$D0,$H0
	vpand	$MASK,$D0,$D0
	vpaddq	$H0,$D1,$D1		# h0 -> h1

	vpsrlq	\$26,$D4,$H4
	vpand	$MASK,$D4,$D4

	vpsrlq	\$26,$D1,$H1
	vpand	$MASK,$D1,$D1
	vpaddq	$H1,$D2,$D2		# h1 -> h2

	vpaddq	$H4,$D0,$D0
	vpsllq	\$2,$H4,$H4
	vpaddq	$H4,$D0,$D0		# h4 -> h0

	vpsrlq	\$26,$D2,$H2
	vpand	$MASK,$D2,$D2
	vpaddq	$H2,$D3,$D3		# h2 -> h3

	vpsrlq	\$26,$D0,$H0
	vpand	$MASK,$D0,$D0
	vpaddq	$H0,$D1,$D1		# h0 -> h1

	vpsrlq	\$26,$D3,$H3
	vpand	$MASK,$D3,$D3
	vpaddq	$H3,$D4,$D4		# h3 -> h4

	vmovd	$D0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	$D1,`4*1-48-64`($ctx)
	vmovd	$D2,`4*2-48-64`($ctx)
	vmovd	$D3,`4*3-48-64`($ctx)
	vmovd	$D4,`4*4-48-64`($ctx)
___
$code.=<<___	if ($win64);
	vmovdqa	0x50(%r11),%xmm6
	vmovdqa	0x60(%r11),%xmm7
	vmovdqa	0x70(%r11),%xmm8
	vmovdqa	0x80(%r11),%xmm9
	vmovdqa	0x90(%r11),%xmm10
	vmovdqa	0xa0(%r11),%xmm11
	vmovdqa	0xb0(%r11),%xmm12
	vmovdqa	0xc0(%r11),%xmm13
	vmovdqa	0xd0(%r11),%xmm14
	vmovdqa	0xe0(%r11),%xmm15
	lea	0xf8(%r11),%rsp
.Ldo_avx_epilogue:
___
$code.=<<___	if (!$win64);
	lea	0x58(%r11),%rsp
___
$code.=<<___;
	vzeroupper
	ret
.size	poly1305_blocks_avx,.-poly1305_blocks_avx

.type	poly1305_emit_avx,\@function,3
.align	32
poly1305_emit_avx:
	cmpl	\$0,20($ctx)	# is_base2_26?
	je	.Lemit

	mov	0($ctx),%eax	# load hash value base 2^26
	mov	4($ctx),%ecx
	mov	8($ctx),%r8d
	mov	12($ctx),%r11d
	mov	16($ctx),%r10d

	shl	\$26,%rcx	# base 2^26 -> base 2^64
	mov	%r8,%r9
	shl	\$52,%r8
	add	%rcx,%rax
	shr	\$12,%r9
	add	%rax,%r8	# h0
	adc	\$0,%r9

	shl	\$14,%r11
	mov	%r10,%rax
	shr	\$24,%r10
	add	%r11,%r9
	shl	\$40,%rax
	add	%rax,%r9	# h1
	adc	\$0,%r10	# h2

	mov	%r10,%rax	# could be partially reduced, so reduce
	mov	%r10,%rcx
	and	\$3,%r10
	shr	\$2,%rax
	and	\$-4,%rcx
	add	%rcx,%rax
	add	%rax,%r8
	adc	\$0,%r9
	adc	\$0,%r10

	mov	%r8,%rax
	add	\$5,%r8		# compare to modulus
	mov	%r9,%rcx
	adc	\$0,%r9
	adc	\$0,%r10
	shr	\$2,%r10	# did 130-bit value overflow?
	cmovnz	%r8,%rax
	cmovnz	%r9,%rcx

	add	0($nonce),%rax	# accumulate nonce
	adc	8($nonce),%rcx
	mov	%rax,0($mac)	# write result
	mov	%rcx,8($mac)

	ret
.size	poly1305_emit_avx,.-poly1305_emit_avx
___
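# The base 2^26 -> base 2^64 recombination performed above, in Perl
# terms (a hedged sketch; @h holds the five 26-bit digits as plain
# integers):
#
#	my $h = Math::BigInt->bzero();
#	$h->badd(Math::BigInt->new($h[$_])->blsft(26*$_)) for (0..4);
#	# ... followed by the same reduce / +5 compare / +nonce steps
#	# as in the scalar poly1305_emit.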

if ($avx>1) {
my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
    map("%ymm$_",(0..15));
my $S4=$MASK;

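# The AVX2 path processes four 16-byte blocks per iteration: with four
# hash lanes the update is H = (m1+h)*r^4 + m2*r^3 + m3*r^2 + m4*r,
# which equals four serial applications of h = (h+m)*r. A Math::BigInt
# sketch of that equivalence (an illustration, not used here):
#
#	sub poly1305_4way_ref {
#	    my ($h, $r, @m) = @_;	# Math::BigInt hash, key, 4 blocks
#	    my $p = Math::BigInt->new(1)->blsft(130)->bsub(5);
#	    my $acc = Math::BigInt->bzero();
#	    $acc->badd(($m[$_] + ($_ ? 0 : $h)) * $r->copy()->bpow(4-$_))
#		for (0..3);
#	    return $acc->bmod($p);
#	}
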
1364 | $code.=<<___; | |
1365 | .type poly1305_blocks_avx2,\@function,4 | |
1366 | .align 32 | |
1367 | poly1305_blocks_avx2: | |
1368 | mov 20($ctx),%r8d # is_base2_26 | |
1369 | cmp \$128,$len | |
1370 | jae .Lblocks_avx2 | |
1371 | test %r8d,%r8d | |
a85dbf11 | 1372 | jz .Lblocks |
a98c648e AP |
1373 | |
1374 | .Lblocks_avx2: | |
1375 | and \$-16,$len | |
1376 | jz .Lno_data_avx2 | |
1377 | ||
1378 | vzeroupper | |
1379 | ||
1380 | test %r8d,%r8d | |
1381 | jz .Lbase2_64_avx2 | |
1382 | ||
1383 | test \$63,$len | |
1384 | jz .Leven_avx2 | |
1385 | ||
1386 | push %rbx | |
1387 | push %rbp | |
1388 | push %r12 | |
1389 | push %r13 | |
1390 | push %r14 | |
1391 | push %r15 | |
1392 | .Lblocks_avx2_body: | |
1393 | ||
1394 | mov $len,%r15 # reassign $len | |
1395 | ||
1396 | mov 0($ctx),$d1 # load hash value | |
1397 | mov 8($ctx),$d2 | |
1398 | mov 16($ctx),$h2#d | |
1399 | ||
1400 | mov 24($ctx),$r0 # load r | |
1401 | mov 32($ctx),$s1 | |
1402 | ||
1403 | ################################# base 2^26 -> base 2^64 | |
1404 | mov $d1#d,$h0#d | |
28411657 | 1405 | and \$`-1*(1<<31)`,$d1 |
a98c648e AP |
1406 | mov $d2,$r1 # borrow $r1 |
1407 | mov $d2#d,$h1#d | |
28411657 | 1408 | and \$`-1*(1<<31)`,$d2 |
a98c648e AP |
1409 | |
1410 | shr \$6,$d1 | |
1411 | shl \$52,$r1 | |
1412 | add $d1,$h0 | |
1413 | shr \$12,$h1 | |
1414 | shr \$18,$d2 | |
1415 | add $r1,$h0 | |
1416 | adc $d2,$h1 | |
1417 | ||
1418 | mov $h2,$d1 | |
1419 | shl \$40,$d1 | |
1420 | shr \$24,$h2 | |
1421 | add $d1,$h1 | |
1422 | adc \$0,$h2 # can be partially reduced... | |
1423 | ||
1424 | mov \$-4,$d2 # ... so reduce | |
1425 | mov $h2,$d1 | |
1426 | and $h2,$d2 | |
1427 | shr \$2,$d1 | |
1428 | and \$3,$h2 | |
1429 | add $d2,$d1 # =*5 | |
1430 | add $d1,$h0 | |
1431 | adc \$0,$h1 | |
4b8736a2 | 1432 | adc \$0,$h2 |
a98c648e AP |
1433 | |
1434 | mov $s1,$r1 | |
1435 | mov $s1,%rax | |
1436 | shr \$2,$s1 | |
1437 | add $r1,$s1 # s1 = r1 + (r1 >> 2) | |
1438 | ||
1439 | .Lbase2_26_pre_avx2: | |
1440 | add 0($inp),$h0 # accumulate input | |
1441 | adc 8($inp),$h1 | |
1442 | lea 16($inp),$inp | |
1443 | adc $padbit,$h2 | |
1444 | sub \$16,%r15 | |
1445 | ||
1446 | call __poly1305_block | |
1447 | mov $r1,%rax | |
1448 | ||
1449 | test \$63,%r15 | |
1450 | jnz .Lbase2_26_pre_avx2 | |
1451 | ||
1452 | test $padbit,$padbit # if $padbit is zero, | |
1453 | jz .Lstore_base2_64_avx2 # store hash in base 2^64 format | |
1454 | ||
1455 | ################################# base 2^64 -> base 2^26 | |
1456 | mov $h0,%rax | |
1457 | mov $h0,%rdx | |
1458 | shr \$52,$h0 | |
1459 | mov $h1,$r0 | |
1460 | mov $h1,$r1 | |
1461 | shr \$26,%rdx | |
1462 | and \$0x3ffffff,%rax # h[0] | |
1463 | shl \$12,$r0 | |
1464 | and \$0x3ffffff,%rdx # h[1] | |
1465 | shr \$14,$h1 | |
1466 | or $r0,$h0 | |
1467 | shl \$24,$h2 | |
1468 | and \$0x3ffffff,$h0 # h[2] | |
1469 | shr \$40,$r1 | |
1470 | and \$0x3ffffff,$h1 # h[3] | |
1471 | or $r1,$h2 # h[4] | |
1472 | ||
1473 | test %r15,%r15 | |
1474 | jz .Lstore_base2_26_avx2 | |
1475 | ||
1476 | vmovd %rax#d,%x#$H0 | |
1477 | vmovd %rdx#d,%x#$H1 | |
1478 | vmovd $h0#d,%x#$H2 | |
1479 | vmovd $h1#d,%x#$H3 | |
1480 | vmovd $h2#d,%x#$H4 | |
1481 | jmp .Lproceed_avx2 | |
1482 | ||
1483 | .align 32 | |
1484 | .Lstore_base2_64_avx2: | |
1485 | mov $h0,0($ctx) | |
1486 | mov $h1,8($ctx) | |
1487 | mov $h2,16($ctx) # note that is_base2_26 is zeroed | |
1488 | jmp .Ldone_avx2 | |
1489 | ||
1490 | .align 16 | |
1491 | .Lstore_base2_26_avx2: | |
1492 | mov %rax#d,0($ctx) # store hash value base 2^26 | |
1493 | mov %rdx#d,4($ctx) | |
1494 | mov $h0#d,8($ctx) | |
1495 | mov $h1#d,12($ctx) | |
1496 | mov $h2#d,16($ctx) | |
1497 | .align 16 | |
1498 | .Ldone_avx2: | |
1499 | mov 0(%rsp),%r15 | |
1500 | mov 8(%rsp),%r14 | |
1501 | mov 16(%rsp),%r13 | |
1502 | mov 24(%rsp),%r12 | |
1503 | mov 32(%rsp),%rbp | |
1504 | mov 40(%rsp),%rbx | |
1505 | lea 48(%rsp),%rsp | |
1506 | .Lno_data_avx2: | |
1507 | .Lblocks_avx2_epilogue: | |
1508 | ret | |
1509 | ||
1510 | .align 32 | |
1511 | .Lbase2_64_avx2: | |
1512 | push %rbx | |
1513 | push %rbp | |
1514 | push %r12 | |
1515 | push %r13 | |
1516 | push %r14 | |
1517 | push %r15 | |
1518 | .Lbase2_64_avx2_body: | |
1519 | ||
1520 | mov $len,%r15 # reassign $len | |
1521 | ||
1522 | mov 24($ctx),$r0 # load r | |
1523 | mov 32($ctx),$s1 | |
1524 | ||
1525 | mov 0($ctx),$h0 # load hash value | |
1526 | mov 8($ctx),$h1 | |
1527 | mov 16($ctx),$h2#d | |
1528 | ||
1529 | mov $s1,$r1 | |
1530 | mov $s1,%rax | |
1531 | shr \$2,$s1 | |
1532 | add $r1,$s1 # s1 = r1 + (r1 >> 2) | |
1533 | ||
1534 | test \$63,$len | |
1535 | jz .Linit_avx2 | |
1536 | ||
1537 | .Lbase2_64_pre_avx2: | |
1538 | add 0($inp),$h0 # accumulate input | |
1539 | adc 8($inp),$h1 | |
1540 | lea 16($inp),$inp | |
1541 | adc $padbit,$h2 | |
1542 | sub \$16,%r15 | |
1543 | ||
1544 | call __poly1305_block | |
1545 | mov $r1,%rax | |
1546 | ||
1547 | test \$63,%r15 | |
1548 | jnz .Lbase2_64_pre_avx2 | |
1549 | ||
1550 | .Linit_avx2: | |
1551 | ################################# base 2^64 -> base 2^26 | |
1552 | mov $h0,%rax | |
1553 | mov $h0,%rdx | |
1554 | shr \$52,$h0 | |
1555 | mov $h1,$d1 | |
1556 | mov $h1,$d2 | |
1557 | shr \$26,%rdx | |
1558 | and \$0x3ffffff,%rax # h[0] | |
1559 | shl \$12,$d1 | |
1560 | and \$0x3ffffff,%rdx # h[1] | |
1561 | shr \$14,$h1 | |
1562 | or $d1,$h0 | |
1563 | shl \$24,$h2 | |
1564 | and \$0x3ffffff,$h0 # h[2] | |
1565 | shr \$40,$d2 | |
1566 | and \$0x3ffffff,$h1 # h[3] | |
1567 | or $d2,$h2 # h[4] | |
1568 | ||
1569 | vmovd %rax#d,%x#$H0 | |
1570 | vmovd %rdx#d,%x#$H1 | |
1571 | vmovd $h0#d,%x#$H2 | |
1572 | vmovd $h1#d,%x#$H3 | |
1573 | vmovd $h2#d,%x#$H4 | |
1574 | movl \$1,20($ctx) # set is_base2_26 | |
1575 | ||
1576 | call __poly1305_init_avx | |
1577 | ||
1578 | .Lproceed_avx2: | |
abb8c44f AP |
1579 | mov %r15,$len # restore $len |
1580 | mov OPENSSL_ia32cap_P+8(%rip),%r10d | |
1581 | mov \$`(1<<31|1<<30|1<<16)`,%r11d | |
a98c648e AP |
1582 | |
1583 | mov 0(%rsp),%r15 | |
1584 | mov 8(%rsp),%r14 | |
1585 | mov 16(%rsp),%r13 | |
1586 | mov 24(%rsp),%r12 | |
1587 | mov 32(%rsp),%rbp | |
1588 | mov 40(%rsp),%rbx | |
1589 | lea 48(%rsp),%rax | |
1590 | lea 48(%rsp),%rsp | |
1591 | .Lbase2_64_avx2_epilogue: | |
1592 | jmp .Ldo_avx2 | |
1593 | ||
1594 | .align 32 | |
1595 | .Leven_avx2: | |
abb8c44f AP |
1596 | mov OPENSSL_ia32cap_P+8(%rip),%r10d |
1597 | mov \$`(1<<31|1<<30|1<<16)`,%r11d | |
a98c648e AP |
1598 | vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26 |
1599 | vmovd 4*1($ctx),%x#$H1 | |
1600 | vmovd 4*2($ctx),%x#$H2 | |
1601 | vmovd 4*3($ctx),%x#$H3 | |
1602 | vmovd 4*4($ctx),%x#$H4 | |
1603 | ||
1604 | .Ldo_avx2: | |
1605 | ___ | |
abb8c44f AP |
1606 | $code.=<<___ if ($avx>2); |
1607 | cmp \$512,$len | |
1608 | jb .Lskip_avx512 | |
1609 | and %r11d,%r10d | |
1610 | cmp %r11d,%r10d # check for AVX512F+BW+VL | |
1611 | je .Lblocks_avx512 | |
1612 | .Lskip_avx512: | |
1613 | ___ | |
a98c648e AP |
1614 | $code.=<<___ if (!$win64); |
1615 | lea -8(%rsp),%r11 | |
1616 | sub \$0x128,%rsp | |
1617 | ___ | |
1618 | $code.=<<___ if ($win64); | |
1619 | lea -0xf8(%rsp),%r11 | |
1620 | sub \$0x1c8,%rsp | |
1621 | vmovdqa %xmm6,0x50(%r11) | |
1622 | vmovdqa %xmm7,0x60(%r11) | |
1623 | vmovdqa %xmm8,0x70(%r11) | |
1624 | vmovdqa %xmm9,0x80(%r11) | |
1625 | vmovdqa %xmm10,0x90(%r11) | |
1626 | vmovdqa %xmm11,0xa0(%r11) | |
1627 | vmovdqa %xmm12,0xb0(%r11) | |
1628 | vmovdqa %xmm13,0xc0(%r11) | |
1629 | vmovdqa %xmm14,0xd0(%r11) | |
1630 | vmovdqa %xmm15,0xe0(%r11) | |
1631 | .Ldo_avx2_body: | |
1632 | ___ | |
1633 | $code.=<<___; | |
1634 | lea 48+64($ctx),$ctx # size optimization | |
1635 | lea .Lconst(%rip),%rcx | |
1636 | ||
1637 | # expand and copy pre-calculated table to stack | |
1638 | vmovdqu `16*0-64`($ctx),%x#$T2 | |
1639 | and \$-512,%rsp | |
1640 | vmovdqu `16*1-64`($ctx),%x#$T3 | |
1641 | vmovdqu `16*2-64`($ctx),%x#$T4 | |
1642 | vmovdqu `16*3-64`($ctx),%x#$D0 | |
1643 | vmovdqu `16*4-64`($ctx),%x#$D1 | |
1644 | vmovdqu `16*5-64`($ctx),%x#$D2 | |
1645 | vmovdqu `16*6-64`($ctx),%x#$D3 | |
1646 | vpermq \$0x15,$T2,$T2 # 00003412 -> 12343434 | |
1647 | vmovdqu `16*7-64`($ctx),%x#$D4 | |
1648 | vpermq \$0x15,$T3,$T3 | |
1649 | vpshufd \$0xc8,$T2,$T2 # 12343434 -> 14243444 | |
1650 | vmovdqu `16*8-64`($ctx),%x#$MASK | |
1651 | vpermq \$0x15,$T4,$T4 | |
1652 | vpshufd \$0xc8,$T3,$T3 | |
1653 | vmovdqa $T2,0x00(%rsp) | |
1654 | vpermq \$0x15,$D0,$D0 | |
1655 | vpshufd \$0xc8,$T4,$T4 | |
1656 | vmovdqa $T3,0x20(%rsp) | |
1657 | vpermq \$0x15,$D1,$D1 | |
1658 | vpshufd \$0xc8,$D0,$D0 | |
1659 | vmovdqa $T4,0x40(%rsp) | |
1660 | vpermq \$0x15,$D2,$D2 | |
1661 | vpshufd \$0xc8,$D1,$D1 | |
1662 | vmovdqa $D0,0x60(%rsp) | |
1663 | vpermq \$0x15,$D3,$D3 | |
1664 | vpshufd \$0xc8,$D2,$D2 | |
1665 | vmovdqa $D1,0x80(%rsp) | |
1666 | vpermq \$0x15,$D4,$D4 | |
1667 | vpshufd \$0xc8,$D3,$D3 | |
1668 | vmovdqa $D2,0xa0(%rsp) | |
1669 | vpermq \$0x15,$MASK,$MASK | |
1670 | vpshufd \$0xc8,$D4,$D4 | |
1671 | vmovdqa $D3,0xc0(%rsp) | |
1672 | vpshufd \$0xc8,$MASK,$MASK | |
1673 | vmovdqa $D4,0xe0(%rsp) | |
1674 | vmovdqa $MASK,0x100(%rsp) | |
1675 | vmovdqa 64(%rcx),$MASK # .Lmask26 | |
1676 | ||
1677 | ################################################################ | |
1678 | # load input | |
1679 | vmovdqu 16*0($inp),%x#$T0 | |
1680 | vmovdqu 16*1($inp),%x#$T1 | |
1681 | vinserti128 \$1,16*2($inp),$T0,$T0 | |
1682 | vinserti128 \$1,16*3($inp),$T1,$T1 | |
1683 | lea 16*4($inp),$inp | |
1684 | ||
1685 | vpsrldq \$6,$T0,$T2 # splat input | |
1686 | vpsrldq \$6,$T1,$T3 | |
1687 | vpunpckhqdq $T1,$T0,$T4 # 4 | |
1688 | vpunpcklqdq $T3,$T2,$T2 # 2:3 | |
1689 | vpunpcklqdq $T1,$T0,$T0 # 0:1 | |
1690 | ||
1691 | vpsrlq \$30,$T2,$T3 | |
1692 | vpsrlq \$4,$T2,$T2 | |
1693 | vpsrlq \$26,$T0,$T1 | |
1694 | vpsrlq \$40,$T4,$T4 # 4 | |
1695 | vpand $MASK,$T2,$T2 # 2 | |
1696 | vpand $MASK,$T0,$T0 # 0 | |
1697 | vpand $MASK,$T1,$T1 # 1 | |
1698 | vpand $MASK,$T3,$T3 # 3 | |
1699 | vpor 32(%rcx),$T4,$T4 # padbit, yes, always | |
1700 | ||
1701 | lea 0x90(%rsp),%rax # size optimization | |
1702 | vpaddq $H2,$T2,$H2 # accumulate input | |
1703 | sub \$64,$len | |
1704 | jz .Ltail_avx2 | |
1705 | jmp .Loop_avx2 | |
1706 | ||
1707 | .align 32 | |
1708 | .Loop_avx2: | |
1709 | ################################################################ | |
abb8c44f AP |
1710 | # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4 |
1711 | # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3 | |
1712 | # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2 | |
1713 | # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1 | |
1714 | # \________/\__________/ | |
a98c648e AP |
1715 | ################################################################ |
1716 | #vpaddq $H2,$T2,$H2 # accumulate input | |
1717 | vpaddq $H0,$T0,$H0 | |
1718 | vmovdqa `32*0`(%rsp),$T0 # r0^4 | |
1719 | vpaddq $H1,$T1,$H1 | |
1720 | vmovdqa `32*1`(%rsp),$T1 # r1^4 | |
1721 | vpaddq $H3,$T3,$H3 | |
1722 | vmovdqa `32*3`(%rsp),$T2 # r2^4 | |
1723 | vpaddq $H4,$T4,$H4 | |
1724 | vmovdqa `32*6-0x90`(%rax),$T3 # s3^4 | |
1725 | vmovdqa `32*8-0x90`(%rax),$S4 # s4^4 | |
1726 | ||
1727 | # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 | |
1728 | # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 | |
1729 | # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 | |
1730 | # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 | |
1731 | # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 | |
1732 | # | |
1733 | # however, as h2 is "chronologically" first one available pull | |
1734 | # corresponding operations up, so it's | |
1735 | # | |
1736 | # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4 | |
1737 | # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4 | |
1738 | # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 | |
1739 | # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 | |
1740 | # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4 | |
1741 | ||
	vpmuludq	$H2,$T0,$D2	# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4	# h0*r1
	vpmuludq	$H1,$T1,$H2	# h1*r1, borrow $H2 as temp
	vpaddq		$T4,$D1,$D1	# d1 += h0*r1
	vpaddq		$H2,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4	# h3*r1
	vpmuludq	`32*2`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4	# d4 += h3*r1
	vpaddq		$H2,$D0,$D0	# d0 += h4*s1
	vmovdqa	`32*4-0x90`(%rax),$T1	# s2

	vpmuludq	$H0,$T0,$T4	# h0*r0
	vpmuludq	$H1,$T0,$H2	# h1*r0
	vpaddq		$T4,$D0,$D0	# d0 += h0*r0
	vpaddq		$H2,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4	# h3*r0
	vpmuludq	$H4,$T0,$H2	# h4*r0
	vmovdqu	16*0($inp),%x#$T0	# load input
	vpaddq		$T4,$D3,$D3	# d3 += h3*r0
	vpaddq		$H2,$D4,$D4	# d4 += h4*r0
	vinserti128	\$1,16*2($inp),$T0,$T0

	vpmuludq	$H3,$T1,$T4	# h3*s2
	vpmuludq	$H4,$T1,$H2	# h4*s2
	vmovdqu	16*1($inp),%x#$T1
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2
	vpaddq		$H2,$D1,$D1	# d1 += h4*s2
	vmovdqa	`32*5-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4	# h1*r2
	vpmuludq	$H0,$T2,$T2	# h0*r2
	vpaddq		$T4,$D3,$D3	# d3 += h1*r2
	vpaddq		$T2,$D2,$D2	# d2 += h0*r2
	vinserti128	\$1,16*3($inp),$T1,$T1
	lea	16*4($inp),$inp

	vpmuludq	$H1,$H2,$T4	# h1*r3
	vpmuludq	$H0,$H2,$H2	# h0*r3
	vpsrldq	\$6,$T0,$T2		# splat input
	vpaddq		$T4,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4	# h3*s3
	vpmuludq	$H4,$T3,$H2	# h4*s3
	vpsrldq	\$6,$T1,$T3
	vpaddq		$T4,$D1,$D1	# d1 += h3*s3
	vpaddq		$H2,$D2,$D2	# d2 += h4*s3
	vpunpckhqdq	$T1,$T0,$T4	# 4

	vpmuludq	$H3,$S4,$H3	# h3*s4
	vpmuludq	$H4,$S4,$H4	# h4*s4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*s4
	vpunpcklqdq	$T3,$T2,$T3	# 2:3
	vpmuludq	`32*7-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0	# h1*s4
	vmovdqa	64(%rcx),$MASK		# .Lmask26
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4

	################################################################
	# lazy reduction (interleaved with tail of input splat)

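	# A scalar model of this carry chain (Python-style pseudo-code;
	# the state is five 26-bit limbs, MASK = (1 << 26) - 1):
	#
	#   c = h3 >> 26; h3 &= MASK; h4 += c
	#   c = h0 >> 26; h0 &= MASK; h1 += c
	#   c = h4 >> 26; h4 &= MASK; h0 += c * 5   # 2^130 = 5 mod p
	#   c = h1 >> 26; h1 &= MASK; h2 += c
	#   c = h2 >> 26; h2 &= MASK; h3 += c
	#   c = h0 >> 26; h0 &= MASK; h1 += c
	#   c = h3 >> 26; h3 &= MASK; h4 += c
	#
	# The "*5" step is issued as c + 4*c, i.e. vpaddq plus vpsllq by 2,
	# which is why h4 -> h0 takes three instructions below.
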
	vpsrlq	\$26,$H3,$D3
	vpand	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vpsrlq	\$26,$H0,$D0
	vpand	$MASK,$H0,$H0
	vpaddq	$D0,$D1,$H1		# h0 -> h1

	vpsrlq	\$26,$H4,$D4
	vpand	$MASK,$H4,$H4

	vpsrlq	\$4,$T3,$T2

	vpsrlq	\$26,$H1,$D1
	vpand	$MASK,$H1,$H1
	vpaddq	$D1,$H2,$H2		# h1 -> h2

	vpaddq	$D4,$H0,$H0
	vpsllq	\$2,$D4,$D4
	vpaddq	$D4,$H0,$H0		# h4 -> h0

	vpand	$MASK,$T2,$T2		# 2
	vpsrlq	\$26,$T0,$T1

	vpsrlq	\$26,$H2,$D2
	vpand	$MASK,$H2,$H2
	vpaddq	$D2,$H3,$H3		# h2 -> h3

	vpaddq	$T2,$H2,$H2		# modulo-scheduled
	vpsrlq	\$30,$T3,$T3

	vpsrlq	\$26,$H0,$D0
	vpand	$MASK,$H0,$H0
	vpaddq	$D0,$H1,$H1		# h0 -> h1

	vpsrlq	\$40,$T4,$T4		# 4

	vpsrlq	\$26,$H3,$D3
	vpand	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	sub	\$64,$len
	jnz	.Loop_avx2

	.byte	0x66,0x90		# two-byte nop, alignment padding
.Ltail_avx2:
	################################################################
	# while the above multiplications were by r^4 in all lanes, in the
	# last iteration we multiply the least significant lane by r^4 and
	# the most significant one by r, so it's a copy of the above except
	# that references to the precomputed table are displaced by 4...

	#vpaddq	$H2,$T2,$H2		# accumulate input
	vpaddq	$H0,$T0,$H0
	vmovdqu	`32*0+4`(%rsp),$T0	# r0^4
	vpaddq	$H1,$T1,$H1
	vmovdqu	`32*1+4`(%rsp),$T1	# r1^4
	vpaddq	$H3,$T3,$H3
	vmovdqu	`32*3+4`(%rsp),$T2	# r2^4
	vpaddq	$H4,$T4,$H4
	vmovdqu	`32*6+4-0x90`(%rax),$T3	# s3^4
	vmovdqu	`32*8+4-0x90`(%rax),$S4	# s4^4

	vpmuludq	$H2,$T0,$D2	# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4	# h0*r1
	vpmuludq	$H1,$T1,$H2	# h1*r1
	vpaddq		$T4,$D1,$D1	# d1 += h0*r1
	vpaddq		$H2,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4	# h3*r1
	vpmuludq	`32*2+4`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4	# d4 += h3*r1
	vpaddq		$H2,$D0,$D0	# d0 += h4*s1

	vpmuludq	$H0,$T0,$T4	# h0*r0
	vpmuludq	$H1,$T0,$H2	# h1*r0
	vpaddq		$T4,$D0,$D0	# d0 += h0*r0
	vmovdqu	`32*4+4-0x90`(%rax),$T1	# s2
	vpaddq		$H2,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4	# h3*r0
	vpmuludq	$H4,$T0,$H2	# h4*r0
	vpaddq		$T4,$D3,$D3	# d3 += h3*r0
	vpaddq		$H2,$D4,$D4	# d4 += h4*r0

	vpmuludq	$H3,$T1,$T4	# h3*s2
	vpmuludq	$H4,$T1,$H2	# h4*s2
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2
	vpaddq		$H2,$D1,$D1	# d1 += h4*s2
	vmovdqu	`32*5+4-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4	# h1*r2
	vpmuludq	$H0,$T2,$T2	# h0*r2
	vpaddq		$T4,$D3,$D3	# d3 += h1*r2
	vpaddq		$T2,$D2,$D2	# d2 += h0*r2

	vpmuludq	$H1,$H2,$T4	# h1*r3
	vpmuludq	$H0,$H2,$H2	# h0*r3
	vpaddq		$T4,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4	# h3*s3
	vpmuludq	$H4,$T3,$H2	# h4*s3
	vpaddq		$T4,$D1,$D1	# d1 += h3*s3
	vpaddq		$H2,$D2,$D2	# d2 += h4*s3

	vpmuludq	$H3,$S4,$H3	# h3*s4
	vpmuludq	$H4,$S4,$H4	# h4*s4
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*s4
	vpmuludq	`32*7+4-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0	# h1*s4
	vmovdqa	64(%rcx),$MASK		# .Lmask26
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4

	################################################################
	# horizontal addition

	vpsrldq	\$8,$D1,$T1
	vpsrldq	\$8,$H2,$T2
	vpsrldq	\$8,$H3,$T3
	vpsrldq	\$8,$H4,$T4
	vpsrldq	\$8,$H0,$T0
	vpaddq	$T1,$D1,$D1
	vpaddq	$T2,$H2,$H2
	vpaddq	$T3,$H3,$H3
	vpaddq	$T4,$H4,$H4
	vpaddq	$T0,$H0,$H0

	vpermq	\$0x2,$H3,$T3
	vpermq	\$0x2,$H4,$T4
	vpermq	\$0x2,$H0,$T0
	vpermq	\$0x2,$D1,$T1
	vpermq	\$0x2,$H2,$T2
	vpaddq	$T3,$H3,$H3
	vpaddq	$T4,$H4,$H4
	vpaddq	$T0,$H0,$H0
	vpaddq	$T1,$D1,$D1
	vpaddq	$T2,$H2,$H2
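
	# What the two folds above compute per register (illustrative
	# trace over the four 64-bit lanes):
	#
	#   s = [q0, q1, q2, q3]
	#   s = [q0+q1, _, q2+q3, _]      # vpsrldq by 8, then vpaddq
	#   s = [q0+q1+q2+q3, _, _, _]    # vpermq imm 0x2, then vpaddq
	#
	# i.e. the lane sum for each limb lands in qword 0.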

	################################################################
	# lazy reduction

	vpsrlq	\$26,$H3,$D3
	vpand	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vpsrlq	\$26,$H0,$D0
	vpand	$MASK,$H0,$H0
	vpaddq	$D0,$D1,$H1		# h0 -> h1

	vpsrlq	\$26,$H4,$D4
	vpand	$MASK,$H4,$H4

	vpsrlq	\$26,$H1,$D1
	vpand	$MASK,$H1,$H1
	vpaddq	$D1,$H2,$H2		# h1 -> h2

	vpaddq	$D4,$H0,$H0
	vpsllq	\$2,$D4,$D4
	vpaddq	$D4,$H0,$H0		# h4 -> h0

	vpsrlq	\$26,$H2,$D2
	vpand	$MASK,$H2,$H2
	vpaddq	$D2,$H3,$H3		# h2 -> h3

	vpsrlq	\$26,$H0,$D0
	vpand	$MASK,$H0,$H0
	vpaddq	$D0,$H1,$H1		# h0 -> h1

	vpsrlq	\$26,$H3,$D3
	vpand	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vmovd	%x#$H0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	%x#$H1,`4*1-48-64`($ctx)
	vmovd	%x#$H2,`4*2-48-64`($ctx)
	vmovd	%x#$H3,`4*3-48-64`($ctx)
	vmovd	%x#$H4,`4*4-48-64`($ctx)
___
$code.=<<___ if ($win64);
	vmovdqa	0x50(%r11),%xmm6
	vmovdqa	0x60(%r11),%xmm7
	vmovdqa	0x70(%r11),%xmm8
	vmovdqa	0x80(%r11),%xmm9
	vmovdqa	0x90(%r11),%xmm10
	vmovdqa	0xa0(%r11),%xmm11
	vmovdqa	0xb0(%r11),%xmm12
	vmovdqa	0xc0(%r11),%xmm13
	vmovdqa	0xd0(%r11),%xmm14
	vmovdqa	0xe0(%r11),%xmm15
	lea	0xf8(%r11),%rsp
.Ldo_avx2_epilogue:
___
$code.=<<___ if (!$win64);
	lea	8(%r11),%rsp
___
$code.=<<___;
	vzeroupper
	ret
.size	poly1305_blocks_avx2,.-poly1305_blocks_avx2
___
#######################################################################
if ($avx>2) {
# On entry we have input length divisible by 64. But since the inner loop
# processes 128 bytes per iteration, cases where the length is not divisible
# by 128 are handled by passing the tail 64 bytes to .Ltail_avx2. For this
# reason the stack layout is kept identical to poly1305_blocks_avx2. If not
# for this tail, we wouldn't even have to allocate a stack frame...

my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%ymm$_",(16..24));
my ($M0,$M1,$M2,$M3,$M4) = map("%ymm$_",(25..29));
my $PADBIT="%zmm30";
my $GATHER="%ymm31";

$code.=<<___;
.type	poly1305_blocks_avx512,\@function,4
.align	32
poly1305_blocks_avx512:
.Lblocks_avx512:
	vzeroupper
___
$code.=<<___ if (!$win64);
	lea	-8(%rsp),%r11
	sub	\$0x128,%rsp
___
$code.=<<___ if ($win64);
	lea	-0xf8(%rsp),%r11
	sub	\$0x1c8,%rsp
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
.Ldo_avx512_body:
___
$code.=<<___;
	lea	48+64($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	# expand pre-calculated table
	vmovdqu32	`16*0-64`($ctx),%x#$R0
	and	\$-512,%rsp
	vmovdqu32	`16*1-64`($ctx),%x#$R1
	vmovdqu32	`16*2-64`($ctx),%x#$S1
	vmovdqu32	`16*3-64`($ctx),%x#$R2
	vmovdqu32	`16*4-64`($ctx),%x#$S2
	vmovdqu32	`16*5-64`($ctx),%x#$R3
	vmovdqu32	`16*6-64`($ctx),%x#$S3
	vmovdqu32	`16*7-64`($ctx),%x#$R4
	vmovdqu32	`16*8-64`($ctx),%x#$S4
	vpermq		\$0x15,$R0,$R0		# 00003412 -> 12343434
	vmovdqa64	64(%rcx),$MASK		# .Lmask26
	vpermq		\$0x15,$R1,$R1
	vmovdqa32	128(%rcx),$GATHER	# .Lgather
	vpermq		\$0x15,$S1,$S1
	vpshufd		\$0xc8,$R0,$R0		# 12343434 -> 14243444
	vpermq		\$0x15,$R2,$R2
	vpshufd		\$0xc8,$R1,$R1
	vmovdqa32	$R0,0x00(%rsp)		# save in case $len%128 != 0
	vpsrlq		\$32,$R0,$T0		# 14243444 -> 01020304
	vpermq		\$0x15,$S2,$S2
	vpshufd		\$0xc8,$S1,$S1
	vmovdqa32	$R1,0x20(%rsp)
	vpsrlq		\$32,$R1,$T1
	vpermq		\$0x15,$R3,$R3
	vpshufd		\$0xc8,$R2,$R2
	vmovdqa32	$S1,0x40(%rsp)
	vpermq		\$0x15,$S3,$S3
	vpshufd		\$0xc8,$S2,$S2
	vpermq		\$0x15,$R4,$R4
	vpshufd		\$0xc8,$R3,$R3
	vmovdqa32	$R2,0x60(%rsp)
	vpermq		\$0x15,$S4,$S4
	vpshufd		\$0xc8,$S3,$S3
	vmovdqa32	$S2,0x80(%rsp)
	vpshufd		\$0xc8,$R4,$R4
	vpshufd		\$0xc8,$S4,$S4
	vmovdqa32	$R3,0xa0(%rsp)
	vmovdqa32	$S3,0xc0(%rsp)
	vmovdqa32	$R4,0xe0(%rsp)
	vmovdqa32	$S4,0x100(%rsp)

	################################################################
	# calculate 5th through 8th powers of the key
	#
	# d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
	# d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
	# d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3
	# d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4
	# d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0

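	# These are the rows of a schoolbook 5x5 limb multiplication modulo
	# 2^130 - 5. A minimal sketch of the same computation (Python-style
	# pseudo-code over 26-bit limb vectors a and b):
	#
	#   d = [0, 0, 0, 0, 0]
	#   for i in range(5):
	#       for j in range(5):
	#           if i + j < 5:
	#               d[i + j] += a[i] * b[j]
	#           else:
	#               d[i + j - 5] += a[i] * 5 * b[j]   # 2^130 = 5 mod p
	#
	# with the 5*b[j] factors taken from the precomputed s-table.
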
	vpmuludq	$T0,$R0,$D0	# d0 = r0'*r0
	vpmuludq	$T0,$R1,$D1	# d1 = r0'*r1
	vpmuludq	$T0,$R2,$D2	# d2 = r0'*r2
	vpmuludq	$T0,$R3,$D3	# d3 = r0'*r3
	vpmuludq	$T0,$R4,$D4	# d4 = r0'*r4
	vpsrlq		\$32,$R2,$T2

	vpmuludq	$T1,$S4,$M0
	vpmuludq	$T1,$R0,$M1
	vpmuludq	$T1,$R1,$M2
	vpmuludq	$T1,$R2,$M3
	vpmuludq	$T1,$R3,$M4
	vpsrlq		\$32,$R3,$T3
	vpaddq		$M0,$D0,$D0	# d0 += r1'*5*r4
	vpaddq		$M1,$D1,$D1	# d1 += r1'*r0
	vpaddq		$M2,$D2,$D2	# d2 += r1'*r1
	vpaddq		$M3,$D3,$D3	# d3 += r1'*r2
	vpaddq		$M4,$D4,$D4	# d4 += r1'*r3

	vpmuludq	$T2,$S3,$M0
	vpmuludq	$T2,$S4,$M1
	vpmuludq	$T2,$R1,$M3
	vpmuludq	$T2,$R2,$M4
	vpmuludq	$T2,$R0,$M2
	vpsrlq		\$32,$R4,$T4
	vpaddq		$M0,$D0,$D0	# d0 += r2'*5*r3
	vpaddq		$M1,$D1,$D1	# d1 += r2'*5*r4
	vpaddq		$M3,$D3,$D3	# d3 += r2'*r1
	vpaddq		$M4,$D4,$D4	# d4 += r2'*r2
	vpaddq		$M2,$D2,$D2	# d2 += r2'*r0

	vpmuludq	$T3,$S2,$M0
	vpmuludq	$T3,$R0,$M3
	vpmuludq	$T3,$R1,$M4
	vpmuludq	$T3,$S3,$M1
	vpmuludq	$T3,$S4,$M2
	vpaddq		$M0,$D0,$D0	# d0 += r3'*5*r2
	vpaddq		$M3,$D3,$D3	# d3 += r3'*r0
	vpaddq		$M4,$D4,$D4	# d4 += r3'*r1
	vpaddq		$M1,$D1,$D1	# d1 += r3'*5*r3
	vpaddq		$M2,$D2,$D2	# d2 += r3'*5*r4

	vpmuludq	$T4,$S4,$M3
	vpmuludq	$T4,$R0,$M4
	vpmuludq	$T4,$S1,$M0
	vpmuludq	$T4,$S2,$M1
	vpmuludq	$T4,$S3,$M2
	vpaddq		$M3,$D3,$D3	# d3 += r4'*5*r4
	vpaddq		$M4,$D4,$D4	# d4 += r4'*r0
	vpaddq		$M0,$D0,$D0	# d0 += r4'*5*r1
	vpaddq		$M1,$D1,$D1	# d1 += r4'*5*r2
	vpaddq		$M2,$D2,$D2	# d2 += r4'*5*r3

	################################################################
	# load input
	vmovdqu64	16*0($inp),%z#$T3
	vmovdqu64	16*4($inp),%z#$T4
	lea		16*8($inp),$inp

	################################################################
	# lazy reduction

	vpsrlq		\$26,$D3,$M3
	vpandq		$MASK,$D3,$D3
	vpaddq		$M3,$D4,$D4	# d3 -> d4

	vpsrlq		\$26,$D0,$M0
	vpandq		$MASK,$D0,$D0
	vpaddq		$M0,$D1,$D1	# d0 -> d1

	vpsrlq		\$26,$D4,$M4
	vpandq		$MASK,$D4,$D4

	vpsrlq		\$26,$D1,$M1
	vpandq		$MASK,$D1,$D1
	vpaddq		$M1,$D2,$D2	# d1 -> d2

	vpaddq		$M4,$D0,$D0
	vpsllq		\$2,$M4,$M4
	vpaddq		$M4,$D0,$D0	# d4 -> d0

	vpsrlq		\$26,$D2,$M2
	vpandq		$MASK,$D2,$D2
	vpaddq		$M2,$D3,$D3	# d2 -> d3

	vpsrlq		\$26,$D0,$M0
	vpandq		$MASK,$D0,$D0
	vpaddq		$M0,$D1,$D1	# d0 -> d1

	vpsrlq		\$26,$D3,$M3
	vpandq		$MASK,$D3,$D3
	vpaddq		$M3,$D4,$D4	# d3 -> d4

___
map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));		# switch to %zmm domain
map(s/%y/%z/,($M4,$M0,$M1,$M2,$M3));
map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
map(s/%y/%z/,($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4));
map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
map(s/%y/%z/,($MASK));
$code.=<<___;
	################################################################
	# at this point we have 14243444 in $R0-$S4 and 05060708 in
	# $D0-$D4, ...

	vpunpcklqdq	$T4,$T3,$T0	# transpose input
	vpunpckhqdq	$T4,$T3,$T4

	# ... since input 64-bit lanes are ordered as 73625140, we could
	# "vperm" it to 76543210 (here and in each loop iteration), *or*
	# we could just flow along, hence the goal for $R0-$S4 is
	# 1858286838784888 ...

	mov	\$0b0110011001100110,%eax
	mov	\$0b1100110011001100,%r8d
	mov	\$0b0101010101010101,%r9d
	kmovw	%eax,%k1
	kmovw	%r8d,%k2
	kmovw	%r9d,%k3

	vpbroadcastq	%x#$D0,$M0	# 0808080808080808
	vpbroadcastq	%x#$D1,$M1
	vpbroadcastq	%x#$D2,$M2
	vpbroadcastq	%x#$D3,$M3
	vpbroadcastq	%x#$D4,$M4

	vpexpandd	$D0,${D0}{%k1}	# 05060708 -> -05--06--07--08-
	vpexpandd	$D1,${D1}{%k1}
	vpexpandd	$D2,${D2}{%k1}
	vpexpandd	$D3,${D3}{%k1}
	vpexpandd	$D4,${D4}{%k1}

	vpexpandd	$R0,${D0}{%k2}	# -05--06--07--08- -> 145-246-347-448-
	vpexpandd	$R1,${D1}{%k2}
	vpexpandd	$R2,${D2}{%k2}
	vpexpandd	$R3,${D3}{%k2}
	vpexpandd	$R4,${D4}{%k2}

	vpblendmd	$M0,$D0,${R0}{%k3}	# 1858286838784888
	vpblendmd	$M1,$D1,${R1}{%k3}
	vpblendmd	$M2,$D2,${R2}{%k3}
	vpblendmd	$M3,$D3,${R3}{%k3}
	vpblendmd	$M4,$D4,${R4}{%k3}

	vpslld	\$2,$R1,$S1		# *5
	vpslld	\$2,$R2,$S2
	vpslld	\$2,$R3,$S3
	vpslld	\$2,$R4,$S4
	vpaddd	$R1,$S1,$S1
	vpaddd	$R2,$S2,$S2
	vpaddd	$R3,$S3,$S3
	vpaddd	$R4,$S4,$S4

	vpbroadcastq	%x#$MASK,$MASK
	vpbroadcastq	32(%rcx),$PADBIT	# .L129

	vpsrlq	\$52,$T0,$T2		# splat input
	vpsllq	\$12,$T4,$T3
	vporq	$T3,$T2,$T2
	vpsrlq	\$26,$T0,$T1
	vpsrlq	\$14,$T4,$T3
	vpsrlq	\$40,$T4,$T4		# 4
	vpandq	$MASK,$T2,$T2		# 2
	vpandq	$MASK,$T0,$T0		# 0
	vpandq	$MASK,$T1,$T1		# 1
	#vpandq	$MASK,$T3,$T3		# 3
	#vporq	$PADBIT,$T4,$T4		# padbit, yes, always

	vpaddq	$H2,$T2,$H2		# accumulate input
	mov	\$0x0f,%eax
	sub	\$192,$len
	jbe	.Ltail_avx512

.Loop_avx512:
	################################################################
	# ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
	# ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
	# ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
	# ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
	# ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
	# ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
	# ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
	# ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
	#   \________/\___________/
	################################################################
	#vpaddq	$H2,$T2,$H2		# accumulate input

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# however, as h2 is "chronologically" the first one available, pull
	# the corresponding operations up, so it's
	#
	# d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4
	# d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0
	# d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
	# d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2
	# d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3

	vpmuludq	$H2,$R1,$D3	# d3 = h2*r1
	vpaddq		$H0,$T0,$H0
	vpmuludq	$H2,$R2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$S3,$D0	# d0 = h2*s3
	vpandq		$MASK,$T3,$T3	# 3, modulo-scheduled
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4
	vporq		$PADBIT,$T4,$T4	# padbit, yes, always
	vpmuludq	$H2,$R0,$D2	# d2 = h2*r0
	vpaddq		$H1,$T1,$H1	# accumulate input
	vpaddq		$H3,$T3,$H3
	vpaddq		$H4,$T4,$H4

	vmovdqu64	16*0($inp),$T3	# load input
	vmovdqu64	16*4($inp),$T4
	lea		16*8($inp),$inp
	vpmuludq	$H0,$R3,$M3
	vpmuludq	$H0,$R4,$M4
	vpmuludq	$H0,$R0,$M0
	vpmuludq	$H0,$R1,$M1
	vpaddq		$M3,$D3,$D3	# d3 += h0*r3
	vpaddq		$M4,$D4,$D4	# d4 += h0*r4
	vpaddq		$M0,$D0,$D0	# d0 += h0*r0
	vpaddq		$M1,$D1,$D1	# d1 += h0*r1

	vpmuludq	$H1,$R2,$M3
	vpmuludq	$H1,$R3,$M4
	vpmuludq	$H1,$S4,$M0
	vpmuludq	$H0,$R2,$M2
	vpaddq		$M3,$D3,$D3	# d3 += h1*r2
	vpaddq		$M4,$D4,$D4	# d4 += h1*r3
	vpaddq		$M0,$D0,$D0	# d0 += h1*s4
	vpaddq		$M2,$D2,$D2	# d2 += h0*r2

	vpunpcklqdq	$T4,$T3,$T0	# transpose input
	vpunpckhqdq	$T4,$T3,$T4

	vpmuludq	$H3,$R0,$M3
	vpmuludq	$H3,$R1,$M4
	vpmuludq	$H1,$R0,$M1
	vpmuludq	$H1,$R1,$M2
	vpaddq		$M3,$D3,$D3	# d3 += h3*r0
	vpaddq		$M4,$D4,$D4	# d4 += h3*r1
	vpaddq		$M1,$D1,$D1	# d1 += h1*r0
	vpaddq		$M2,$D2,$D2	# d2 += h1*r1

	vpmuludq	$H4,$S4,$M3
	vpmuludq	$H4,$R0,$M4
	vpmuludq	$H3,$S2,$M0
	vpmuludq	$H3,$S3,$M1
	vpaddq		$M3,$D3,$D3	# d3 += h4*s4
	vpmuludq	$H3,$S4,$M2
	vpaddq		$M4,$D4,$D4	# d4 += h4*r0
	vpaddq		$M0,$D0,$D0	# d0 += h3*s2
	vpaddq		$M1,$D1,$D1	# d1 += h3*s3
	vpaddq		$M2,$D2,$D2	# d2 += h3*s4

	vpmuludq	$H4,$S1,$M0
	vpmuludq	$H4,$S2,$M1
	vpmuludq	$H4,$S3,$M2
	vpaddq		$M0,$D0,$H0	# h0 = d0 + h4*s1
	vpaddq		$M1,$D1,$H1	# h1 = d1 + h4*s2
	vpaddq		$M2,$D2,$H2	# h2 = d2 + h4*s3

	################################################################
	# lazy reduction (interleaved with input splat)

	vpsrlq	\$52,$T0,$T2		# splat input
	vpsllq	\$12,$T4,$T3

	vpsrlq	\$26,$D3,$H3
	vpandq	$MASK,$D3,$D3
	vpaddq	$H3,$D4,$H4		# h3 -> h4

	vporq	$T3,$T2,$T2

	vpsrlq	\$26,$H0,$D0
	vpandq	$MASK,$H0,$H0
	vpaddq	$D0,$H1,$H1		# h0 -> h1

	vpandq	$MASK,$T2,$T2		# 2

	vpsrlq	\$26,$H4,$D4
	vpandq	$MASK,$H4,$H4

	vpsrlq	\$26,$H1,$D1
	vpandq	$MASK,$H1,$H1
	vpaddq	$D1,$H2,$H2		# h1 -> h2

	vpaddq	$D4,$H0,$H0
	vpsllq	\$2,$D4,$D4
	vpaddq	$D4,$H0,$H0		# h4 -> h0

	vpaddq	$T2,$H2,$H2		# modulo-scheduled
	vpsrlq	\$26,$T0,$T1

	vpsrlq	\$26,$H2,$D2
	vpandq	$MASK,$H2,$H2
	vpaddq	$D2,$D3,$H3		# h2 -> h3

	vpsrlq	\$14,$T4,$T3

	vpsrlq	\$26,$H0,$D0
	vpandq	$MASK,$H0,$H0
	vpaddq	$D0,$H1,$H1		# h0 -> h1

	vpsrlq	\$40,$T4,$T4		# 4

	vpsrlq	\$26,$H3,$D3
	vpandq	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vpandq	$MASK,$T0,$T0		# 0
	vpandq	$MASK,$T1,$T1		# 1
	#vpandq	$MASK,$T3,$T3		# 3
	#vporq	$PADBIT,$T4,$T4		# padbit, yes, always

	sub	\$128,$len
	ja	.Loop_avx512

.Ltail_avx512:
	################################################################
	# while the above multiplications were by r^8 in all lanes, in the
	# last iteration we multiply the least significant lane by r^8 and
	# the most significant one by r, which is why the table gets
	# shifted...

	vpsrlq	\$32,$R0,$R0		# 0105020603070408
	vpsrlq	\$32,$R1,$R1
	vpsrlq	\$32,$R2,$R2
	vpsrlq	\$32,$S3,$S3
	vpsrlq	\$32,$S4,$S4
	vpsrlq	\$32,$R3,$R3
	vpsrlq	\$32,$R4,$R4
	vpsrlq	\$32,$S1,$S1
	vpsrlq	\$32,$S2,$S2

	################################################################
	# load either the next or the last 64 bytes of input
	lea	($inp,$len),$inp

	#vpaddq	$H2,$T2,$H2		# accumulate input
	vpaddq	$H0,$T0,$H0

	vpmuludq	$H2,$R1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$R2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$S3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4
	vpmuludq	$H2,$R0,$D2	# d2 = h2*r0
	vpandq		$MASK,$T3,$T3	# 3, modulo-scheduled
	vporq		$PADBIT,$T4,$T4	# padbit, yes, always
	vpaddq		$H1,$T1,$H1	# accumulate input
	vpaddq		$H3,$T3,$H3
	vpaddq		$H4,$T4,$H4

	vmovdqu64	16*0($inp),%x#$T0
	vpmuludq	$H0,$R3,$M3
	vpmuludq	$H0,$R4,$M4
	vpmuludq	$H0,$R0,$M0
	vpmuludq	$H0,$R1,$M1
	vpaddq		$M3,$D3,$D3	# d3 += h0*r3
	vpaddq		$M4,$D4,$D4	# d4 += h0*r4
	vpaddq		$M0,$D0,$D0	# d0 += h0*r0
	vpaddq		$M1,$D1,$D1	# d1 += h0*r1

	vmovdqu64	16*1($inp),%x#$T1
	vpmuludq	$H1,$R2,$M3
	vpmuludq	$H1,$R3,$M4
	vpmuludq	$H1,$S4,$M0
	vpmuludq	$H0,$R2,$M2
	vpaddq		$M3,$D3,$D3	# d3 += h1*r2
	vpaddq		$M4,$D4,$D4	# d4 += h1*r3
	vpaddq		$M0,$D0,$D0	# d0 += h1*s4
	vpaddq		$M2,$D2,$D2	# d2 += h0*r2

	vinserti64x2	\$1,16*2($inp),$T0,$T0
	vpmuludq	$H3,$R0,$M3
	vpmuludq	$H3,$R1,$M4
	vpmuludq	$H1,$R0,$M1
	vpmuludq	$H1,$R1,$M2
	vpaddq		$M3,$D3,$D3	# d3 += h3*r0
	vpaddq		$M4,$D4,$D4	# d4 += h3*r1
	vpaddq		$M1,$D1,$D1	# d1 += h1*r0
	vpaddq		$M2,$D2,$D2	# d2 += h1*r1

	vinserti64x2	\$1,16*3($inp),$T1,$T1
	vpmuludq	$H4,$S4,$M3
	vpmuludq	$H4,$R0,$M4
	vpmuludq	$H3,$S2,$M0
	vpmuludq	$H3,$S3,$M1
	vpmuludq	$H3,$S4,$M2
	vpaddq		$M3,$D3,$H3	# h3 = d3 + h4*s4
	vpaddq		$M4,$D4,$D4	# d4 += h4*r0
	vpaddq		$M0,$D0,$D0	# d0 += h3*s2
	vpaddq		$M1,$D1,$D1	# d1 += h3*s3
	vpaddq		$M2,$D2,$D2	# d2 += h3*s4

	vpmuludq	$H4,$S1,$M0
	vpmuludq	$H4,$S2,$M1
	vpmuludq	$H4,$S3,$M2
	vpaddq		$M0,$D0,$H0	# h0 = d0 + h4*s1
	vpaddq		$M1,$D1,$H1	# h1 = d1 + h4*s2
	vpaddq		$M2,$D2,$H2	# h2 = d2 + h4*s3

	################################################################
	# horizontal addition

	mov	\$1,%eax
	vpsrldq	\$8,$H3,$D3
	vpsrldq	\$8,$D4,$H4
	vpsrldq	\$8,$H0,$D0
	vpsrldq	\$8,$H1,$D1
	vpsrldq	\$8,$H2,$D2
	vpaddq	$D3,$H3,$H3
	vpaddq	$D4,$H4,$H4
	vpaddq	$D0,$H0,$H0
	vpaddq	$D1,$H1,$H1
	vpaddq	$D2,$H2,$H2

	kmovw	%eax,%k3
	vpermq	\$0x2,$H3,$D3
	vpermq	\$0x2,$H4,$D4
	vpermq	\$0x2,$H0,$D0
	vpermq	\$0x2,$H1,$D1
	vpermq	\$0x2,$H2,$D2
	vpaddq	$D3,$H3,$H3
	vpaddq	$D4,$H4,$H4
	vpaddq	$D0,$H0,$H0
	vpaddq	$D1,$H1,$H1
	vpaddq	$D2,$H2,$H2

	vextracti64x4	\$0x1,$H3,%y#$D3
	vextracti64x4	\$0x1,$H4,%y#$D4
	vextracti64x4	\$0x1,$H0,%y#$D0
	vextracti64x4	\$0x1,$H1,%y#$D1
	vextracti64x4	\$0x1,$H2,%y#$D2
	vpaddq	$D3,$H3,${H3}{%k3}{z}	# keep single qword in case
	vpaddq	$D4,$H4,${H4}{%k3}{z}	# it's passed to .Ltail_avx2
	vpaddq	$D0,$H0,${H0}{%k3}{z}
	vpaddq	$D1,$H1,${H1}{%k3}{z}
	vpaddq	$D2,$H2,${H2}{%k3}{z}
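
	# Same folding idea as in the AVX2 path, with one extra step for
	# the 512-bit registers (illustrative trace over eight qwords):
	#
	#   s = [q0 .. q7]
	#   s = [q0+q1, _, q2+q3, _, q4+q5, _, q6+q7, _]  # vpsrldq by 8
	#   s = [q0+..+q3, ..., q4+..+q7, ...]            # vpermq imm 0x2
	#   s0 = q0 + ... + q7                            # vextracti64x4+add
	#
	# and the k3 = 0b1 zeroing merge keeps only qword 0 of each limb.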
___
map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
$code.=<<___;
	################################################################
	# lazy reduction (interleaved with input splat)

	vpsrlq	\$26,$H3,$D3
	vpandq	$MASK,$H3,$H3
	vpsrldq	\$6,$T0,$T2		# splat input
	vpsrldq	\$6,$T1,$T3
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vpsrlq	\$26,$H0,$D0
	vpandq	$MASK,$H0,$H0
	vpunpcklqdq	$T3,$T2,$T2	# 2:3
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpaddq	$D0,$H1,$H1		# h0 -> h1

	vpsrlq	\$26,$H4,$D4
	vpandq	$MASK,$H4,$H4

	vpsrlq	\$26,$H1,$D1
	vpandq	$MASK,$H1,$H1
	vpsrlq	\$30,$T2,$T3
	vpsrlq	\$4,$T2,$T2
	vpaddq	$D1,$H2,$H2		# h1 -> h2

	vpaddq	$D4,$H0,$H0
	vpsllq	\$2,$D4,$D4
	vpsrlq	\$26,$T0,$T1
	vpsrlq	\$40,$T4,$T4		# 4
	vpaddq	$D4,$H0,$H0		# h4 -> h0

	vpsrlq	\$26,$H2,$D2
	vpandq	$MASK,$H2,$H2
	vpandq	$MASK,$T2,$T2		# 2
	vpandq	$MASK,$T0,$T0		# 0
	vpaddq	$D2,$H3,$H3		# h2 -> h3

	vpsrlq	\$26,$H0,$D0
	vpandq	$MASK,$H0,$H0
	vpaddq	$H2,$T2,$H2		# accumulate input for .Ltail_avx2
	vpandq	$MASK,$T1,$T1		# 1
	vpaddq	$D0,$H1,$H1		# h0 -> h1

	vpsrlq	\$26,$H3,$D3
	vpandq	$MASK,$H3,$H3
	vpandq	$MASK,$T3,$T3		# 3
	vporq	$PADBIT,$T4,$T4		# padbit, yes, always
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	lea	0x90(%rsp),%rax		# size optimization for .Ltail_avx2
	add	\$64,$len
	jnz	.Ltail_avx2

	vpsubq	$T2,$H2,$H2		# undo input accumulation
	vmovd	%x#$H0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	%x#$H1,`4*1-48-64`($ctx)
	vmovd	%x#$H2,`4*2-48-64`($ctx)
	vmovd	%x#$H3,`4*3-48-64`($ctx)
	vmovd	%x#$H4,`4*4-48-64`($ctx)
	vzeroall
___
$code.=<<___ if ($win64);
	movdqa	0x50(%r11),%xmm6
	movdqa	0x60(%r11),%xmm7
	movdqa	0x70(%r11),%xmm8
	movdqa	0x80(%r11),%xmm9
	movdqa	0x90(%r11),%xmm10
	movdqa	0xa0(%r11),%xmm11
	movdqa	0xb0(%r11),%xmm12
	movdqa	0xc0(%r11),%xmm13
	movdqa	0xd0(%r11),%xmm14
	movdqa	0xe0(%r11),%xmm15
	lea	0xf8(%r11),%rsp
.Ldo_avx512_epilogue:
___
$code.=<<___ if (!$win64);
	lea	8(%r11),%rsp
___
$code.=<<___;
	ret
.size	poly1305_blocks_avx512,.-poly1305_blocks_avx512
___
} }
$code.=<<___;
.align	64
.Lconst:
.Lmask24:
.long	0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
.L129:
.long	`1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0	# 2^24, padbit within top limb
.Lmask26:
.long	0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
.Lfive:
.long	5,0,5,0,5,0,5,0
.Lgather:
.long	0,8, 32,40, 64,72, 96,104
___
}

$code.=<<___;
.asciz	"Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler

.type	avx_handler,\@abi-omnipotent
.align	16
avx_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	208($context),%rax	# pull context->R11

	lea	0x50(%rax),%rsi
	lea	0xf8(%rax),%rax
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx
	.long	0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	avx_handler,.-avx_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_poly1305_init
	.rva	.LSEH_end_poly1305_init
	.rva	.LSEH_info_poly1305_init

	.rva	.LSEH_begin_poly1305_blocks
	.rva	.LSEH_end_poly1305_blocks
	.rva	.LSEH_info_poly1305_blocks

	.rva	.LSEH_begin_poly1305_emit
	.rva	.LSEH_end_poly1305_emit
	.rva	.LSEH_info_poly1305_emit
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.Lbase2_64_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1

	.rva	.Lbase2_64_avx
	.rva	.Leven_avx
	.rva	.LSEH_info_poly1305_blocks_avx_2

	.rva	.Leven_avx
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3

	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1

	.rva	.Lbase2_64_avx2
	.rva	.Leven_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2

	.rva	.Leven_avx2
	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_poly1305_blocks_avx512
	.rva	.LSEH_end_poly1305_blocks_avx512
	.rva	.LSEH_info_poly1305_blocks_avx512
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_poly1305_init:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init

.LSEH_info_poly1305_blocks:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
___
$code.=<<___ if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]

.LSEH_info_poly1305_emit_avx:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_poly1305_blocks_avx512:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx512_body,.Ldo_avx512_epilogue		# HandlerData[]
___
}

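# The loop below post-processes $code: backquoted expressions are
# evaluated at generation time, and the size-override notation is
# resolved to concrete register names. Illustrative examples of the
# substitutions (not exhaustive): "%rax#d" becomes "%eax", "%r11#d"
# becomes "%r11d", and "%x#%ymm9" collapses to "%xmm9", i.e. the xmm
# view of a register otherwise used as ymm.
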
foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/%r([a-z]+)#d/%e$1/g;
	s/%r([0-9]+)#d/%r$1d/g;
	s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;

	print $_,"\n";
}
close STDOUT;